2 * Copyright (c) 2005 Ammasso, Inc. All rights reserved.
3 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 #include <linux/slab.h>
39 #include <rdma/iw_cm.h>
/*
 * c2_llp_connect - active-side iWARP connect for the iw_cm layer.
 *
 * Looks up the QP named by iw_param->qpn, binds it to @cm_id, and posts
 * a variable-length CCWR_QP_CONNECT work request (carrying the remote
 * address/port and any caller private data) to the adapter's request
 * verbs queue.  The adapter replies asynchronously via the CM event
 * handler, not on this path (see the comment before vq_send_wr below).
 *
 * Returns 0 on success or a negative errno; on failure the QP/CM_ID
 * association made here is undone (provider_data cleared, rem_ref).
 *
 * NOTE(review): this view of the file is missing lines (error checks,
 * braces, returns); comments below describe only the visible code.
 */
41 int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
43 struct c2_dev *c2dev = to_c2dev(cm_id->device);
46 struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
47 struct c2_vq_req *vq_req;
/* Resolve the caller's QP number to the driver's QP object. */
50 ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
55 /* Associate QP <--> CM_ID */
56 cm_id->provider_data = qp;
57 cm_id->add_ref(cm_id);
/*
 * Reject oversized private data.  NOTE(review): the cm_id reference was
 * already taken above; confirm the (elided) error path drops it, else
 * this leaks a reference — TODO verify against the full file.
 */
61 * only support the max private_data length
63 if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
68 * Set the rdma read limits
/* Program the adapter's ORD/IRD (outbound/inbound RDMA read depths). */
70 err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
75 * Create and send a WR_QP_CONNECT...
/* WR is variable-length (trailing private data), so heap-allocate it. */
77 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
83 vq_req = vq_req_alloc(c2dev);
/* Fill in the connect WR header and handles. */
89 c2_wr_set_id(wr, CCWR_QP_CONNECT);
91 wr->rnic_handle = c2dev->adapter_handle;
92 wr->qp_handle = qp->adapter_handle;
/* sockaddr_in fields are already in network byte order; copied as-is. */
94 wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr;
95 wr->remote_port = cm_id->remote_addr.sin_port;
98 * Move any private data from the callers's buf into
/* Copy caller private data (length sent big-endian) into the WR tail. */
101 if (iw_param->private_data) {
102 wr->private_data_length =
103 cpu_to_be32(iw_param->private_data_len);
104 memcpy(&wr->private_data[0], iw_param->private_data,
105 iw_param->private_data_len);
107 wr->private_data_length = 0;
110 * Send WR to adapter. NOTE: There is no synch reply from
/* Fire-and-forget: no synchronous reply, so the vq_req is freed now. */
113 err = vq_send_wr(c2dev, (union c2wr *) wr);
114 vq_req_free(c2dev, vq_req);
121 * If we fail, release reference on QP and
122 * disassociate QP from CM_ID
/* Error unwind: undo the QP <--> CM_ID association made above. */
124 cm_id->provider_data = NULL;
126 cm_id->rem_ref(cm_id);
/*
 * c2_llp_service_create - create an iWARP listening endpoint.
 *
 * Posts a CCWR_EP_LISTEN_CREATE work request (local address/port and
 * @backlog) on the verbs queue, waits synchronously for the adapter's
 * reply, and on success stashes the adapter's endpoint handle in
 * cm_id->provider_data for the later destroy call.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): elided lines (error checks, returns, labels) are not
 * visible here; comments describe only the visible code.
 */
131 int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
133 struct c2_dev *c2dev;
/* Fixed-size WR, so it lives on the stack (unlike connect/accept). */
134 struct c2wr_ep_listen_create_req wr;
135 struct c2wr_ep_listen_create_rep *reply;
136 struct c2_vq_req *vq_req;
139 c2dev = to_c2dev(cm_id->device);
144 * Allocate verbs request.
146 vq_req = vq_req_alloc(c2dev);
/* Build the listen-create WR; context lets the IRQ handler find vq_req. */
153 c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
154 wr.hdr.context = (u64) (unsigned long) vq_req;
155 wr.rnic_handle = c2dev->adapter_handle;
/* sockaddr_in fields already network byte order; backlog must be swapped. */
156 wr.local_addr = cm_id->local_addr.sin_addr.s_addr;
157 wr.local_port = cm_id->local_addr.sin_port;
158 wr.backlog = cpu_to_be32(backlog);
159 wr.user_context = (u64) (unsigned long) cm_id;
162 * Reference the request struct. Dereferenced in the int handler.
164 vq_req_get(c2dev, vq_req);
169 err = vq_send_wr(c2dev, (union c2wr *) & wr);
/* Send failed: drop the reference the int handler will never release. */
171 vq_req_put(c2dev, vq_req);
176 * Wait for reply from adapter
178 err = vq_wait_for_reply(c2dev, vq_req);
/* reply_msg is a kernel pointer smuggled through a u64; cast it back. */
186 (struct c2wr_ep_listen_create_rep *) (unsigned long) vq_req->reply_msg;
192 if ((err = c2_errno(reply)) != 0)
196 * Keep the adapter handle. Used in subsequent destroy
198 cm_id->provider_data = (void*)(unsigned long) reply->ep_handle;
/* Success path cleanup: release reply buffer and request struct. */
203 vq_repbuf_free(c2dev, reply);
204 vq_req_free(c2dev, vq_req);
/* Error path cleanup (reached via elided goto labels). */
209 vq_repbuf_free(c2dev, reply);
211 vq_req_free(c2dev, vq_req);
/*
 * c2_llp_service_destroy - tear down a listening endpoint.
 *
 * Posts a CCWR_EP_LISTEN_DESTROY work request for the endpoint handle
 * that c2_llp_service_create() stored in cm_id->provider_data, then
 * waits synchronously for the adapter's reply.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): elided lines (error checks, returns, labels) are not
 * visible here; comments describe only the visible code.
 */
216 int c2_llp_service_destroy(struct iw_cm_id *cm_id)
219 struct c2_dev *c2dev;
220 struct c2wr_ep_listen_destroy_req wr;
221 struct c2wr_ep_listen_destroy_rep *reply;
222 struct c2_vq_req *vq_req;
225 c2dev = to_c2dev(cm_id->device);
230 * Allocate verbs request.
232 vq_req = vq_req_alloc(c2dev);
/* Build the destroy WR; ep_handle round-trips the create-time handle. */
239 c2_wr_set_id(&wr, CCWR_EP_LISTEN_DESTROY);
240 wr.hdr.context = (unsigned long) vq_req;
241 wr.rnic_handle = c2dev->adapter_handle;
242 wr.ep_handle = (u32)(unsigned long)cm_id->provider_data;
245 * reference the request struct. dereferenced in the int handler.
247 vq_req_get(c2dev, vq_req);
252 err = vq_send_wr(c2dev, (union c2wr *) & wr);
/* Send failed: drop the reference the int handler will never release. */
254 vq_req_put(c2dev, vq_req);
259 * Wait for reply from adapter
261 err = vq_wait_for_reply(c2dev, vq_req);
/* Cast the u64 reply_msg back to the typed reply pointer. */
268 reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg;
/* Translate the adapter's status into a kernel errno. */
273 if ((err = c2_errno(reply)) != 0)
277 vq_repbuf_free(c2dev, reply);
279 vq_req_free(c2dev, vq_req);
/*
 * c2_llp_accept - passive-side accept of an incoming iWARP connection.
 *
 * Looks up the QP named by iw_param->qpn, programs its ORD/IRD, posts a
 * variable-length CCWR_CR_ACCEPT work request (referencing the pending
 * connection-request handle held in cm_id->provider_data and carrying
 * any caller private data), waits synchronously for the adapter's reply,
 * and on success moves the QP to RTS.  An IW_CM_EVENT_ESTABLISHED event
 * is associated with the vq_req for delivery by the interrupt handler.
 *
 * Returns 0 on success or a negative errno; on failure the QP/CM_ID
 * association is undone (provider_data cleared, rem_ref).
 *
 * NOTE(review): this view of the file is missing lines (error checks,
 * braces, returns); comments below describe only the visible code.
 */
283 int c2_llp_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
285 struct c2_dev *c2dev = to_c2dev(cm_id->device);
288 struct c2wr_cr_accept_req *wr; /* variable length WR */
289 struct c2_vq_req *vq_req;
290 struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */
/* Resolve the caller's QP number to the driver's QP object. */
293 ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
298 /* Set the RDMA read limits */
299 err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);
303 /* Allocate verbs request. */
304 vq_req = vq_req_alloc(c2dev);
/* Tell the int handler which CM event to post when the reply arrives. */
310 vq_req->cm_id = cm_id;
311 vq_req->event = IW_CM_EVENT_ESTABLISHED;
/* WR is variable-length (trailing private data), so heap-allocate it. */
313 wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);
/* Fill in the accept WR; ep_handle is the pending CR handle saved in
 * provider_data before it gets overwritten with the QP just below. */
320 c2_wr_set_id(wr, CCWR_CR_ACCEPT)
321 wr->hdr.context = (unsigned long) vq_req;
322 wr->rnic_handle = c2dev->adapter_handle;
323 wr->ep_handle = (u32) (unsigned long) cm_id->provider_data;
324 wr->qp_handle = qp->adapter_handle;
326 /* Replace the cr_handle with the QP after accept */
327 cm_id->provider_data = qp;
328 cm_id->add_ref(cm_id);
/* NOTE(review): provider_data was already set to qp at the line above —
 * this second assignment is redundant; confirm against the full file. */
331 cm_id->provider_data = qp;
333 /* Validate private_data length */
334 if (iw_param->private_data_len > C2_MAX_PRIVATE_DATA_SIZE) {
/* Copy caller private data (length sent big-endian) into the WR tail. */
339 if (iw_param->private_data) {
340 wr->private_data_length = cpu_to_be32(iw_param->private_data_len);
341 memcpy(&wr->private_data[0],
342 iw_param->private_data, iw_param->private_data_len);
344 wr->private_data_length = 0;
346 /* Reference the request struct. Dereferenced in the int handler. */
347 vq_req_get(c2dev, vq_req);
349 /* Send WR to adapter */
350 err = vq_send_wr(c2dev, (union c2wr *) wr);
/* Send failed: drop the reference the int handler will never release. */
352 vq_req_put(c2dev, vq_req);
356 /* Wait for reply from adapter */
357 err = vq_wait_for_reply(c2dev, vq_req);
361 /* Check that reply is present */
362 reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg;
/* Translate adapter status, then the reply buffer is no longer needed. */
368 err = c2_errno(reply);
369 vq_repbuf_free(c2dev, reply);
/* Connection is up: move the QP to Ready-To-Send. */
372 c2_set_qp_state(qp, C2_QP_STATE_RTS);
375 vq_req_free(c2dev, vq_req);
379 * If we fail, release reference on QP and
380 * disassociate QP from CM_ID
/* Error unwind: undo the QP <--> CM_ID association made above. */
382 cm_id->provider_data = NULL;
384 cm_id->rem_ref(cm_id);
/*
 * c2_llp_reject - reject a pending iWARP connection request.
 *
 * Posts a CCWR_CR_REJECT work request for the connection-request handle
 * held in cm_id->provider_data and waits synchronously for the
 * adapter's reply.  Note that the @pdata/@pdata_len arguments are not
 * used by any visible line — private data is apparently not conveyed on
 * the reject path (TODO confirm against the full file/wire protocol).
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): elided lines (error checks, returns, labels) are not
 * visible here; comments describe only the visible code.
 */
389 int c2_llp_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
391 struct c2_dev *c2dev;
392 struct c2wr_cr_reject_req wr;
393 struct c2_vq_req *vq_req;
394 struct c2wr_cr_reject_rep *reply;
397 c2dev = to_c2dev(cm_id->device);
400 * Allocate verbs request.
402 vq_req = vq_req_alloc(c2dev);
/* Build the reject WR; ep_handle names the pending connection request. */
409 c2_wr_set_id(&wr, CCWR_CR_REJECT);
410 wr.hdr.context = (unsigned long) vq_req;
411 wr.rnic_handle = c2dev->adapter_handle;
412 wr.ep_handle = (u32) (unsigned long) cm_id->provider_data;
415 * reference the request struct. dereferenced in the int handler.
417 vq_req_get(c2dev, vq_req);
422 err = vq_send_wr(c2dev, (union c2wr *) & wr);
/* Send failed: drop the reference the int handler will never release. */
424 vq_req_put(c2dev, vq_req);
429 * Wait for reply from adapter
431 err = vq_wait_for_reply(c2dev, vq_req);
/* Cast the u64 reply_msg back to the typed reply pointer. */
438 reply = (struct c2wr_cr_reject_rep *) (unsigned long)
/* Translate the adapter's status into a kernel errno. */
444 err = c2_errno(reply);
448 vq_repbuf_free(c2dev, reply);
451 vq_req_free(c2dev, vq_req);