/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#include "ib_isert.h"
#define	ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)
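
/*
 * Note: ISER_MAX_CQ_LEN budgets worst-case RX and TX work requests for
 * ISERT_MAX_CONN connections sharing a completion queue, plus one extra
 * slot per connection (presumably for the login receive). It is only an
 * upper bound; isert_alloc_comps() clamps the actual CQ depth to the
 * device's reported attr->max_cqe.
 */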
static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}
static void
isert_qp_event_callback(struct ib_event *e, void *context)
{
	struct isert_conn *isert_conn = context;

	isert_err("conn %p event: %d\n", isert_conn, e->event);

	switch (e->event) {
	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
		break;
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
		break;
	default:
		break;
	}
}
static int
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
{
	int ret;

	ret = ib_query_device(ib_dev, devattr);
	if (ret) {
		isert_err("ib_query_device() failed: %d\n", ret);
		return ret;
	}
	isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

	return 0;
}
static struct isert_comp *
isert_comp_get(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct isert_comp *comp;
	int i, min = 0;

	mutex_lock(&device_list_mutex);
	for (i = 0; i < device->comps_used; i++)
		if (device->comps[i].active_qps <
		    device->comps[min].active_qps)
			min = i;
	comp = &device->comps[min];
	comp->active_qps++;
	mutex_unlock(&device_list_mutex);

	isert_info("conn %p, using comp %p min_index: %d\n",
		   isert_conn, comp, min);

	return comp;
}
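
/*
 * isert_comp_get() picks the completion context with the fewest active
 * QPs, a simple least-loaded placement across the per-device CQ array,
 * serialized by device_list_mutex and undone by isert_comp_put().
 */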
static void
isert_comp_put(struct isert_comp *comp)
{
	mutex_lock(&device_list_mutex);
	comp->active_qps--;
	mutex_unlock(&device_list_mutex);
}
static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
		struct isert_comp *comp,
		struct rdma_cm_id *cma_id)
{
	struct isert_device *device = isert_conn->device;
	struct ib_qp_init_attr attr;
	int ret;

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = comp->cq;
	attr.recv_cq = comp->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
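	/*
	 * One extra recv WR beyond ISERT_QP_MAX_RECV_DTOS: presumably
	 * headroom for the separate login-request receive posted by
	 * isert_rdma_post_recvl() alongside the normal ring.
	 */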
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	ret = rdma_create_qp(cma_id, device->pd, &attr);
	if (ret) {
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);
		return ERR_PTR(ret);
	}

	return cma_id->qp;
}
static int
isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
{
	struct isert_comp *comp;
	int ret;

	comp = isert_comp_get(isert_conn);
	isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
	if (IS_ERR(isert_conn->qp)) {
		ret = PTR_ERR(isert_conn->qp);
		goto err;
	}

	return 0;
err:
	isert_comp_put(comp);
	return ret;
}
static void
isert_cq_event_callback(struct ib_event *e, void *context)
{
	isert_dbg("event: %d\n", e->event);
}
static int
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;
	u64 dma_addr;
	int i, j;

	isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->rx_descs)
		goto fail;

	rx_desc = isert_conn->rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))
			goto dma_map_fail;

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->mr->lkey;
	}

	isert_conn->rx_desc_head = 0;

	return 0;

dma_map_fail:
	rx_desc = isert_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}
	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
fail:
	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
	return -ENOMEM;
}
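
/*
 * Each RX descriptor above keeps its DMA mapping for the lifetime of
 * the connection; completed buffers are recycled by re-posting the same
 * descriptors in isert_post_recv() rather than remapping per I/O.
 */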
static void
isert_free_rx_descriptors(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;
	struct iser_rx_desc *rx_desc;
	int i;

	if (!isert_conn->rx_descs)
		return;

	rx_desc = isert_conn->rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	}

	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
}
static void isert_cq_work(struct work_struct *);
static void isert_cq_callback(struct ib_cq *, void *);

static void
isert_free_comps(struct isert_device *device)
{
	int i;

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		if (comp->cq) {
			cancel_work_sync(&comp->work);
			ib_destroy_cq(comp->cq);
		}
	}
	kfree(device->comps);
}
static int
isert_alloc_comps(struct isert_device *device,
		  struct ib_device_attr *attr)
{
	int i, max_cqe, ret = 0;

	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors));

	isert_info("Using %d CQs, %s supports %d vectors support "
		   "Fast registration %d pi_capable %d\n",
		   device->comps_used, device->ib_device->name,
		   device->ib_device->num_comp_vectors, device->use_fastreg,
		   device->pi_capable);

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
				GFP_KERNEL);
	if (!device->comps) {
		isert_err("Unable to allocate completion contexts\n");
		return -ENOMEM;
	}

	max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);

	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		comp->device = device;
		INIT_WORK(&comp->work, isert_cq_work);
		comp->cq = ib_create_cq(device->ib_device,
					isert_cq_callback,
					isert_cq_event_callback,
					(void *)comp,
					max_cqe, i);
		if (IS_ERR(comp->cq)) {
			isert_err("Unable to allocate cq\n");
			ret = PTR_ERR(comp->cq);
			comp->cq = NULL;
			goto out_cq;
		}

		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
		if (ret)
			goto out_cq;
	}

	return 0;
out_cq:
	isert_free_comps(device);
	return ret;
}
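
/*
 * The CQ count chosen above is capped by ISERT_MAX_CQ, the number of
 * online CPUs and the device's completion vectors; each CQ gets its own
 * work item so completion processing can spread across cores.
 */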
static int
isert_create_device_ib_res(struct isert_device *device)
{
	struct ib_device_attr *dev_attr;
	int ret;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(device->ib_device, dev_attr);
	if (ret)
		return ret;

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
	} else {
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;
	}

	ret = isert_alloc_comps(device, dev_attr);
	if (ret)
		return ret;

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd)) {
		ret = PTR_ERR(device->pd);
		isert_err("failed to allocate pd, device %p, ret=%d\n",
			  device, ret);
		goto out_cq;
	}

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE);
	if (IS_ERR(device->mr)) {
		ret = PTR_ERR(device->mr);
		isert_err("failed to create dma mr, device %p, ret=%d\n",
			  device, ret);
		goto out_mr;
	}

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	return 0;

out_mr:
	ib_dealloc_pd(device->pd);
out_cq:
	isert_free_comps(device);
	return ret;
}
403 isert_free_device_ib_res(struct isert_device *device)
405 isert_info("device %p\n", device);
407 ib_dereg_mr(device->mr);
408 ib_dealloc_pd(device->pd);
409 isert_free_comps(device);
static void
isert_device_put(struct isert_device *device)
{
	mutex_lock(&device_list_mutex);
	device->refcount--;
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
		kfree(device);
	}
	mutex_unlock(&device_list_mutex);
}
static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
{
	struct isert_device *device;
	int ret;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			device->refcount++;
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);
			return device;
		}
	}

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
	if (!device) {
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
	if (ret) {
		kfree(device);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(ret);
	}

	device->refcount++;
	list_add_tail(&device->dev_node, &device_list);
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	mutex_unlock(&device_list_mutex);

	return device;
}
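
/*
 * Devices are cached on the global device_list keyed by HCA node GUID,
 * so connections arriving on the same HCA share one PD, DMA MR and
 * completion-context array via refcounting.
 */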
static void
isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc, *tmp;
	int i = 0;

	if (list_empty(&isert_conn->fr_pool))
		return;

	isert_info("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->fr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		if (fr_desc->pi_ctx) {
			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
			ib_destroy_mr(fr_desc->pi_ctx->sig_mr);
			kfree(fr_desc->pi_ctx);
		}
		kfree(fr_desc);
		++i;
	}

	if (i < isert_conn->fr_pool_size)
		isert_warn("Pool still has %d regions registered\n",
			isert_conn->fr_pool_size - i);
}
static int
isert_create_pi_ctx(struct fast_reg_descriptor *desc,
		    struct ib_device *device,
		    struct ib_pd *pd)
{
	struct ib_mr_init_attr mr_init_attr;
	struct pi_context *pi_ctx;
	int ret;

	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
	if (!pi_ctx) {
		isert_err("Failed to allocate pi context\n");
		return -ENOMEM;
	}

	pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
					    ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_frpl)) {
		isert_err("Failed to allocate prot frpl err=%ld\n",
			  PTR_ERR(pi_ctx->prot_frpl));
		ret = PTR_ERR(pi_ctx->prot_frpl);
		goto err_pi_ctx;
	}

	pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_mr)) {
		isert_err("Failed to allocate prot frmr err=%ld\n",
			  PTR_ERR(pi_ctx->prot_mr));
		ret = PTR_ERR(pi_ctx->prot_mr);
		goto err_prot_frpl;
	}
	desc->ind |= ISERT_PROT_KEY_VALID;

	memset(&mr_init_attr, 0, sizeof(mr_init_attr));
	mr_init_attr.max_reg_descriptors = 2;
	mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
	pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
	if (IS_ERR(pi_ctx->sig_mr)) {
		isert_err("Failed to allocate signature enabled mr err=%ld\n",
			  PTR_ERR(pi_ctx->sig_mr));
		ret = PTR_ERR(pi_ctx->sig_mr);
		goto err_prot_mr;
	}

	desc->pi_ctx = pi_ctx;
	desc->ind |= ISERT_SIG_KEY_VALID;
	desc->ind &= ~ISERT_PROTECTED;

	return 0;

err_prot_mr:
	ib_dereg_mr(pi_ctx->prot_mr);
err_prot_frpl:
	ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
err_pi_ctx:
	kfree(pi_ctx);

	return ret;
}
static int
isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
{
	int ret;

	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
							 ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
		isert_err("Failed to allocate data frpl err=%ld\n",
			  PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);
	}

	fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		isert_err("Failed to allocate data frmr err=%ld\n",
			  PTR_ERR(fr_desc->data_mr));
		ret = PTR_ERR(fr_desc->data_mr);
		goto err_data_frpl;
	}
	fr_desc->ind |= ISERT_DATA_KEY_VALID;

	isert_dbg("Created fr_desc %p\n", fr_desc);

	return 0;

err_data_frpl:
	ib_free_fast_reg_page_list(fr_desc->data_frpl);

	return ret;
}
static int
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
{
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	int i, ret, tag_num;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
		if (!fr_desc) {
			isert_err("Failed to allocate fast_reg descriptor\n");
			ret = -ENOMEM;
			goto err;
		}

		ret = isert_create_fr_desc(device->ib_device,
					   device->pd, fr_desc);
		if (ret) {
			isert_err("Failed to create fastreg descriptor err=%d\n",
			       ret);
			kfree(fr_desc);
			goto err;
		}

		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		isert_conn->fr_pool_size++;
	}

	isert_dbg("Creating conn %p fastreg pool size=%d",
		 isert_conn, isert_conn->fr_pool_size);

	return 0;

err:
	isert_conn_free_fastreg_pool(isert_conn);
	return ret;
}
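
/*
 * Pool sizing note: tag_num doubles the session tag count (roughly one
 * descriptor for data and one for protection metadata per command,
 * presumably) plus ISCSIT_EXTRA_TAGS of headroom.
 */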
static void
isert_init_conn(struct isert_conn *isert_conn)
{
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->accept_node);
	init_completion(&isert_conn->login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->wait);
	kref_init(&isert_conn->kref);
	mutex_init(&isert_conn->mutex);
	spin_lock_init(&isert_conn->pool_lock);
	INIT_LIST_HEAD(&isert_conn->fr_pool);
}
static void
isert_free_login_buf(struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_FROM_DEVICE);
	kfree(isert_conn->login_buf);
}
static int
isert_alloc_login_buf(struct isert_conn *isert_conn,
		      struct ib_device *ib_dev)
{
	int ret;

	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		isert_err("Unable to allocate isert_conn->login_buf\n");
		return -ENOMEM;
	}

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;

	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		 isert_conn->login_buf, isert_conn->login_req_buf,
		 isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
	if (ret) {
		isert_err("login_req_dma mapping error: %d\n", ret);
		isert_conn->login_req_dma = 0;
		goto out_login_buf;
	}

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_rsp_buf,
				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
	if (ret) {
		isert_err("login_rsp_dma mapping error: %d\n", ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;
	}

	return 0;

out_req_dma_map:
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
out_login_buf:
	kfree(isert_conn->login_buf);
	return ret;
}
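
/*
 * The login request and response share one kzalloc'd buffer: the first
 * ISCSI_DEF_MAX_RECV_SEG_LEN bytes receive the initiator's login PDU,
 * the trailing ISER_RX_LOGIN_SIZE bytes hold the target's response.
 */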
static int
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;
	int ret = 0;

	spin_lock_bh(&np->np_thread_lock);
	if (!np->enabled) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	}
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		 cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
	if (!isert_conn)
		return -ENOMEM;

	isert_init_conn(isert_conn);
	isert_conn->cm_id = cma_id;

	ret = isert_alloc_login_buf(isert_conn, cma_id->device);
	if (ret)
		goto out;

	device = isert_device_get(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	}
	isert_conn->device = device;

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8,
				event->param.conn.initiator_depth,
				device->dev_attr.max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	ret = isert_conn_setup_qp(isert_conn, cma_id);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_post_recvl(isert_conn);
	if (ret)
		goto out_conn_dev;

	ret = isert_rdma_accept(isert_conn);
	if (ret)
		goto out_conn_dev;

	mutex_lock(&isert_np->np_accept_mutex);
	list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list);
	mutex_unlock(&isert_np->np_accept_mutex);

	isert_info("np %p: Allow accept_np to continue\n", np);
	up(&isert_np->np_sem);
	return 0;

out_conn_dev:
	isert_device_put(device);
out_rsp_dma_map:
	isert_free_login_buf(isert_conn);
out:
	kfree(isert_conn);
	rdma_reject(cma_id, NULL, 0);
	return ret;
}
static void
isert_connect_release(struct isert_conn *isert_conn)
{
	struct isert_device *device = isert_conn->device;

	isert_dbg("conn %p\n", isert_conn);

	BUG_ON(!device);

	if (device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->cm_id)
		rdma_destroy_id(isert_conn->cm_id);

	if (isert_conn->qp) {
		struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;

		isert_comp_put(comp);
		ib_destroy_qp(isert_conn->qp);
	}

	if (isert_conn->login_buf)
		isert_free_login_buf(isert_conn);

	isert_device_put(device);

	kfree(isert_conn);
}
static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_info("conn %p\n", isert_conn);

	if (!kref_get_unless_zero(&isert_conn->kref)) {
		isert_warn("conn %p connect_release is running\n", isert_conn);
		return;
	}

	mutex_lock(&isert_conn->mutex);
	if (isert_conn->state != ISER_CONN_FULL_FEATURE)
		isert_conn->state = ISER_CONN_UP;
	mutex_unlock(&isert_conn->mutex);
}
static void
isert_release_kref(struct kref *kref)
{
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);
}

static void
isert_put_conn(struct isert_conn *isert_conn)
{
	kref_put(&isert_conn->kref, isert_release_kref);
}
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
static void
isert_conn_terminate(struct isert_conn *isert_conn)
{
	int err;

	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
		break;
	case ISER_CONN_UP:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		isert_info("Terminating conn %p state %d\n",
			   isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->cm_id);
		if (err)
			isert_warn("Failed rdma_disconnect isert_conn %p\n",
				   isert_conn);
		break;
	default:
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
	}
}
static int
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
{
	isert_dbg("isert np %p, handling event %d\n", isert_np, event);

	switch (event) {
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->np_cm_id = NULL;
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->np_cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->np_cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->np_cm_id));
			isert_np->np_cm_id = NULL;
		}
		break;
	default:
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);
	}

	return -1;
}
static int
isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
{
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;

	if (isert_np->np_cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->mutex);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_info("conn %p completing wait\n", isert_conn);
	complete(&isert_conn->wait);

	return 0;
}
static int
isert_connect_error(struct rdma_cm_id *cma_id)
{
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

	return -1;
}
static int
isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	isert_info("event %d status %d id %p np %p\n", event->event,
		   event->status, cma_id, cma_id->context);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
		if (ret)
			isert_err("failed handle connect request %d\n", ret);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
		break;
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		break;
	default:
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
		break;
	}

	return ret;
}
static int
isert_post_recv(struct isert_conn *isert_conn, u32 count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ret;
	unsigned int rx_head = isert_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->rx_descs[rx_head];
		rx_wr->wr_id = (uintptr_t)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->num_sge = 1;
		rx_wr->next = rx_wr + 1;
		rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
			   &rx_wr_failed);
	if (ret) {
		isert_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;
	} else {
		isert_dbg("Posted %d RX buffers\n", count);
		isert_conn->rx_desc_head = rx_head;
	}
	return ret;
}
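
/*
 * rx_desc_head advances under a power-of-two mask, so the receive ring
 * only wraps correctly if ISERT_QP_MAX_RECV_DTOS is a power of two; the
 * chained rx_wr array lets a single ib_post_recv() replenish a batch.
 */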
static int
isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;
	int ret;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (uintptr_t)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
	if (ret)
		isert_err("ib_post_send() failed, ret: %d\n", ret);

	return ret;
}
static void
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
		tx_desc->tx_sg[0].lkey = device->mr->lkey;
		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
	}
}
static int
isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u64 dma_addr;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");
		return -ENOMEM;
	}

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->mr->lkey;

	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
		  tx_desc->tx_sg[0].lkey);

	return 0;
}
static void
isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr)
{
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;
}
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_fail;
	struct ib_sge sge;
	int ret;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->device->mr->lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
	if (ret) {
		isert_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
	}

	return ret;
}
static int
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;
	int ret;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

	if (length > 0) {
		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->device->mr->lkey;
		tx_desc->num_sge = 2;
	}
	if (!login->login_failed) {
		if (login->login_complete) {
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
				if (ret) {
					isert_err("Conn: %p failed to create"
					       " fastreg pool\n", isert_conn);
					return ret;
				}
			}

			ret = isert_alloc_rx_descriptors(isert_conn);
			if (ret)
				return ret;

			ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
			if (ret)
				return ret;

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->mutex);
			goto post_send;
		}

		ret = isert_rdma_post_recvl(isert_conn);
		if (ret)
			return ret;
	}
post_send:
	ret = isert_post_send(isert_conn, tx_desc);
	if (ret)
		return ret;

	return 0;
}
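
/*
 * Login flow note: until the final login response the target keeps
 * re-posting the single login receive buffer via isert_rdma_post_recvl();
 * only on login_complete does it allocate the RX descriptor ring and
 * move the connection to FULL_FEATURE.
 */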
static void
isert_rx_login_req(struct isert_conn *isert_conn)
{
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;
	int size;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			 >> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);
	}

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->login_comp);
		return;
	}
	schedule_delayed_work(&conn->login_work, 0);
}
static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
	if (!cmd) {
		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
		return NULL;
	}
	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;

	return cmd;
}
static int
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	struct scatterlist *sg;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
	if (rc < 0)
		return rc;

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;

	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	if (rc < 0) {
		return 0;
	} else if (rc > 0) {
		dump_payload = true;
		goto sequence_cmd;
	}

	if (!imm_data)
		return 0;

	sg = &cmd->se_cmd.t_data_sg[0];
	sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));

	isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
		  sg, sg_nents, &rx_desc->data[0], imm_data_len);

	sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);
	}

sequence_cmd:
	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);

	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);

	return 0;
}
static int
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
{
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);
	if (rc < 0)
		return rc;
	else if (!cmd)
		return 0;
	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");
		dump_stack();
		return -1;
	}

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len,  cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
	if (page_off) {
		isert_err("unexpected non-page aligned data payload\n");
		dump_stack();
		return -1;
	}
	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);
	if (rc < 0)
		return rc;

	return 0;
}
static int
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
{
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;
	int rc;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	if (rc < 0)
		return rc;
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */

	return iscsit_process_nop_out(conn, cmd, hdr);
}
static int
isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
{
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	int rc;
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);
	if (rc < 0)
		return rc;

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
		if (!text_in) {
			isert_err("Unable to allocate text_in of payload_length: %u\n",
				  payload_length);
			return -ENOMEM;
		}
	}
	cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
}
static int
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
{
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	int ret = -EINVAL;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (conn->sess->sess_ops->SessionType &&
	    (opcode != ISCSI_OP_TEXT && opcode != ISCSI_OP_LOGOUT)) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			  " ignoring\n", opcode);
		return 0;
	}

	switch (opcode) {
	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
		break;
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn);
		if (!cmd)
			break;

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		break;
	case ISCSI_OP_TEXT:
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
			if (!cmd)
				break;
		} else {
			cmd = isert_allocate_cmd(conn);
			if (!cmd)
				break;
		}

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		break;
	default:
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
		dump_stack();
		break;
	}

	return ret;
}
static void
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
{
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;
	int rc;

	switch (iser_hdr->flags & 0xF0) {
	case ISCSI_CTRL:
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		}
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		}

		isert_dbg("ISER ISCSI_CTRL PDU\n");
		break;
	case ISER_HELLO:
		isert_err("iSER Hello message\n");
		break;
	default:
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
		break;
	}

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
}
static void
isert_rcv_completion(struct iser_rx_desc *desc,
		     struct isert_conn *isert_conn,
		     u32 xfer_len)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_hdr *hdr;
	u64 rx_dma;
	int rx_buflen, outstanding;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	} else {
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			 rx_dma, rx_buflen);
	}

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		 hdr->opcode, hdr->itt, hdr->flags,
		 (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);
		}
		mutex_lock(&isert_conn->mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->mutex);
	} else {
		isert_rx_do_work(desc, isert_conn);
	}

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
	isert_dbg("Decremented post_recv_buf_count: %d\n",
		  isert_conn->post_recv_buf_count);

	if ((char *)desc == isert_conn->login_req_buf)
		return;

	outstanding = isert_conn->post_recv_buf_count;
	if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
		int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
				ISERT_MIN_POSTED_RX);
		err = isert_post_recv(isert_conn, count);
		if (err) {
			isert_err("isert_post_recv() count: %d failed, %d\n",
			       count, err);
		}
	}
}
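
/*
 * Receive buffers are replenished lazily: a batch is re-posted only
 * once outstanding posts drop ISERT_MIN_POSTED_RX below the ring size,
 * amortizing ib_post_recv() calls across completions.
 */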
static int
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	data->len = length - offset;
	data->offset = offset;
	data->sg_off = data->offset / PAGE_SIZE;

	data->sg = &sg[data->sg_off];
	data->nents = min_t(unsigned int, nents - data->sg_off,
					  ISCSI_ISER_SG_TABLESIZE);
	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
					PAGE_SIZE);

	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
		isert_err("Cmd: unable to dma map SGs %p\n", sg);
		return -EINVAL;
	}

	isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		  isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

	return 0;
}
static void
isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
	memset(data, 0, sizeof(*data));
}
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p\n", isert_cmd);

	if (wr->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	if (wr->send_wr) {
		isert_dbg("Cmd %p free send_wr\n", isert_cmd);
		kfree(wr->send_wr);
		wr->send_wr = NULL;
	}

	if (wr->ib_sge) {
		isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
		kfree(wr->ib_sge);
		wr->ib_sge = NULL;
	}
}
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p\n", isert_cmd);

	if (wr->fr_desc) {
		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
		if (wr->fr_desc->ind & ISERT_PROTECTED) {
			isert_unmap_data_buf(isert_conn, &wr->prot);
			wr->fr_desc->ind &= ~ISERT_PROTECTED;
		}
		spin_lock_bh(&isert_conn->pool_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_bh(&isert_conn->pool_lock);
		wr->fr_desc = NULL;
	}

	if (wr->data.sg) {
		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
	}

	wr->ib_sge = NULL;
	wr->send_wr = NULL;
}
static void
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->device;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			if (comp_err &&
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd->se_sess, se_cmd);
			}
		}

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
		break;
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
	case ISCSI_OP_TEXT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
			break;

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
				 cmd->iscsi_opcode);
			transport_generic_free_cmd(&cmd->se_cmd, 0);
			break;
		}
		/*
		 * Fall-through
		 */
	default:
		iscsit_release_cmd(cmd);
		break;
	}
}
static void
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
{
	if (tx_desc->dma_addr != 0) {
		isert_dbg("unmap single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;
	}
}
static void
isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
{
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;
	}

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
}
static int
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret) {
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;
	}

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u64 sec_offset_err;
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
			break;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
			break;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
			break;
		}
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
		ret = 1;
	}

fail_mr_status:
	return ret;
}
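
/*
 * block_size + 8 above accounts for the 8-byte T10-PI tuple stored with
 * each logical block when converting the signature error offset into a
 * bad-sector LBA.
 */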
static void
isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
			    struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	wr->send_wr_num = 0;
	if (ret)
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	else
		isert_put_response(isert_conn->conn, cmd);
}
static void
isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
{
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->device;
	int ret = 0;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;
	}

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	wr->send_wr_num = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

	if (ret) {
		target_put_sess_cmd(se_cmd->se_sess, se_cmd);
		transport_send_check_condition_and_sense(se_cmd,
							 se_cmd->pi_err, 0);
	} else {
		target_execute_cmd(se_cmd);
	}
}
static void
isert_do_control_comp(struct work_struct *work)
{
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
	case ISTATE_SEND_REJECT:   /* FALLTHRU */
	case ISTATE_SEND_TEXTRSP:  /* FALLTHRU */
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
		break;
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		break;
	default:
		isert_err("Unknown i_state %d\n", cmd->i_state);
		dump_stack();
		break;
	}
}
static void
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);
		return;
	}

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
}
static void
isert_snd_completion(struct iser_tx_desc *tx_desc,
		     struct isert_conn *isert_conn)
{
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

	if (!isert_cmd) {
		isert_unmap_tx_desc(tx_desc, ib_dev);
		return;
	}
	wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);

	switch (wr->iser_ib_op) {
	case ISER_IB_SEND:
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
		break;
	case ISER_IB_RDMA_WRITE:
		isert_completion_rdma_write(tx_desc, isert_cmd);
		break;
	case ISER_IB_RDMA_READ:
		isert_completion_rdma_read(tx_desc, isert_cmd);
		break;
	default:
		isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
		dump_stack();
		break;
	}
}
/**
 * is_isert_tx_desc() - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @isert_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
static inline bool
is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
{
	void *start = isert_conn->rx_descs;
	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);

	if (wr_id >= start && wr_id < start + len)
		return false;

	return true;
}
static void
isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
{
	if (wc->wr_id == ISER_BEACON_WRID) {
		isert_info("conn %p completing wait_comp_err\n",
			   isert_conn);
		complete(&isert_conn->wait_comp_err);
	} else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
		struct ib_device *ib_dev = isert_conn->cm_id->device;
		struct isert_cmd *isert_cmd;
		struct iser_tx_desc *desc;

		desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
		isert_cmd = desc->isert_cmd;
		if (!isert_cmd)
			isert_unmap_tx_desc(desc, ib_dev);
		else
			isert_completion_put(desc, isert_cmd, ib_dev, true);
	} else {
		isert_conn->post_recv_buf_count--;
		if (!isert_conn->post_recv_buf_count)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
	}
}
static void
isert_handle_wc(struct ib_wc *wc)
{
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	isert_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
		} else {
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			isert_snd_completion(tx_desc, isert_conn);
		}
	} else {
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			isert_err("wr id %llx status %d vend_err %x\n",
				  wc->wr_id, wc->status, wc->vendor_err);
		else
			isert_dbg("flush error: wr id %llx\n", wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID)
			isert_cq_comp_err(isert_conn, wc);
	}
}
static void
isert_cq_work(struct work_struct *work)
{
	enum { isert_poll_budget = 65536 };
	struct isert_comp *comp = container_of(work, struct isert_comp,
					       work);
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			isert_handle_wc(&wcs[i]);

		completed += n;
		if (completed >= isert_poll_budget)
			break;
	}

	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
}

static void
isert_cq_callback(struct ib_cq *cq, void *context)
{
	struct isert_comp *comp = context;

	queue_work(isert_comp_wq, &comp->work);
}
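
/*
 * Completions are drained in workqueue context with a large poll
 * budget; when the budget is exhausted or the CQ runs dry, the CQ is
 * re-armed with IB_CQ_NEXT_COMP so the next interrupt requeues the work.
 */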
static int
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
{
	struct ib_send_wr *wr_failed;
	int ret;

	ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
	if (ret) {
		isert_err("ib_post_send failed with %d\n", ret);
		return ret;
	}
	return ret;
}
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr	= isert_cmd->pdu_buf_dma;
		tx_dsg->length	= pdu_len;
		tx_dsg->lkey	= device->mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response\n");

	return isert_post_response(isert_conn, isert_cmd);
}
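
/*
 * Padding arithmetic above: "-(len) & 3" rounds the sense payload up to
 * the 4-byte boundary iSCSI requires for data segments; the leading
 * __be16 carries the SCSI sense length.
 */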
static void
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);

	device->unreg_rdma_mem(isert_cmd, isert_conn);
}
static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;
		}
	}

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
}
static int
isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
		bool nopout_response)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
			       &isert_cmd->tx_desc.iscsi_header,
			       nopout_response);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->mr->lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->mr->lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}

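/*
 * Translate a chunk of the TCM scatterlist into an ib_sge array for a
 * single RDMA work request, starting at the byte offset into the
 * payload and covering at most data_left bytes across up to max_sge
 * entries; returns the number of SGEs consumed.
 */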
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->num_sge = sg_nents;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	/*
	 * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
			  "page_off: %u\n",
			  (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = device->mr->lkey;

		isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
			  ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		ib_sge++;
		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		  send_wr->sg_list, send_wr->num_sge);

	return sg_nents;
}

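/*
 * Non-registration (PD-wide lkey) RDMA path: map the command's data
 * scatterlist and build a chain of RDMA_WRITE/RDMA_READ work requests,
 * one per max_sge-sized slice of the payload.
 */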
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_data_buf *data = &wr->data;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, i, ib_sge_cnt;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	data_left = data->len;
	offset = data->offset;

	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
		isert_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
	wr->ib_sge = ib_sge;

	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		isert_dbg("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			send_wr->opcode = IB_WR_RDMA_WRITE;
			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->next = &isert_cmd->tx_desc.send_wr;
			else
				send_wr->next = &wr->send_wr[i + 1];
		} else {
			send_wr->opcode = IB_WR_RDMA_READ;
			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->send_flags = IB_SEND_SIGNALED;
			else
				send_wr->next = &wr->send_wr[i + 1];
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;
unmap_cmd:
	isert_unmap_data_buf(isert_conn, data);

	return ret;
}

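/*
 * Flatten a DMA-mapped scatterlist into a page list for fast
 * registration. Entries whose end is not page aligned are merged into
 * the following chunk; returns the number of pages filled into fr_pl.
 */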
static int
isert_map_fr_pagelist(struct ib_device *ib_dev,
		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
{
	u64 start_addr, end_addr, page, chunk_start = 0;
	struct scatterlist *tmp_sg;
	int i = 0, new_chunk, last_ent, n_pages;

	n_pages = 0;
	new_chunk = 1;
	last_ent = sg_nents - 1;
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
		if (new_chunk)
			chunk_start = start_addr;
		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);

		isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
			  i, (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length);

		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		page = chunk_start & PAGE_MASK;
		do {
			fr_pl[n_pages++] = page;
			isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
				  n_pages - 1, page);
			page += PAGE_SIZE;
		} while (page < end_addr);
	}

	return n_pages;
}

static inline void
isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	/* Bump the key */
	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}

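/*
 * Register @mem with a fast registration MR (or use the PD-wide lkey
 * directly for single-entry mappings) and return a ready-to-use SGE in
 * @sge. A LOCAL_INV WR is chained in front whenever the descriptor's
 * key is still valid from a previous registration.
 */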
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
		  struct fast_reg_descriptor *fr_desc,
		  struct isert_data_buf *mem,
		  enum isert_indicator ind,
		  struct ib_sge *sge)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fr_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, pagelist_len;
	u32 page_off;

	if (mem->dma_nents == 1) {
		sge->lkey = device->mr->lkey;
		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
			  sge->addr, sge->length, sge->lkey);
		return 0;
	}

	if (ind == ISERT_DATA_KEY_VALID) {
		/* Registering data buffer */
		mr = fr_desc->data_mr;
		frpl = fr_desc->data_frpl;
	} else {
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;
		frpl = fr_desc->pi_ctx->prot_frpl;
	}

	page_off = mem->offset % PAGE_SIZE;

	isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
		  fr_desc, mem->nents, mem->offset);

	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
					     &frpl->page_list[0]);

	if (!(fr_desc->ind & ind)) {
		isert_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = mem->len;
	fr_wr.wr.fast_reg.rkey = mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = frpl->page_list[0] + page_off;
	sge->length = mem->len;

	isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
		  sge->addr, sge->length, sge->lkey);

	return ret;
}

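/*
 * T10-PI signature helpers: describe one DIF domain (memory or wire
 * side) of the signature MR from the se_cmd protection fields.
 */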
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment we hard code those, but if in the future
	 * the target core would like to use it, we will take it
	 * from se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}

static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}

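/*
 * Convert the target-core DIF check flags into the check_mask byte
 * consumed by the signature verbs; the bit layout follows the
 * ib_sig_attrs convention (guard tag in the high bits, tag checks in
 * the low bits).
 */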
static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
}

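/*
 * Post an IB_WR_REG_SIG_MR that binds the data (and, when present,
 * protection) SGEs under a single signature-enabled rkey, producing
 * the SIG SGE used for the actual RDMA transfer.
 */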
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
		 struct se_cmd *se_cmd,
		 struct isert_rdma_wr *rdma_wr,
		 struct fast_reg_descriptor *fr_desc)
{
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
	rdma_wr->ib_sg[SIG].addr = 0;
	rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;

	isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		  rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
		  rdma_wr->ib_sg[SIG].lkey);
err:
	return ret;
}

static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd,
		      struct isert_rdma_wr *wr)
{
	struct isert_device *device = isert_conn->device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;

	if (!wr->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(wr->fr_desc,
					  device->ib_device,
					  device->pd);
		if (ret) {
			isert_err("conn %p failed to allocate pi_ctx\n",
				  isert_conn);
			return ret;
		}
	}

	if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0, wr->iser_ib_op, &wr->prot);
		if (ret) {
			isert_err("conn %p failed to map protection buffer\n",
				  isert_conn);
			return ret;
		}

		memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
		ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
		if (ret) {
			isert_err("conn %p failed to fast reg mr\n",
				  isert_conn);
			goto unmap_prot_cmd;
		}
	}

	ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
	if (ret) {
		isert_err("conn %p failed to fast reg mr\n",
			  isert_conn);
		goto unmap_prot_cmd;
	}
	wr->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);

	return ret;
}

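/*
 * Fast-registration RDMA path: map the data buffer, grab a descriptor
 * from the connection's fr_pool when registration is required, and
 * build a single RDMA work request over the registered (or SIG) SGE.
 */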
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct fast_reg_descriptor *fr_desc = NULL;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		fr_desc = list_first_entry(&isert_conn->fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
		wr->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
		if (ret)
			goto unmap_cmd;

		ib_sg = &wr->ib_sg[SIG];
	} else {
		ib_sg = &wr->ib_sg[DATA];
	}

	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
	wr->ib_sge = &wr->s_ib_sge;
	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;
	wr->isert_cmd = isert_cmd;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = &wr->s_ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
				      0 : IB_SEND_SIGNALED;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}

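/*
 * Data-In: post the RDMA_WRITE chain carrying the payload to the
 * initiator. Without PI offload the SCSI response PDU is chained
 * behind the last RDMA_WRITE so a single posting covers both.
 */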
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);
		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
		wr->send_wr_num += 1;
	}

	rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");

	if (!isert_prot_cmd(isert_conn, se_cmd))
		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			  "READ\n", isert_cmd);
	else
		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			  isert_cmd);

	return 1;
}

static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);

	return 0;
}

static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	int ret = 0;

	switch (state) {
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non GOOD SCSI status from TX thread
		 * context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}

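/*
 * Listener setup: create an RDMA CM ID bound to the portal address and
 * put it into listen mode; connection requests are then delivered to
 * isert_cma_handler().
 */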
static struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}

static int
isert_setup_np(struct iscsi_np *np,
	       struct __kernel_sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->np_sem, 0);
	mutex_init(&isert_np->np_accept_mutex);
	INIT_LIST_HEAD(&isert_np->np_accept_list);
	init_completion(&isert_np->np_login_comp);
	isert_np->np = np;

	/*
	 * Setup the np->np_sockaddr from the passed sockaddr setup
	 * in iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct __kernel_sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->np_cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}

static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}

static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_route *cm_route = &cm_id->route;
	struct sockaddr_in *sock_in;
	struct sockaddr_in6 *sock_in6;

	conn->login_family = np->np_sockaddr.ss_family;

	if (np->np_sockaddr.ss_family == AF_INET6) {
		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.dst_addr;
		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->login_port = ntohs(sock_in6->sin6_port);

		sock_in6 = (struct sockaddr_in6 *)&cm_route->addr.src_addr;
		snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
			 &sock_in6->sin6_addr.in6_u);
		conn->local_port = ntohs(sock_in6->sin6_port);
	} else {
		sock_in = (struct sockaddr_in *)&cm_route->addr.dst_addr;
		sprintf(conn->login_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->login_port = ntohs(sock_in->sin_port);

		sock_in = (struct sockaddr_in *)&cm_route->addr.src_addr;
		sprintf(conn->local_ip, "%pI4",
			&sock_in->sin_addr.s_addr);
		conn->local_port = ntohs(sock_in->sin_port);
	}
}

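/*
 * Block until isert_connect_request() queues a new connection on
 * np_accept_list (signalled via np_sem), then hand it over to the
 * iscsi_np login thread.
 */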
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->np_sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->np_accept_mutex);
	if (list_empty(&isert_np->np_accept_list)) {
		mutex_unlock(&isert_np->np_accept_mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->np_accept_list,
				      struct isert_conn, accept_node);
	list_del_init(&isert_conn->accept_node);
	mutex_unlock(&isert_np->np_accept_mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}

static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->np_cm_id)
		rdma_destroy_id(isert_np->np_cm_id);

	/*
	 * FIXME: At this point we don't have a good way to ensure that
	 * there are no hanging connections that completed RDMA
	 * establishment but didn't start the iscsi login process, so
	 * work around it by cleaning up whatever piled up in
	 * np_accept_list.
	 */
	mutex_lock(&isert_np->np_accept_mutex);
	if (!list_empty(&isert_np->np_accept_list)) {
		isert_info("Still have isert connections, cleaning up...\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->np_accept_list,
					 accept_node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->np_accept_mutex);

	np->np_context = NULL;
	kfree(isert_np);
}

static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	wait_for_completion(&isert_conn->wait);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}

static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}

static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}

static void
isert_wait4flush(struct isert_conn *isert_conn)
{
	struct ib_recv_wr *bad_wr;

	isert_info("conn %p\n", isert_conn);

	init_completion(&isert_conn->wait_comp_err);
	isert_conn->beacon.wr_id = ISER_BEACON_WRID;
	/* post an indication that all flush errors were consumed */
	if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
		isert_err("conn %p failed to post beacon", isert_conn);
		return;
	}

	wait_for_completion(&isert_conn->wait_comp_err);
}

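/*
 * Connection teardown: terminate the CM connection, then wait in turn
 * for outstanding se_cmd references, for the flush "beacon" completion
 * that marks the QP as drained, and for a posted logout response.
 */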
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	/*
	 * Only wait for wait_comp_err if the isert_conn made it
	 * into full feature phase.
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_wait4cmds(conn);
	isert_wait4flush(isert_conn);
	isert_wait4logout(isert_conn);

	INIT_WORK(&isert_conn->release_work, isert_release_work);
	queue_work(isert_release_wq, &isert_conn->release_work);
}

static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_put_conn(isert_conn);
}

static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};

static int __init isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}

static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}

MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);