/*******************************************************************************
 * This file contains iSCSI extensions for RDMA (iSER) Verbs
 *
 * (c) Copyright 2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/socket.h>
#include <linux/in6.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/iscsi/iscsi_transport.h>
#include <linux/semaphore.h>

#include "isert_proto.h"
#define ISERT_MAX_CONN		8
#define ISER_MAX_RX_CQ_LEN	(ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
#define ISER_MAX_CQ_LEN		(ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
				 ISERT_MAX_CONN)
static int isert_debug_level;
module_param_named(debug_level, isert_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");

static DEFINE_MUTEX(device_list_mutex);
static LIST_HEAD(device_list);
static struct workqueue_struct *isert_comp_wq;
static struct workqueue_struct *isert_release_wq;
static void
isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static void
isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr);
static int
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
static int
isert_rdma_post_recvl(struct isert_conn *isert_conn);
static int
isert_rdma_accept(struct isert_conn *isert_conn);
struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);

static void isert_release_work(struct work_struct *work);
static inline bool
isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
{
	return (conn->pi_support &&
		cmd->prot_op != TARGET_PROT_NORMAL);
}
isert_qp_event_callback(struct ib_event *e, void *context)
	struct isert_conn *isert_conn = context;

	isert_err("%s (%d): conn %p\n",
		  ib_event_msg(e->event), e->event, isert_conn);

	case IB_EVENT_COMM_EST:
		rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);
	case IB_EVENT_QP_LAST_WQE_REACHED:
		isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
	ret = ib_query_device(ib_dev, devattr);
		isert_err("ib_query_device() failed: %d\n", ret);
	isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
	isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);

static struct isert_comp *
isert_comp_get(struct isert_conn *isert_conn)
	struct isert_device *device = isert_conn->device;
	struct isert_comp *comp;

	mutex_lock(&device_list_mutex);
	for (i = 0; i < device->comps_used; i++)
		if (device->comps[i].active_qps <
		    device->comps[min].active_qps)
	comp = &device->comps[min];
	mutex_unlock(&device_list_mutex);

	isert_info("conn %p, using comp %p min_index: %d\n",
		   isert_conn, comp, min);

isert_comp_put(struct isert_comp *comp)
	mutex_lock(&device_list_mutex);
	mutex_unlock(&device_list_mutex);
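/*
 * isert_create_qp() - build the RC queue pair for a new connection.
 *
 * Send and receive completions share the per-vector CQ chosen by
 * isert_comp_get(), and SIGNATURE_EN is requested when the device can
 * offload T10-PI. The +1 on max_recv_wr plausibly leaves room for the
 * login descriptor posted before the normal RX ring exists (an
 * inference from isert_rdma_post_recvl(), not documented behavior).
 */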
static struct ib_qp *
isert_create_qp(struct isert_conn *isert_conn,
		struct isert_comp *comp,
		struct rdma_cm_id *cma_id)
	struct isert_device *device = isert_conn->device;
	struct ib_qp_init_attr attr;

	memset(&attr, 0, sizeof(struct ib_qp_init_attr));
	attr.event_handler = isert_qp_event_callback;
	attr.qp_context = isert_conn;
	attr.send_cq = comp->cq;
	attr.recv_cq = comp->cq;
	attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
	attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
	/*
	 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
	 * work-around for RDMA_READs with ConnectX-2.
	 *
	 * Also, still make sure to have at least two SGEs for
	 * outgoing control PDU responses.
	 */
	attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
	isert_conn->max_sge = attr.cap.max_send_sge;

	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	attr.qp_type = IB_QPT_RC;
	if (device->pi_capable)
		attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;

	ret = rdma_create_qp(cma_id, device->pd, &attr);
		isert_err("rdma_create_qp failed for cma_id %d\n", ret);

isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
	struct isert_comp *comp;

	comp = isert_comp_get(isert_conn);
	isert_conn->qp = isert_create_qp(isert_conn, comp, cma_id);
	if (IS_ERR(isert_conn->qp)) {
		ret = PTR_ERR(isert_conn->qp);

	isert_comp_put(comp);

isert_cq_event_callback(struct ib_event *e, void *context)
	isert_dbg("event: %d\n", e->event);
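/*
 * Allocate the connection's RX descriptor ring and DMA-map each
 * descriptor for DMA_FROM_DEVICE. Every descriptor carries a single
 * SGE covering ISER_RX_PAYLOAD_SIZE bytes using the PD's local DMA
 * lkey; on a mapping failure all previously mapped descriptors are
 * unwound before the ring is freed.
 */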
isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_rx_desc *rx_desc;
	struct ib_sge *rx_sg;

	isert_conn->rx_descs = kzalloc(ISERT_QP_MAX_RECV_DTOS *
				sizeof(struct iser_rx_desc), GFP_KERNEL);
	if (!isert_conn->rx_descs)

	rx_desc = isert_conn->rx_descs;

	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
					ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ib_dev, dma_addr))

		rx_desc->dma_addr = dma_addr;

		rx_sg = &rx_desc->rx_sg;
		rx_sg->addr = rx_desc->dma_addr;
		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
		rx_sg->lkey = device->pd->local_dma_lkey;

	rx_desc = isert_conn->rx_descs;
	for (j = 0; j < i; j++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;
	isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);

isert_free_rx_descriptors(struct isert_conn *isert_conn)
	struct ib_device *ib_dev = isert_conn->device->ib_device;
	struct iser_rx_desc *rx_desc;

	if (!isert_conn->rx_descs)

	rx_desc = isert_conn->rx_descs;
	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
				    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);

	kfree(isert_conn->rx_descs);
	isert_conn->rx_descs = NULL;

static void isert_cq_work(struct work_struct *);
static void isert_cq_callback(struct ib_cq *, void *);

isert_free_comps(struct isert_device *device)
	for (i = 0; i < device->comps_used; i++) {
		struct isert_comp *comp = &device->comps[i];

		cancel_work_sync(&comp->work);
		ib_destroy_cq(comp->cq);

	kfree(device->comps);
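/*
 * Create one isert_comp (CQ plus polling work item) per completion
 * vector, capped by ISERT_MAX_CQ and the number of online CPUs, so
 * completion processing can spread across vectors while each CQ is
 * drained from the isert_comp_wq workqueue.
 */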
isert_alloc_comps(struct isert_device *device,
		  struct ib_device_attr *attr)
	int i, max_cqe, ret = 0;

	device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
				 device->ib_device->num_comp_vectors));

	isert_info("Using %d CQs, %s supports %d vectors, "
		   "fast registration %d, pi_capable %d\n",
		   device->comps_used, device->ib_device->name,
		   device->ib_device->num_comp_vectors, device->use_fastreg,

	device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
	if (!device->comps) {
		isert_err("Unable to allocate completion contexts\n");

	max_cqe = min(ISER_MAX_CQ_LEN, attr->max_cqe);

	for (i = 0; i < device->comps_used; i++) {
		struct ib_cq_init_attr cq_attr = {};
		struct isert_comp *comp = &device->comps[i];

		comp->device = device;
		INIT_WORK(&comp->work, isert_cq_work);
		cq_attr.cqe = max_cqe;
		cq_attr.comp_vector = i;
		comp->cq = ib_create_cq(device->ib_device,
					isert_cq_callback,
					isert_cq_event_callback,
					(void *)comp,
					&cq_attr);
		if (IS_ERR(comp->cq)) {
			isert_err("Unable to allocate cq\n");
			ret = PTR_ERR(comp->cq);

		ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);

	isert_free_comps(device);

isert_create_device_ib_res(struct isert_device *device)
	struct ib_device_attr *dev_attr;

	dev_attr = &device->dev_attr;
	ret = isert_query_device(device->ib_device, dev_attr);

	/* assign function handlers */
	if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
	    dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
		device->use_fastreg = 1;
		device->reg_rdma_mem = isert_reg_rdma;
		device->unreg_rdma_mem = isert_unreg_rdma;
		device->use_fastreg = 0;
		device->reg_rdma_mem = isert_map_rdma;
		device->unreg_rdma_mem = isert_unmap_cmd;

	ret = isert_alloc_comps(device, dev_attr);

	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd)) {
		ret = PTR_ERR(device->pd);
		isert_err("failed to allocate pd, device %p, ret=%d\n",

	/* Check signature cap */
	device->pi_capable = dev_attr->device_cap_flags &
			     IB_DEVICE_SIGNATURE_HANDOVER ? true : false;

	isert_free_comps(device);

isert_free_device_ib_res(struct isert_device *device)
	isert_info("device %p\n", device);

	ib_dealloc_pd(device->pd);
	isert_free_comps(device);

isert_device_put(struct isert_device *device)
	mutex_lock(&device_list_mutex);
	isert_info("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		isert_free_device_ib_res(device);
		list_del(&device->dev_node);
	mutex_unlock(&device_list_mutex);
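/*
 * Find or create the isert_device backing this CM id's ib_device,
 * keyed by node GUID. device_list_mutex serializes lookup, refcount
 * changes and resource teardown against isert_device_put().
 */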
static struct isert_device *
isert_device_get(struct rdma_cm_id *cma_id)
	struct isert_device *device;

	mutex_lock(&device_list_mutex);
	list_for_each_entry(device, &device_list, dev_node) {
		if (device->ib_device->node_guid == cma_id->device->node_guid) {
			isert_info("Found iser device %p refcount %d\n",
				   device, device->refcount);
			mutex_unlock(&device_list_mutex);

	device = kzalloc(sizeof(struct isert_device), GFP_KERNEL);
		mutex_unlock(&device_list_mutex);
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&device->dev_node);

	device->ib_device = cma_id->device;
	ret = isert_create_device_ib_res(device);
		mutex_unlock(&device_list_mutex);

	list_add_tail(&device->dev_node, &device_list);
	isert_info("Created a new iser device %p refcount %d\n",
		   device, device->refcount);
	mutex_unlock(&device_list_mutex);

isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
	struct fast_reg_descriptor *fr_desc, *tmp;

	if (list_empty(&isert_conn->fr_pool))

	isert_info("Freeing conn %p fastreg pool", isert_conn);

	list_for_each_entry_safe(fr_desc, tmp,
				 &isert_conn->fr_pool, list) {
		list_del(&fr_desc->list);
		ib_free_fast_reg_page_list(fr_desc->data_frpl);
		ib_dereg_mr(fr_desc->data_mr);
		if (fr_desc->pi_ctx) {
			ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
			ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
			ib_dereg_mr(fr_desc->pi_ctx->sig_mr);
			kfree(fr_desc->pi_ctx);

	if (i < isert_conn->fr_pool_size)
		isert_warn("Pool still has %d regions registered\n",
			   isert_conn->fr_pool_size - i);
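/*
 * Allocate the T10-PI protection context for a fast-reg descriptor:
 * a page list and MR for protection data plus a signature-enabled MR
 * that links the data and protection registrations for verification
 * offload.
 */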
isert_create_pi_ctx(struct fast_reg_descriptor *desc,
		    struct ib_device *device,
		    struct ib_pd *pd)
	struct pi_context *pi_ctx;

	pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
		isert_err("Failed to allocate pi context\n");

	pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
					ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_frpl)) {
		isert_err("Failed to allocate prot frpl err=%ld\n",
			  PTR_ERR(pi_ctx->prot_frpl));
		ret = PTR_ERR(pi_ctx->prot_frpl);

	pi_ctx->prot_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				      ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(pi_ctx->prot_mr)) {
		isert_err("Failed to allocate prot frmr err=%ld\n",
			  PTR_ERR(pi_ctx->prot_mr));
		ret = PTR_ERR(pi_ctx->prot_mr);
	desc->ind |= ISERT_PROT_KEY_VALID;

	pi_ctx->sig_mr = ib_alloc_mr(pd, IB_MR_TYPE_SIGNATURE, 2);
	if (IS_ERR(pi_ctx->sig_mr)) {
		isert_err("Failed to allocate signature enabled mr err=%ld\n",
			  PTR_ERR(pi_ctx->sig_mr));
		ret = PTR_ERR(pi_ctx->sig_mr);

	desc->pi_ctx = pi_ctx;
	desc->ind |= ISERT_SIG_KEY_VALID;
	desc->ind &= ~ISERT_PROTECTED;

	ib_dereg_mr(pi_ctx->prot_mr);
	ib_free_fast_reg_page_list(pi_ctx->prot_frpl);

isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
		     struct fast_reg_descriptor *fr_desc)
	fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
					ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_frpl)) {
		isert_err("Failed to allocate data frpl err=%ld\n",
			  PTR_ERR(fr_desc->data_frpl));
		return PTR_ERR(fr_desc->data_frpl);

	fr_desc->data_mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				       ISCSI_ISER_SG_TABLESIZE);
	if (IS_ERR(fr_desc->data_mr)) {
		isert_err("Failed to allocate data frmr err=%ld\n",
			  PTR_ERR(fr_desc->data_mr));
		ret = PTR_ERR(fr_desc->data_mr);

	fr_desc->ind |= ISERT_DATA_KEY_VALID;

	isert_dbg("Created fr_desc %p\n", fr_desc);

	ib_free_fast_reg_page_list(fr_desc->data_frpl);
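/*
 * Pool sizing note: two descriptors per session tag plus slack for
 * the extra tags should let every in-flight command register data
 * (and optionally protection) without waiting on another command's
 * invalidate; this reading follows from the tag_num math below.
 */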
isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
	struct fast_reg_descriptor *fr_desc;
	struct isert_device *device = isert_conn->device;
	struct se_session *se_sess = isert_conn->conn->sess->se_sess;
	struct se_node_acl *se_nacl = se_sess->se_node_acl;
	/*
	 * Setup the number of FRMRs based upon the number of tags
	 * available to session in iscsi_target_locate_portal().
	 */
	tag_num = max_t(u32, ISCSIT_MIN_TAGS, se_nacl->queue_depth);
	tag_num = (tag_num * 2) + ISCSIT_EXTRA_TAGS;

	isert_conn->fr_pool_size = 0;
	for (i = 0; i < tag_num; i++) {
		fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
			isert_err("Failed to allocate fast_reg descriptor\n");

		ret = isert_create_fr_desc(device->ib_device,
					   device->pd, fr_desc);
			isert_err("Failed to create fastreg descriptor err=%d\n",

		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		isert_conn->fr_pool_size++;

	isert_dbg("Creating conn %p fastreg pool size=%d",
		  isert_conn, isert_conn->fr_pool_size);

	isert_conn_free_fastreg_pool(isert_conn);

isert_init_conn(struct isert_conn *isert_conn)
	isert_conn->state = ISER_CONN_INIT;
	INIT_LIST_HEAD(&isert_conn->node);
	init_completion(&isert_conn->login_comp);
	init_completion(&isert_conn->login_req_comp);
	init_completion(&isert_conn->wait);
	kref_init(&isert_conn->kref);
	mutex_init(&isert_conn->mutex);
	spin_lock_init(&isert_conn->pool_lock);
	INIT_LIST_HEAD(&isert_conn->fr_pool);
	INIT_WORK(&isert_conn->release_work, isert_release_work);

isert_free_login_buf(struct isert_conn *isert_conn)
	struct ib_device *ib_dev = isert_conn->device->ib_device;

	ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
			    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN,
			    DMA_FROM_DEVICE);
	kfree(isert_conn->login_buf);

isert_alloc_login_buf(struct isert_conn *isert_conn,
		      struct ib_device *ib_dev)
	isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
					ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!isert_conn->login_buf) {
		isert_err("Unable to allocate isert_conn->login_buf\n");

	isert_conn->login_req_buf = isert_conn->login_buf;
	isert_conn->login_rsp_buf = isert_conn->login_buf +
				    ISCSI_DEF_MAX_RECV_SEG_LEN;

	isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
		  isert_conn->login_buf, isert_conn->login_req_buf,
		  isert_conn->login_rsp_buf);

	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_req_buf,
				ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
		isert_err("login_req_dma mapping error: %d\n", ret);
		isert_conn->login_req_dma = 0;

	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
				(void *)isert_conn->login_rsp_buf,
				ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);

	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
		isert_err("login_rsp_dma mapping error: %d\n", ret);
		isert_conn->login_rsp_dma = 0;
		goto out_req_dma_map;

	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
			    ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_FROM_DEVICE);
	kfree(isert_conn->login_buf);
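/*
 * RDMA_CM_EVENT_CONNECT_REQUEST handler: reject while the portal is
 * not accepting, otherwise allocate the connection, login buffers,
 * device resources and QP, post the login receive and accept. The
 * new connection is parked on isert_np->accepted until the login
 * thread picks it up.
 */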
isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
	struct isert_np *isert_np = cma_id->context;
	struct iscsi_np *np = isert_np->np;
	struct isert_conn *isert_conn;
	struct isert_device *device;

	spin_lock_bh(&np->np_thread_lock);
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("iscsi_np is not enabled, reject connect request\n");
		return rdma_reject(cma_id, NULL, 0);
	spin_unlock_bh(&np->np_thread_lock);

	isert_dbg("cma_id: %p, portal: %p\n",
		  cma_id, cma_id->context);

	isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);

	isert_init_conn(isert_conn);
	isert_conn->cm_id = cma_id;

	ret = isert_alloc_login_buf(isert_conn, cma_id->device);

	device = isert_device_get(cma_id);
	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		goto out_rsp_dma_map;
	isert_conn->device = device;

	/* Set max inflight RDMA READ requests */
	isert_conn->initiator_depth = min_t(u8,
				event->param.conn.initiator_depth,
				device->dev_attr.max_qp_init_rd_atom);
	isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);

	ret = isert_conn_setup_qp(isert_conn, cma_id);

	ret = isert_rdma_post_recvl(isert_conn);

	ret = isert_rdma_accept(isert_conn);

	mutex_lock(&isert_np->mutex);
	list_add_tail(&isert_conn->node, &isert_np->accepted);
	mutex_unlock(&isert_np->mutex);

	isert_device_put(device);
	isert_free_login_buf(isert_conn);
	rdma_reject(cma_id, NULL, 0);
isert_connect_release(struct isert_conn *isert_conn)
	struct isert_device *device = isert_conn->device;

	isert_dbg("conn %p\n", isert_conn);

	if (device->use_fastreg)
		isert_conn_free_fastreg_pool(isert_conn);

	isert_free_rx_descriptors(isert_conn);
	if (isert_conn->cm_id)
		rdma_destroy_id(isert_conn->cm_id);

	if (isert_conn->qp) {
		struct isert_comp *comp = isert_conn->qp->recv_cq->cq_context;

		isert_comp_put(comp);
		ib_destroy_qp(isert_conn->qp);

	if (isert_conn->login_buf)
		isert_free_login_buf(isert_conn);

	isert_device_put(device);

isert_connected_handler(struct rdma_cm_id *cma_id)
	struct isert_conn *isert_conn = cma_id->qp->qp_context;
	struct isert_np *isert_np = cma_id->context;

	isert_info("conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_UP;
	kref_get(&isert_conn->kref);
	mutex_unlock(&isert_conn->mutex);

	mutex_lock(&isert_np->mutex);
	list_move_tail(&isert_conn->node, &isert_np->pending);
	mutex_unlock(&isert_np->mutex);

	isert_info("np %p: Allow accept_np to continue\n", isert_np);

isert_release_kref(struct kref *kref)
	struct isert_conn *isert_conn = container_of(kref,
				struct isert_conn, kref);

	isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
		   current->pid);

	isert_connect_release(isert_conn);

isert_put_conn(struct isert_conn *isert_conn)
	kref_put(&isert_conn->kref, isert_release_kref);
/**
 * isert_conn_terminate() - Initiate connection termination
 * @isert_conn: isert connection struct
 *
 * Notes:
 * In case the connection state is FULL_FEATURE, move state
 * to TERMINATING and start teardown sequence (rdma_disconnect).
 * In case the connection state is UP, complete flush as well.
 *
 * This routine must be called with mutex held. Thus it is
 * safe to call multiple times.
 */
isert_conn_terminate(struct isert_conn *isert_conn)
	switch (isert_conn->state) {
	case ISER_CONN_TERMINATING:
	case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
		isert_info("Terminating conn %p state %d\n",
			   isert_conn, isert_conn->state);
		isert_conn->state = ISER_CONN_TERMINATING;
		err = rdma_disconnect(isert_conn->cm_id);
			isert_warn("Failed rdma_disconnect isert_conn %p\n",
				   isert_conn);
		isert_warn("conn %p terminating in state %d\n",
			   isert_conn, isert_conn->state);
isert_np_cma_handler(struct isert_np *isert_np,
		     enum rdma_cm_event_type event)
	isert_dbg("%s (%d): isert np %p\n",
		  rdma_event_msg(event), event, isert_np);

	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		isert_np->cm_id = NULL;
	case RDMA_CM_EVENT_ADDR_CHANGE:
		isert_np->cm_id = isert_setup_id(isert_np);
		if (IS_ERR(isert_np->cm_id)) {
			isert_err("isert np %p setup id failed: %ld\n",
				  isert_np, PTR_ERR(isert_np->cm_id));
			isert_np->cm_id = NULL;
		isert_err("isert np %p Unexpected event %d\n",
			  isert_np, event);

isert_disconnected_handler(struct rdma_cm_id *cma_id,
			   enum rdma_cm_event_type event)
	struct isert_np *isert_np = cma_id->context;
	struct isert_conn *isert_conn;
	bool terminating = false;

	if (isert_np->cm_id == cma_id)
		return isert_np_cma_handler(cma_id->context, event);

	isert_conn = cma_id->qp->qp_context;

	mutex_lock(&isert_conn->mutex);
	terminating = (isert_conn->state == ISER_CONN_TERMINATING);
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_info("conn %p completing wait\n", isert_conn);
	complete(&isert_conn->wait);

	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_conn->node)) {
		list_del_init(&isert_conn->node);
		isert_put_conn(isert_conn);
		queue_work(isert_release_wq, &isert_conn->release_work);
	mutex_unlock(&isert_np->mutex);
isert_connect_error(struct rdma_cm_id *cma_id)
	struct isert_conn *isert_conn = cma_id->qp->qp_context;

	list_del_init(&isert_conn->node);
	isert_conn->cm_id = NULL;
	isert_put_conn(isert_conn);

isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
	isert_info("%s (%d): status %d id %p np %p\n",
		   rdma_event_msg(event->event), event->event,
		   event->status, cma_id, cma_id->context);

	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		ret = isert_connect_request(cma_id, event);
			isert_err("failed handle connect request %d\n", ret);
	case RDMA_CM_EVENT_ESTABLISHED:
		isert_connected_handler(cma_id);
	case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
	case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
	case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
	case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
		ret = isert_disconnected_handler(cma_id, event->event);
	case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
	case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ret = isert_connect_error(cma_id);
		isert_err("Unhandled RDMA CMA event: %d\n", event->event);
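/*
 * Post @count receive work requests as one chained ib_post_recv()
 * call, one WR per RX descriptor with the descriptor address as
 * wr_id. post_recv_buf_count tracks outstanding receives so teardown
 * can tell when the RX ring has fully flushed.
 */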
isert_post_recvm(struct isert_conn *isert_conn, u32 count)
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc = &isert_conn->rx_descs[i];
		rx_wr->wr_id = (uintptr_t)rx_desc;
		rx_wr->sg_list = &rx_desc->rx_sg;
		rx_wr->next = rx_wr + 1;
	rx_wr->next = NULL; /* mark end of work requests list */

	isert_conn->post_recv_buf_count += count;
	ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
			   &rx_wr_failed);
		isert_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count -= count;

isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
	struct ib_recv_wr *rx_wr_failed, rx_wr;

	rx_wr.wr_id = (uintptr_t)rx_desc;
	rx_wr.sg_list = &rx_desc->rx_sg;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
		isert_err("ib_post_recv() failed with ret: %d\n", ret);
		isert_conn->post_recv_buf_count--;

isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_dev, tx_desc->dma_addr,
				      ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next = NULL;
	send_wr.wr_id = (uintptr_t)tx_desc;
	send_wr.sg_list = tx_desc->tx_sg;
	send_wr.num_sge = tx_desc->num_sge;
	send_wr.opcode = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(isert_conn->qp, &send_wr, &send_wr_failed);
		isert_err("ib_post_send() failed, ret: %d\n", ret);
isert_create_send_desc(struct isert_conn *isert_conn,
		       struct isert_cmd *isert_cmd,
		       struct iser_tx_desc *tx_desc)
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;

	ib_dma_sync_single_for_cpu(ib_dev, tx_desc->dma_addr,
				   ISER_HEADERS_LEN, DMA_TO_DEVICE);

	memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
	tx_desc->iser_header.flags = ISER_VER;

	tx_desc->num_sge = 1;
	tx_desc->isert_cmd = isert_cmd;

	if (tx_desc->tx_sg[0].lkey != device->pd->local_dma_lkey) {
		tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;
		isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);

isert_init_tx_hdrs(struct isert_conn *isert_conn,
		   struct iser_tx_desc *tx_desc)
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;

	dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
				     ISER_HEADERS_LEN, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ib_dev, dma_addr)) {
		isert_err("ib_dma_mapping_error() failed\n");

	tx_desc->dma_addr = dma_addr;
	tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
	tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
	tx_desc->tx_sg[0].lkey = device->pd->local_dma_lkey;

	isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
		  tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
		  tx_desc->tx_sg[0].lkey);

isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct ib_send_wr *send_wr)
	struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;

	isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	send_wr->opcode = IB_WR_SEND;
	send_wr->sg_list = &tx_desc->tx_sg[0];
	send_wr->num_sge = isert_cmd->tx_desc.num_sge;
	send_wr->send_flags = IB_SEND_SIGNALED;

isert_rdma_post_recvl(struct isert_conn *isert_conn)
	struct ib_recv_wr rx_wr, *rx_wr_fail;

	memset(&sge, 0, sizeof(struct ib_sge));
	sge.addr = isert_conn->login_req_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey = isert_conn->device->pd->local_dma_lkey;

	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
		  sge.addr, sge.length, sge.lkey);

	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
	rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
	rx_wr.sg_list = &sge;

	isert_conn->post_recv_buf_count++;
	ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_fail);
		isert_err("ib_post_recv() failed: %d\n", ret);
		isert_conn->post_recv_buf_count--;
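/*
 * Send a login response PDU. The text payload rides in a second SGE
 * out of the dedicated login_rsp buffer. Once login completes, the
 * full-feature resources (fastreg pool, RX ring) are set up and the
 * connection state moves to ISER_CONN_FULL_FEATURE before the
 * response is posted.
 */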
isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
		   u32 length)
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct iser_tx_desc *tx_desc = &isert_conn->login_tx_desc;

	isert_create_send_desc(isert_conn, NULL, tx_desc);

	memcpy(&tx_desc->iscsi_header, &login->rsp[0],
	       sizeof(struct iscsi_hdr));

	isert_init_tx_hdrs(isert_conn, tx_desc);

		struct ib_sge *tx_dsg = &tx_desc->tx_sg[1];

		ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_rsp_dma,
					   length, DMA_TO_DEVICE);

		memcpy(isert_conn->login_rsp_buf, login->rsp_buf, length);

		ib_dma_sync_single_for_device(ib_dev, isert_conn->login_rsp_dma,
					      length, DMA_TO_DEVICE);

		tx_dsg->addr = isert_conn->login_rsp_dma;
		tx_dsg->length = length;
		tx_dsg->lkey = isert_conn->device->pd->local_dma_lkey;
		tx_desc->num_sge = 2;

	if (!login->login_failed) {
		if (login->login_complete) {
			if (!conn->sess->sess_ops->SessionType &&
			    isert_conn->device->use_fastreg) {
				ret = isert_conn_create_fastreg_pool(isert_conn);
					isert_err("Conn: %p failed to create"
						  " fastreg pool\n", isert_conn);

			ret = isert_alloc_rx_descriptors(isert_conn);

			ret = isert_post_recvm(isert_conn,
					       ISERT_QP_MAX_RECV_DTOS);

			/* Now we are in FULL_FEATURE phase */
			mutex_lock(&isert_conn->mutex);
			isert_conn->state = ISER_CONN_FULL_FEATURE;
			mutex_unlock(&isert_conn->mutex);

		ret = isert_rdma_post_recvl(isert_conn);

	ret = isert_post_send(isert_conn, tx_desc);
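/*
 * Copy a received login request into the iscsi_login structure. The
 * first request additionally seeds negotiation state (ISID, CID,
 * TSIH, CmdSN, stage flags) and completes login_comp so the login
 * thread can continue.
 */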
isert_rx_login_req(struct isert_conn *isert_conn)
	struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
	int rx_buflen = isert_conn->login_req_len;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_login *login = conn->conn_login;

	isert_info("conn %p\n", isert_conn);

	WARN_ON_ONCE(!login);

	if (login->first_request) {
		struct iscsi_login_req *login_req =
			(struct iscsi_login_req *)&rx_desc->iscsi_header;
		/*
		 * Setup the initial iscsi_login values from the leading
		 * login request PDU.
		 */
		login->leading_connection = (!login_req->tsih) ? 1 : 0;
		login->current_stage =
			(login_req->flags & ISCSI_FLAG_LOGIN_CURRENT_STAGE_MASK)
			>> 2;
		login->version_min = login_req->min_version;
		login->version_max = login_req->max_version;
		memcpy(login->isid, login_req->isid, 6);
		login->cmd_sn = be32_to_cpu(login_req->cmdsn);
		login->init_task_tag = login_req->itt;
		login->initial_exp_statsn = be32_to_cpu(login_req->exp_statsn);
		login->cid = be16_to_cpu(login_req->cid);
		login->tsih = be16_to_cpu(login_req->tsih);

	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);

	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
	isert_dbg("Using login payload size: %d, rx_buflen: %d "
		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
		  MAX_KEY_VALUE_PAIRS);
	memcpy(login->req_buf, &rx_desc->data[0], size);

	if (login->first_request) {
		complete(&isert_conn->login_comp);
	schedule_delayed_work(&conn->login_work, 0);

static struct iscsi_cmd
*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
	struct isert_conn *isert_conn = conn->context;
	struct isert_cmd *isert_cmd;
	struct iscsi_cmd *cmd;

	cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
		isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");

	isert_cmd = iscsit_priv_cmd(cmd);
	isert_cmd->conn = isert_conn;
	isert_cmd->iscsi_cmd = cmd;
	isert_cmd->rx_desc = rx_desc;
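/*
 * SCSI command PDU path: when immediate data covers the whole
 * transfer, the RX descriptor is mapped in place via the NOALLOC
 * scatterlist; otherwise the immediate data is copied into the
 * pre-allocated per-command SGL.
 */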
isert_handle_scsi_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd, struct iscsi_cmd *cmd,
		      struct iser_rx_desc *rx_desc, unsigned char *buf)
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
	int imm_data, imm_data_len, unsol_data, sg_nents, rc;
	bool dump_payload = false;
	unsigned int data_len;

	rc = iscsit_setup_scsi_cmd(conn, cmd, buf);

	imm_data = cmd->immediate_data;
	imm_data_len = cmd->first_burst_len;
	unsol_data = cmd->unsolicited_data;
	data_len = cmd->se_cmd.data_length;

	if (imm_data && imm_data_len == data_len)
		cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
	} else if (rc > 0) {
		dump_payload = true;

	if (imm_data_len != data_len) {
		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
				    &rx_desc->data[0], imm_data_len);
		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
			  sg_nents, imm_data_len);
		sg_init_table(&isert_cmd->sg, 1);
		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
		cmd->se_cmd.t_data_nents = 1;
		sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
		isert_dbg("Transfer Immediate imm_data_len: %d\n",
			  imm_data_len);

	cmd->write_data_done += imm_data_len;

	if (cmd->write_data_done == cmd->se_cmd.data_length) {
		spin_lock_bh(&cmd->istate_lock);
		cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
		cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
		spin_unlock_bh(&cmd->istate_lock);

	rc = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
	if (!rc && dump_payload == false && unsol_data)
		iscsit_set_unsoliticed_dataout(cmd);
	else if (dump_payload && imm_data)
		target_put_sess_cmd(&cmd->se_cmd);
isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
			   struct iser_rx_desc *rx_desc, unsigned char *buf)
	struct scatterlist *sg_start;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *)buf;
	u32 unsol_data_len = ntoh24(hdr->dlength);
	int rc, sg_nents, sg_off, page_off;

	rc = iscsit_check_dataout_hdr(conn, buf, &cmd);

	/*
	 * FIXME: Unexpected unsolicited_data out
	 */
	if (!cmd->unsolicited_data) {
		isert_err("Received unexpected solicited data payload\n");

	isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
		  "write_data_done: %u, data_length: %u\n",
		  unsol_data_len, cmd->write_data_done,
		  cmd->se_cmd.data_length);

	sg_off = cmd->write_data_done / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = max(1UL, DIV_ROUND_UP(unsol_data_len, PAGE_SIZE));
	page_off = cmd->write_data_done % PAGE_SIZE;
	/*
	 * FIXME: Non page-aligned unsolicited_data out
	 */
		isert_err("unexpected non-page aligned data payload\n");

	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
		  sg_nents, &rx_desc->data[0], unsol_data_len);

	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
			    unsol_data_len);

	rc = iscsit_check_dataout_payload(cmd, hdr, false);

	/*
	 * multiple data-outs on the same command can arrive -
	 * so post the buffer before hand
	 */
	rc = isert_post_recv(isert_conn, rx_desc);
		isert_err("ib_post_recv failed with %d\n", rc);
isert_handle_nop_out(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		     struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		     unsigned char *buf)
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_nopout *hdr = (struct iscsi_nopout *)buf;

	rc = iscsit_setup_nop_out(conn, cmd, hdr);
	/*
	 * FIXME: Add support for NOPOUT payload using unsolicited RDMA payload
	 */
	return iscsit_process_nop_out(conn, cmd, hdr);

isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		      struct iscsi_cmd *cmd, struct iser_rx_desc *rx_desc,
		      struct iscsi_text *hdr)
	struct iscsi_conn *conn = isert_conn->conn;
	u32 payload_length = ntoh24(hdr->dlength);
	unsigned char *text_in = NULL;

	rc = iscsit_setup_text_cmd(conn, cmd, hdr);

	if (payload_length) {
		text_in = kzalloc(payload_length, GFP_KERNEL);
			isert_err("Unable to allocate text_in of payload_length: %u\n",
		cmd->text_in_ptr = text_in;

	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);

	return iscsit_process_text_cmd(conn, cmd, hdr);
isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
		uint32_t read_stag, uint64_t read_va,
		uint32_t write_stag, uint64_t write_va)
	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
	struct iscsi_conn *conn = isert_conn->conn;
	struct iscsi_cmd *cmd;
	struct isert_cmd *isert_cmd;
	u8 opcode = (hdr->opcode & ISCSI_OPCODE_MASK);

	if (conn->sess->sess_ops->SessionType &&
	    (opcode != ISCSI_OP_TEXT && opcode != ISCSI_OP_LOGOUT)) {
		isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
			  " ignoring\n", opcode);

	case ISCSI_OP_SCSI_CMD:
		cmd = isert_allocate_cmd(conn, rx_desc);

		isert_cmd = iscsit_priv_cmd(cmd);
		isert_cmd->read_stag = read_stag;
		isert_cmd->read_va = read_va;
		isert_cmd->write_stag = write_stag;
		isert_cmd->write_va = write_va;

		ret = isert_handle_scsi_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (unsigned char *)hdr);
	case ISCSI_OP_NOOP_OUT:
		cmd = isert_allocate_cmd(conn, rx_desc);

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_nop_out(isert_conn, isert_cmd, cmd,
					   rx_desc, (unsigned char *)hdr);
	case ISCSI_OP_SCSI_DATA_OUT:
		ret = isert_handle_iscsi_dataout(isert_conn, rx_desc,
						 (unsigned char *)hdr);
	case ISCSI_OP_SCSI_TMFUNC:
		cmd = isert_allocate_cmd(conn, rx_desc);

		ret = iscsit_handle_task_mgt_cmd(conn, cmd,
						 (unsigned char *)hdr);
	case ISCSI_OP_LOGOUT:
		cmd = isert_allocate_cmd(conn, rx_desc);

		ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
		if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
			cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
			cmd = isert_allocate_cmd(conn, rx_desc);

		isert_cmd = iscsit_priv_cmd(cmd);
		ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
					    rx_desc, (struct iscsi_text *)hdr);
		isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
	struct iser_hdr *iser_hdr = &rx_desc->iser_header;
	uint64_t read_va = 0, write_va = 0;
	uint32_t read_stag = 0, write_stag = 0;

	switch (iser_hdr->flags & 0xF0) {
		if (iser_hdr->flags & ISER_RSV) {
			read_stag = be32_to_cpu(iser_hdr->read_stag);
			read_va = be64_to_cpu(iser_hdr->read_va);
			isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
				  read_stag, (unsigned long long)read_va);
		if (iser_hdr->flags & ISER_WSV) {
			write_stag = be32_to_cpu(iser_hdr->write_stag);
			write_va = be64_to_cpu(iser_hdr->write_va);
			isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
				  write_stag, (unsigned long long)write_va);
		isert_dbg("ISER ISCSI_CTRL PDU\n");
		isert_err("iSER Hello message\n");
		isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);

	rc = isert_rx_opcode(isert_conn, rx_desc,
			     read_stag, read_va, write_stag, write_va);
isert_rcv_completion(struct iser_rx_desc *desc,
		     struct isert_conn *isert_conn,
		     u32 xfer_len)
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_hdr *hdr;

	if ((char *)desc == isert_conn->login_req_buf) {
		rx_dma = isert_conn->login_req_dma;
		rx_buflen = ISER_RX_LOGIN_SIZE;
		isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			  rx_dma, rx_buflen);
		rx_dma = desc->dma_addr;
		rx_buflen = ISER_RX_PAYLOAD_SIZE;
		isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
			  rx_dma, rx_buflen);

	ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);

	hdr = &desc->iscsi_header;
	isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
		  hdr->opcode, hdr->itt, hdr->flags,
		  (int)(xfer_len - ISER_HEADERS_LEN));

	if ((char *)desc == isert_conn->login_req_buf) {
		isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
		if (isert_conn->conn) {
			struct iscsi_login *login = isert_conn->conn->conn_login;

			if (login && !login->first_request)
				isert_rx_login_req(isert_conn);

		mutex_lock(&isert_conn->mutex);
		complete(&isert_conn->login_req_comp);
		mutex_unlock(&isert_conn->mutex);
		isert_rx_do_work(desc, isert_conn);

	ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
				      DMA_FROM_DEVICE);

	isert_conn->post_recv_buf_count--;
isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		   struct scatterlist *sg, u32 nents, u32 length, u32 offset,
		   enum iser_ib_op_code op, struct isert_data_buf *data)
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	data->dma_dir = op == ISER_IB_RDMA_WRITE ?
			DMA_TO_DEVICE : DMA_FROM_DEVICE;

	data->len = length - offset;
	data->offset = offset;
	data->sg_off = data->offset / PAGE_SIZE;

	data->sg = &sg[data->sg_off];
	data->nents = min_t(unsigned int, nents - data->sg_off,
			    ISCSI_ISER_SG_TABLESIZE);
	data->len = min_t(unsigned int, data->len, ISCSI_ISER_SG_TABLESIZE *
			  PAGE_SIZE);

	data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
					data->dma_dir);
	if (unlikely(!data->dma_nents)) {
		isert_err("Cmd: unable to dma map SGs %p\n", sg);

	isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
		  isert_cmd, data->dma_nents, data->sg, data->nents, data->len);

isert_unmap_data_buf(struct isert_conn *isert_conn, struct isert_data_buf *data)
	struct ib_device *ib_dev = isert_conn->cm_id->device;

	ib_dma_unmap_sg(ib_dev, data->sg, data->nents, data->dma_dir);
	memset(data, 0, sizeof(*data));

isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p\n", isert_cmd);

		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);

		isert_dbg("Cmd %p free send_wr\n", isert_cmd);

		isert_dbg("Cmd %p free ib_sge\n", isert_cmd);

isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p\n", isert_cmd);

		isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
		if (wr->fr_desc->ind & ISERT_PROTECTED) {
			isert_unmap_data_buf(isert_conn, &wr->prot);
			wr->fr_desc->ind &= ~ISERT_PROTECTED;
		spin_lock_bh(&isert_conn->pool_lock);
		list_add_tail(&wr->fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_bh(&isert_conn->pool_lock);

		isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
		isert_unmap_data_buf(isert_conn, &wr->data);
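/*
 * Release a command after its final completion (or on a flushed
 * error). The per-opcode cases mirror how each PDU type pins the
 * command: SCSI commands may owe an extra target_put_sess_cmd() when
 * a comp_err interrupts the WRITE_PENDING hand-off, and text
 * responses with the Continue bit set are kept alive.
 */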
isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct iscsi_conn *conn = isert_conn->conn;
	struct isert_device *device = isert_conn->device;
	struct iscsi_text_rsp *hdr;

	isert_dbg("Cmd %p\n", isert_cmd);

	switch (cmd->iscsi_opcode) {
	case ISCSI_OP_SCSI_CMD:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			iscsit_stop_dataout_timer(cmd);
			/*
			 * Check for special case during comp_err where
			 * WRITE_PENDING has been handed off from core,
			 * but requires an extra target_put_sess_cmd()
			 * before transport_generic_free_cmd() below.
			 */
			    cmd->se_cmd.t_state == TRANSPORT_WRITE_PENDING) {
				struct se_cmd *se_cmd = &cmd->se_cmd;

				target_put_sess_cmd(se_cmd);

		device->unreg_rdma_mem(isert_cmd, isert_conn);
		transport_generic_free_cmd(&cmd->se_cmd, 0);
	case ISCSI_OP_SCSI_TMFUNC:
		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		transport_generic_free_cmd(&cmd->se_cmd, 0);
	case ISCSI_OP_REJECT:
	case ISCSI_OP_NOOP_OUT:
		hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
		/* If the continue bit is on, keep the command alive */
		if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)

		spin_lock_bh(&conn->cmd_lock);
		if (!list_empty(&cmd->i_conn_node))
			list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);

		/*
		 * Handle special case for REJECT when iscsi_add_reject*() has
		 * overwritten the original iscsi_opcode assignment, and the
		 * associated cmd->se_cmd needs to be released.
		 */
		if (cmd->se_cmd.se_tfo != NULL) {
			isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
			transport_generic_free_cmd(&cmd->se_cmd, 0);

		iscsit_release_cmd(cmd);
isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
	if (tx_desc->dma_addr != 0) {
		isert_dbg("unmap single for tx_desc->dma_addr\n");
		ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
				    ISER_HEADERS_LEN, DMA_TO_DEVICE);
		tx_desc->dma_addr = 0;

isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
		     struct ib_device *ib_dev, bool comp_err)
	if (isert_cmd->pdu_buf_dma != 0) {
		isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
		ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
				    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
		isert_cmd->pdu_buf_dma = 0;

	isert_unmap_tx_desc(tx_desc, ib_dev);
	isert_put_cmd(isert_cmd, comp_err);
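/*
 * Query the signature MR after a PI-offloaded transfer and translate
 * guard/ref-tag/app-tag failures into the matching TCM sense codes,
 * recording the failing sector. block_size + 8 accounts for the
 * 8-byte DIF tuple carried with each logical block.
 */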
isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
	struct ib_mr_status mr_status;

	ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
		isert_err("ib_check_mr_status failed, ret %d\n", ret);
		goto fail_mr_status;

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS) {
		u32 block_size = se_cmd->se_dev->dev_attrib.block_size + 8;

		switch (mr_status.sig_err.err_type) {
		case IB_SIG_BAD_GUARD:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
		case IB_SIG_BAD_REFTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
		case IB_SIG_BAD_APPTAG:
			se_cmd->pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
		sec_offset_err = mr_status.sig_err.sig_err_offset;
		do_div(sec_offset_err, block_size);
		se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;

		isert_err("PI error found type %d at sector 0x%llx "
			  "expected 0x%x vs actual 0x%x\n",
			  mr_status.sig_err.err_type,
			  (unsigned long long)se_cmd->bad_sector,
			  mr_status.sig_err.expected,
			  mr_status.sig_err.actual);
isert_completion_rdma_write(struct iser_tx_desc *tx_desc,
			    struct isert_cmd *isert_cmd)
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->device;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;

	device->unreg_rdma_mem(isert_cmd, isert_conn);
	wr->send_wr_num = 0;
		transport_send_check_condition_and_sense(se_cmd,
						se_cmd->pi_err, 0);
		isert_put_response(isert_conn->conn, cmd);

isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
			   struct isert_cmd *isert_cmd)
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct isert_device *device = isert_conn->device;

	if (wr->fr_desc && wr->fr_desc->ind & ISERT_PROTECTED) {
		ret = isert_check_pi_status(se_cmd,
					    wr->fr_desc->pi_ctx->sig_mr);
		wr->fr_desc->ind &= ~ISERT_PROTECTED;

	iscsit_stop_dataout_timer(cmd);
	device->unreg_rdma_mem(isert_cmd, isert_conn);
	cmd->write_data_done = wr->data.len;
	wr->send_wr_num = 0;

	isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
	spin_lock_bh(&cmd->istate_lock);
	cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
	cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
	spin_unlock_bh(&cmd->istate_lock);

		target_put_sess_cmd(se_cmd);
		transport_send_check_condition_and_sense(se_cmd,
						se_cmd->pi_err, 0);
		target_execute_cmd(se_cmd);
isert_do_control_comp(struct work_struct *work)
	struct isert_cmd *isert_cmd = container_of(work,
			struct isert_cmd, comp_work);
	struct isert_conn *isert_conn = isert_cmd->conn;
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);

	switch (cmd->i_state) {
	case ISTATE_SEND_TASKMGTRSP:
		iscsit_tmr_post_handler(cmd, cmd->conn);
	case ISTATE_SEND_REJECT:   /* FALLTHRU */
	case ISTATE_SEND_TEXTRSP:  /* FALLTHRU */
		cmd->i_state = ISTATE_SENT_STATUS;
		isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
				     ib_dev, false);
	case ISTATE_SEND_LOGOUTRSP:
		iscsit_logout_post_handler(cmd, cmd->conn);
		isert_err("Unknown i_state %d\n", cmd->i_state);
isert_response_completion(struct iser_tx_desc *tx_desc,
			  struct isert_cmd *isert_cmd,
			  struct isert_conn *isert_conn,
			  struct ib_device *ib_dev)
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;

	if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
	    cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
	    cmd->i_state == ISTATE_SEND_REJECT ||
	    cmd->i_state == ISTATE_SEND_TEXTRSP) {
		isert_unmap_tx_desc(tx_desc, ib_dev);

		INIT_WORK(&isert_cmd->comp_work, isert_do_control_comp);
		queue_work(isert_comp_wq, &isert_cmd->comp_work);

	cmd->i_state = ISTATE_SENT_STATUS;
	isert_completion_put(tx_desc, isert_cmd, ib_dev, false);

isert_snd_completion(struct iser_tx_desc *tx_desc,
		     struct isert_conn *isert_conn)
	struct ib_device *ib_dev = isert_conn->cm_id->device;
	struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
	struct isert_rdma_wr *wr;

		isert_unmap_tx_desc(tx_desc, ib_dev);
	wr = &isert_cmd->rdma_wr;

	isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);

	switch (wr->iser_ib_op) {
		isert_response_completion(tx_desc, isert_cmd,
					  isert_conn, ib_dev);
	case ISER_IB_RDMA_WRITE:
		isert_completion_rdma_write(tx_desc, isert_cmd);
	case ISER_IB_RDMA_READ:
		isert_completion_rdma_read(tx_desc, isert_cmd);
		isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
/**
 * is_isert_tx_desc() - Indicate if the completion wr_id
 *     is a TX descriptor or not.
 * @isert_conn: iser connection
 * @wr_id: completion WR identifier
 *
 * Since we cannot rely on wc opcode in FLUSH errors
 * we must work around it by checking if the wr_id address
 * falls in the iser connection rx_descs buffer. If so
 * it is an RX descriptor, otherwise it is a TX.
 */
is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
	void *start = isert_conn->rx_descs;
	int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->rx_descs);

	if (wr_id >= start && wr_id < start + len)
isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
	if (wc->wr_id == ISER_BEACON_WRID) {
		isert_info("conn %p completing wait_comp_err\n",
			   isert_conn);
		complete(&isert_conn->wait_comp_err);
	} else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
		struct ib_device *ib_dev = isert_conn->cm_id->device;
		struct isert_cmd *isert_cmd;
		struct iser_tx_desc *desc;

		desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
		isert_cmd = desc->isert_cmd;
			isert_unmap_tx_desc(desc, ib_dev);
			isert_completion_put(desc, isert_cmd, ib_dev, true);
		isert_conn->post_recv_buf_count--;
		if (!isert_conn->post_recv_buf_count)
			iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
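/*
 * Completion demux: successful receives feed the iSCSI RX path,
 * successful sends run the TX/RDMA completion handlers, and anything
 * else (typically flush errors during teardown) is routed to
 * isert_cq_comp_err() unless it belongs to a local invalidate
 * (ISER_FASTREG_LI_WRID).
 */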
isert_handle_wc(struct ib_wc *wc)
	struct isert_conn *isert_conn;
	struct iser_tx_desc *tx_desc;
	struct iser_rx_desc *rx_desc;

	isert_conn = wc->qp->qp_context;
	if (likely(wc->status == IB_WC_SUCCESS)) {
		if (wc->opcode == IB_WC_RECV) {
			rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
			isert_rcv_completion(rx_desc, isert_conn, wc->byte_len);
			tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
			isert_snd_completion(tx_desc, isert_conn);
		if (wc->status != IB_WC_WR_FLUSH_ERR)
			isert_err("%s (%d): wr id %llx vend_err %x\n",
				  ib_wc_status_msg(wc->status), wc->status,
				  wc->wr_id, wc->vendor_err);
			isert_dbg("%s (%d): wr id %llx\n",
				  ib_wc_status_msg(wc->status), wc->status,
				  wc->wr_id);

		if (wc->wr_id != ISER_FASTREG_LI_WRID)
			isert_cq_comp_err(isert_conn, wc);
isert_cq_work(struct work_struct *work)
	enum { isert_poll_budget = 65536 };
	struct isert_comp *comp = container_of(work, struct isert_comp,
					       work);
	struct ib_wc *const wcs = comp->wcs;
	int i, n, completed = 0;

	while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
		for (i = 0; i < n; i++)
			isert_handle_wc(&wcs[i]);

		if (completed >= isert_poll_budget)

	ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);

isert_cq_callback(struct ib_cq *cq, void *context)
	struct isert_comp *comp = context;

	queue_work(isert_comp_wq, &comp->work);
isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
	struct ib_send_wr *wr_failed;

	ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		isert_err("ib_post_recv failed with %d\n", ret);

	ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
			   &wr_failed);
		isert_err("ib_post_send failed with %d\n", ret);
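/*
 * Queue a response PDU: a receive is re-posted first so the credit
 * consumed by the just-completed request is returned before the
 * response goes on the send queue.
 */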
isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_scsi_rsp *hdr = (struct iscsi_scsi_rsp *)
				&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_rsp_pdu(cmd, conn, true, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	/*
	 * Attach SENSE DATA payload to iSCSI Response PDU
	 */
	if (cmd->se_cmd.sense_buffer &&
	    ((cmd->se_cmd.se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (cmd->se_cmd.se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		u32 padding, pdu_len;

		put_unaligned_be16(cmd->se_cmd.scsi_sense_length,
				   cmd->sense_buffer);
		cmd->se_cmd.scsi_sense_length += sizeof(__be16);

		padding = -(cmd->se_cmd.scsi_sense_length) & 3;
		hton24(hdr->dlength, (u32)cmd->se_cmd.scsi_sense_length);
		pdu_len = cmd->se_cmd.scsi_sense_length + padding;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				(void *)cmd->sense_buffer, pdu_len,
				DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = pdu_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = pdu_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("Posting SCSI Response\n");

	return isert_post_response(isert_conn, isert_cmd);
isert_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	spin_lock_bh(&conn->cmd_lock);
	if (!list_empty(&cmd->i_conn_node))
		list_del_init(&cmd->i_conn_node);
	spin_unlock_bh(&conn->cmd_lock);

	if (cmd->data_direction == DMA_TO_DEVICE)
		iscsit_stop_dataout_timer(cmd);

	device->unreg_rdma_mem(isert_cmd, isert_conn);

static enum target_prot_op
isert_get_sup_prot_ops(struct iscsi_conn *conn)
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;

	if (conn->tpg->tpg_attrib.t10_pi) {
		if (device->pi_capable) {
			isert_info("conn %p PI offload enabled\n", isert_conn);
			isert_conn->pi_support = true;
			return TARGET_PROT_ALL;

	isert_info("conn %p PI offload disabled\n", isert_conn);
	isert_conn->pi_support = false;

	return TARGET_PROT_NORMAL;
2275 isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
2276 bool nopout_response)
2278 struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
2279 struct isert_conn *isert_conn = conn->context;
2280 struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
2282 isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
2283 iscsit_build_nopin_rsp(cmd, conn, (struct iscsi_nopin *)
2284 &isert_cmd->tx_desc.iscsi_header,
2286 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2287 isert_init_send_wr(isert_conn, isert_cmd, send_wr);
2289 isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
2291 return isert_post_response(isert_conn, isert_cmd);
static int
isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
				&isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Logout Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
				  &isert_cmd->tx_desc.iscsi_header);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Task Management Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
static int
isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
	struct iscsi_reject *hdr =
		(struct iscsi_reject *)&isert_cmd->tx_desc.iscsi_header;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	iscsit_build_reject(cmd, conn, hdr);
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	/* The rejected PDU header is carried as the data segment */
	hton24(hdr->dlength, ISCSI_HDR_LEN);
	isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
			(void *)cmd->buf_ptr, ISCSI_HDR_LEN,
			DMA_TO_DEVICE);
	isert_cmd->pdu_buf_len = ISCSI_HDR_LEN;
	tx_dsg->addr = isert_cmd->pdu_buf_dma;
	tx_dsg->length = ISCSI_HDR_LEN;
	tx_dsg->lkey = device->pd->local_dma_lkey;
	isert_cmd->tx_desc.num_sge = 2;

	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Posting Reject\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
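
/*
 * isert_put_text_rsp() - send a Text Response PDU, attaching the text
 * payload as a second SGE when iscsit_build_text_rsp() returns a
 * non-zero payload length.
 */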
static int
isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct ib_send_wr *send_wr = &isert_cmd->tx_desc.send_wr;
	struct iscsi_text_rsp *hdr =
		(struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
	u32 txt_rsp_len;
	int rc;

	isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
	rc = iscsit_build_text_rsp(cmd, conn, hdr, ISCSI_INFINIBAND);
	if (rc < 0)
		return rc;

	txt_rsp_len = rc;
	isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);

	if (txt_rsp_len) {
		struct isert_device *device = isert_conn->device;
		struct ib_device *ib_dev = device->ib_device;
		struct ib_sge *tx_dsg = &isert_cmd->tx_desc.tx_sg[1];
		void *txt_rsp_buf = cmd->buf_ptr;

		isert_cmd->pdu_buf_dma = ib_dma_map_single(ib_dev,
				txt_rsp_buf, txt_rsp_len, DMA_TO_DEVICE);

		isert_cmd->pdu_buf_len = txt_rsp_len;
		tx_dsg->addr = isert_cmd->pdu_buf_dma;
		tx_dsg->length = txt_rsp_len;
		tx_dsg->lkey = device->pd->local_dma_lkey;
		isert_cmd->tx_desc.num_sge = 2;
	}
	isert_init_send_wr(isert_conn, isert_cmd, send_wr);

	isert_dbg("conn %p Text Response\n", isert_conn);

	return isert_post_response(isert_conn, isert_cmd);
}
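
/*
 * isert_build_rdma_wr() - fill one RDMA work request with ib_sge entries
 * mapped from the command's TCM scatterlist, starting at @offset and
 * covering at most @data_left bytes. Returns the number of SGEs used.
 */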
static int
isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
		    struct ib_sge *ib_sge, struct ib_send_wr *send_wr,
		    u32 data_left, u32 offset)
{
	struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
	struct scatterlist *sg_start, *tmp_sg;
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	u32 sg_off, page_off;
	int i = 0, sg_nents;

	sg_off = offset / PAGE_SIZE;
	sg_start = &cmd->se_cmd.t_data_sg[sg_off];
	sg_nents = min(cmd->se_cmd.t_data_nents - sg_off, isert_conn->max_sge);
	page_off = offset % PAGE_SIZE;

	send_wr->sg_list = ib_sge;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	/*
	 * Map the TCM scatterlist memory into ib_sge dma_addrs.
	 */
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
			  "page_off: %u\n",
			  (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length, page_off);

		ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
		ib_sge->length = min_t(u32, data_left,
				       ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
		ib_sge->lkey = device->pd->local_dma_lkey;

		isert_dbg("RDMA ib_sge: addr: 0x%llx length: %u lkey: %x\n",
			  ib_sge->addr, ib_sge->length, ib_sge->lkey);
		page_off = 0;
		data_left -= ib_sge->length;
		if (!data_left)
			break;
		ib_sge++;
		isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
	}

	send_wr->num_sge = ++i;
	isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
		  send_wr->sg_list, send_wr->num_sge);

	return send_wr->num_sge;
}
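
/*
 * isert_map_rdma() - set up RDMA READ/WRITE work requests using plain
 * DMA-mapped SGEs (no memory registration). The payload is split into
 * max_sge-sized work requests; for RDMA_WRITE the final WR is chained to
 * the response PDU's send WR, for RDMA_READ the final WR is signaled.
 */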
static int
isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct isert_data_buf *data = &wr->data;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sge;
	u32 offset, data_len, data_left, rdma_write_max, va_offset = 0;
	int ret = 0, i, ib_sge_cnt;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	data_left = data->len;
	offset = data->offset;

	ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
	if (!ib_sge) {
		isert_warn("Unable to allocate ib_sge\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}
	wr->ib_sge = ib_sge;

	wr->send_wr_num = DIV_ROUND_UP(data->nents, isert_conn->max_sge);
	wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
			      GFP_KERNEL);
	if (!wr->send_wr) {
		isert_dbg("Unable to allocate wr->send_wr\n");
		ret = -ENOMEM;
		goto unmap_cmd;
	}

	wr->isert_cmd = isert_cmd;
	rdma_write_max = isert_conn->max_sge * PAGE_SIZE;

	for (i = 0; i < wr->send_wr_num; i++) {
		send_wr = &isert_cmd->rdma_wr.send_wr[i];
		data_len = min(data_left, rdma_write_max);

		send_wr->send_flags = 0;
		if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
			send_wr->opcode = IB_WR_RDMA_WRITE;
			send_wr->wr.rdma.remote_addr = isert_cmd->read_va + offset;
			send_wr->wr.rdma.rkey = isert_cmd->read_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->next = &isert_cmd->tx_desc.send_wr;
			else
				send_wr->next = &wr->send_wr[i + 1];
		} else {
			send_wr->opcode = IB_WR_RDMA_READ;
			send_wr->wr.rdma.remote_addr = isert_cmd->write_va + va_offset;
			send_wr->wr.rdma.rkey = isert_cmd->write_stag;
			if (i + 1 == wr->send_wr_num)
				send_wr->send_flags = IB_SEND_SIGNALED;
			else
				send_wr->next = &wr->send_wr[i + 1];
		}

		ib_sge_cnt = isert_build_rdma_wr(isert_conn, isert_cmd, ib_sge,
						 send_wr, data_len, offset);
		ib_sge += ib_sge_cnt;

		offset += data_len;
		va_offset += data_len;
		data_left -= data_len;
	}

	return 0;
unmap_cmd:
	isert_unmap_data_buf(isert_conn, data);

	return ret;
}
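
/*
 * isert_map_fr_pagelist() - translate a DMA-mapped scatterlist into the
 * page-aligned fast-registration page list expected by the HCA, merging
 * physically contiguous entries into chunks. Returns the page count.
 */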
static int
isert_map_fr_pagelist(struct ib_device *ib_dev,
		      struct scatterlist *sg_start, int sg_nents, u64 *fr_pl)
{
	u64 start_addr, end_addr, page, chunk_start = 0;
	struct scatterlist *tmp_sg;
	int i = 0, new_chunk, last_ent, n_pages;

	n_pages = 0;
	new_chunk = 1;
	last_ent = sg_nents - 1;
	for_each_sg(sg_start, tmp_sg, sg_nents, i) {
		start_addr = ib_sg_dma_address(ib_dev, tmp_sg);
		if (new_chunk)
			chunk_start = start_addr;
		end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);

		isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
			  i, (unsigned long long)tmp_sg->dma_address,
			  tmp_sg->length);

		if ((end_addr & ~PAGE_MASK) && i < last_ent) {
			new_chunk = 0;
			continue;
		}
		new_chunk = 1;

		page = chunk_start & PAGE_MASK;
		do {
			fr_pl[n_pages++] = page;
			isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
				  n_pages - 1, page);
			page += PAGE_SIZE;
		} while (page < end_addr);
	}

	return n_pages;
}
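
/*
 * isert_inv_rkey() - build a LOCAL_INV work request for a stale rkey and
 * bump the MR key so the next fast registration uses a fresh one.
 */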
static inline void
isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
{
	u32 rkey;

	memset(inv_wr, 0, sizeof(*inv_wr));
	inv_wr->wr_id = ISER_FASTREG_LI_WRID;
	inv_wr->opcode = IB_WR_LOCAL_INV;
	inv_wr->ex.invalidate_rkey = mr->rkey;

	/* Bump the key */
	rkey = ib_inc_rkey(mr->rkey);
	ib_update_fast_reg_key(mr, rkey);
}
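
/*
 * isert_fast_reg_mr() - register a data or protection buffer with a
 * fast-registration MR. A single-SGE buffer short-circuits to the local
 * DMA lkey; otherwise an optional LOCAL_INV WR is chained in front of
 * the FASTREG WR and both are posted to the send queue.
 */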
static int
isert_fast_reg_mr(struct isert_conn *isert_conn,
		  struct fast_reg_descriptor *fr_desc,
		  struct isert_data_buf *mem,
		  enum isert_indicator ind,
		  struct ib_sge *sge)
{
	struct isert_device *device = isert_conn->device;
	struct ib_device *ib_dev = device->ib_device;
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *frpl;
	struct ib_send_wr fr_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	int ret, pagelist_len;
	u32 page_off;

	if (mem->dma_nents == 1) {
		sge->lkey = device->pd->local_dma_lkey;
		sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
		sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
		isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
			  sge->addr, sge->length, sge->lkey);
		return 0;
	}

	if (ind == ISERT_DATA_KEY_VALID) {
		/* Registering data buffer */
		mr = fr_desc->data_mr;
		frpl = fr_desc->data_frpl;
	} else {
		/* Registering protection buffer */
		mr = fr_desc->pi_ctx->prot_mr;
		frpl = fr_desc->pi_ctx->prot_frpl;
	}

	page_off = mem->offset % PAGE_SIZE;

	isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
		  fr_desc, mem->nents, mem->offset);

	pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
					     &frpl->page_list[0]);

	if (!(fr_desc->ind & ind)) {
		isert_inv_rkey(&inv_wr, mr);
		wr = &inv_wr;
	}

	/* Prepare FASTREG WR */
	memset(&fr_wr, 0, sizeof(fr_wr));
	fr_wr.wr_id = ISER_FASTREG_LI_WRID;
	fr_wr.opcode = IB_WR_FAST_REG_MR;
	fr_wr.wr.fast_reg.iova_start = frpl->page_list[0] + page_off;
	fr_wr.wr.fast_reg.page_list = frpl;
	fr_wr.wr.fast_reg.page_list_len = pagelist_len;
	fr_wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	fr_wr.wr.fast_reg.length = mem->len;
	fr_wr.wr.fast_reg.rkey = mr->rkey;
	fr_wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE;

	if (!wr)
		wr = &fr_wr;
	else
		wr->next = &fr_wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		return ret;
	}
	fr_desc->ind &= ~ind;

	sge->lkey = mr->lkey;
	sge->addr = frpl->page_list[0] + page_off;
	sge->length = mem->len;

	isert_dbg("sge: addr: 0x%llx length: %u lkey: %x\n",
		  sge->addr, sge->length, sge->lkey);

	return ret;
}
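
/*
 * isert_set_dif_domain() - describe one side (wire or memory) of the
 * T10-DIF signature handover using the block size and reference tag
 * seed from the se_cmd.
 */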
static inline void
isert_set_dif_domain(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs,
		     struct ib_sig_domain *domain)
{
	domain->sig_type = IB_SIG_TYPE_T10_DIF;
	domain->sig.dif.bg_type = IB_T10DIF_CRC;
	domain->sig.dif.pi_interval = se_cmd->se_dev->dev_attrib.block_size;
	domain->sig.dif.ref_tag = se_cmd->reftag_seed;
	/*
	 * At the moment these are hard-coded, but if the target core
	 * wants to supply them in the future, we will take them from
	 * se_cmd.
	 */
	domain->sig.dif.apptag_check_mask = 0xffff;
	domain->sig.dif.app_escape = true;
	domain->sig.dif.ref_escape = true;
	if (se_cmd->prot_type == TARGET_DIF_TYPE1_PROT ||
	    se_cmd->prot_type == TARGET_DIF_TYPE2_PROT)
		domain->sig.dif.ref_remap = true;
}
static int
isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
{
	switch (se_cmd->prot_op) {
	case TARGET_PROT_DIN_INSERT:
	case TARGET_PROT_DOUT_STRIP:
		sig_attrs->mem.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		break;
	case TARGET_PROT_DOUT_INSERT:
	case TARGET_PROT_DIN_STRIP:
		sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE;
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	case TARGET_PROT_DIN_PASS:
	case TARGET_PROT_DOUT_PASS:
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->wire);
		isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
		break;
	default:
		isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
		return -EINVAL;
	}

	return 0;
}
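
/*
 * isert_set_prot_checks() - translate target-core DIF check flags into
 * the signature check_mask expected by the HCA.
 */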
static inline u8
isert_set_prot_checks(u8 prot_checks)
{
	return (prot_checks & TARGET_DIF_CHECK_GUARD  ? 0xc0 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x30 : 0) |
	       (prot_checks & TARGET_DIF_CHECK_REFTAG ? 0x0f : 0);
}
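
/*
 * isert_reg_sig_mr() - post a REG_SIG_MR work request tying the data
 * (and optional protection) registrations to the signature MR, so DIF
 * is generated/validated by the HCA as part of the RDMA transfer.
 */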
static int
isert_reg_sig_mr(struct isert_conn *isert_conn,
		 struct se_cmd *se_cmd,
		 struct isert_rdma_wr *rdma_wr,
		 struct fast_reg_descriptor *fr_desc)
{
	struct ib_send_wr sig_wr, inv_wr;
	struct ib_send_wr *bad_wr, *wr = NULL;
	struct pi_context *pi_ctx = fr_desc->pi_ctx;
	struct ib_sig_attrs sig_attrs;
	int ret;

	memset(&sig_attrs, 0, sizeof(sig_attrs));
	ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
	if (ret)
		goto err;

	sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);

	if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
		isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
		wr = &inv_wr;
	}

	memset(&sig_wr, 0, sizeof(sig_wr));
	sig_wr.opcode = IB_WR_REG_SIG_MR;
	sig_wr.wr_id = ISER_FASTREG_LI_WRID;
	sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
	sig_wr.num_sge = 1;
	sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
	sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
	sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
	if (se_cmd->t_prot_sg)
		sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];

	if (!wr)
		wr = &sig_wr;
	else
		wr->next = &sig_wr;

	ret = ib_post_send(isert_conn->qp, wr, &bad_wr);
	if (ret) {
		isert_err("fast registration failed, ret:%d\n", ret);
		goto err;
	}
	fr_desc->ind &= ~ISERT_SIG_KEY_VALID;

	rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
	rdma_wr->ib_sg[SIG].addr = 0;
	rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
	if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
	    se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
		/*
		 * We have protection guards on the wire
		 * so we need to set a larger transfer
		 */
		rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;

	isert_dbg("sig_sge: addr: 0x%llx length: %u lkey: %x\n",
		  rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
		  rdma_wr->ib_sg[SIG].lkey);
err:
	return ret;
}
static int
isert_handle_prot_cmd(struct isert_conn *isert_conn,
		      struct isert_cmd *isert_cmd,
		      struct isert_rdma_wr *wr)
{
	struct isert_device *device = isert_conn->device;
	struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
	int ret;

	if (!wr->fr_desc->pi_ctx) {
		ret = isert_create_pi_ctx(wr->fr_desc,
					  device->ib_device,
					  device->pd);
		if (ret) {
			isert_err("conn %p failed to allocate pi_ctx\n",
				  isert_conn);
			return ret;
		}
	}

	if (se_cmd->t_prot_sg) {
		ret = isert_map_data_buf(isert_conn, isert_cmd,
					 se_cmd->t_prot_sg,
					 se_cmd->t_prot_nents,
					 se_cmd->prot_length,
					 0, wr->iser_ib_op, &wr->prot);
		if (ret) {
			isert_err("conn %p failed to map protection buffer\n",
				  isert_conn);
			return ret;
		}

		memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
		ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
					ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
		if (ret) {
			isert_err("conn %p failed to fast reg mr\n",
				  isert_conn);
			goto unmap_prot_cmd;
		}
	}

	ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
	if (ret) {
		isert_err("conn %p failed to fast reg mr\n",
			  isert_conn);
		goto unmap_prot_cmd;
	}
	wr->fr_desc->ind |= ISERT_PROTECTED;

	return 0;

unmap_prot_cmd:
	if (se_cmd->t_prot_sg)
		isert_unmap_data_buf(isert_conn, &wr->prot);

	return ret;
}
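
/*
 * isert_reg_rdma() - set up a single RDMA READ/WRITE work request backed
 * by fast-registration MRs, pulling a descriptor from the connection's
 * fr_pool when more than one SGE (or T10-PI) is involved.
 */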
static int
isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
	       struct isert_rdma_wr *wr)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_conn *isert_conn = conn->context;
	struct fast_reg_descriptor *fr_desc = NULL;
	struct ib_send_wr *send_wr;
	struct ib_sge *ib_sg;
	u32 offset;
	int ret = 0;
	unsigned long flags;

	isert_cmd->tx_desc.isert_cmd = isert_cmd;

	offset = wr->iser_ib_op == ISER_IB_RDMA_READ ? cmd->write_data_done : 0;
	ret = isert_map_data_buf(isert_conn, isert_cmd, se_cmd->t_data_sg,
				 se_cmd->t_data_nents, se_cmd->data_length,
				 offset, wr->iser_ib_op, &wr->data);
	if (ret)
		return ret;

	if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		fr_desc = list_first_entry(&isert_conn->fr_pool,
					   struct fast_reg_descriptor, list);
		list_del(&fr_desc->list);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
		wr->fr_desc = fr_desc;
	}

	ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
				ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
	if (ret)
		goto unmap_cmd;

	if (isert_prot_cmd(isert_conn, se_cmd)) {
		ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
		if (ret)
			goto unmap_cmd;

		ib_sg = &wr->ib_sg[SIG];
	} else {
		ib_sg = &wr->ib_sg[DATA];
	}

	memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
	wr->ib_sge = &wr->s_ib_sge;
	wr->send_wr_num = 1;
	memset(&wr->s_send_wr, 0, sizeof(*send_wr));
	wr->send_wr = &wr->s_send_wr;
	wr->isert_cmd = isert_cmd;

	send_wr = &isert_cmd->rdma_wr.s_send_wr;
	send_wr->sg_list = &wr->s_ib_sge;
	send_wr->num_sge = 1;
	send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
	if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
		send_wr->opcode = IB_WR_RDMA_WRITE;
		send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
		send_wr->wr.rdma.rkey = isert_cmd->read_stag;
		send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
				      0 : IB_SEND_SIGNALED;
	} else {
		send_wr->opcode = IB_WR_RDMA_READ;
		send_wr->wr.rdma.remote_addr = isert_cmd->write_va;
		send_wr->wr.rdma.rkey = isert_cmd->write_stag;
		send_wr->send_flags = IB_SEND_SIGNALED;
	}

	return 0;

unmap_cmd:
	if (fr_desc) {
		spin_lock_irqsave(&isert_conn->pool_lock, flags);
		list_add_tail(&fr_desc->list, &isert_conn->fr_pool);
		spin_unlock_irqrestore(&isert_conn->pool_lock, flags);
	}
	isert_unmap_data_buf(isert_conn, &wr->data);

	return ret;
}
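
/*
 * isert_put_datain() - queue_data_in callback: post the RDMA_WRITE work
 * requests for an iSER Data-In transfer. When PI offload is not in use
 * the SCSI response PDU is chained behind the final RDMA_WRITE so both
 * complete with a single post.
 */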
static int
isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
		  isert_cmd, se_cmd->data_length);

	wr->iser_ib_op = ISER_IB_RDMA_WRITE;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	if (!isert_prot_cmd(isert_conn, se_cmd)) {
		/*
		 * Build isert_conn->tx_desc for iSCSI response PDU and attach
		 */
		isert_create_send_desc(isert_conn, isert_cmd,
				       &isert_cmd->tx_desc);
		iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
				     &isert_cmd->tx_desc.iscsi_header);
		isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
		isert_init_send_wr(isert_conn, isert_cmd,
				   &isert_cmd->tx_desc.send_wr);
		isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
		wr->send_wr_num += 1;

		rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
		if (rc) {
			isert_err("ib_post_recv failed with %d\n", rc);
			return rc;
		}
	}

	rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");

	if (!isert_prot_cmd(isert_conn, se_cmd))
		isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
			  "READ\n", isert_cmd);
	else
		isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
			  isert_cmd);

	return 1;
}
static int
isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
{
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
	struct isert_conn *isert_conn = conn->context;
	struct isert_device *device = isert_conn->device;
	struct ib_send_wr *wr_failed;
	int rc;

	isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
		  isert_cmd, se_cmd->data_length, cmd->write_data_done);
	wr->iser_ib_op = ISER_IB_RDMA_READ;
	rc = device->reg_rdma_mem(conn, cmd, wr);
	if (rc) {
		isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
		return rc;
	}

	rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
	if (rc)
		isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");

	isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
		  isert_cmd);

	return 0;
}
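
/*
 * isert_immediate_queue() / isert_response_queue() - dispatch iscsit
 * immediate and response states to the matching isert_put_*() helper.
 */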
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
	int ret = 0;

	switch (state) {
	case ISTATE_REMOVE:
		spin_lock_bh(&conn->cmd_lock);
		list_del_init(&cmd->i_conn_node);
		spin_unlock_bh(&conn->cmd_lock);
		isert_put_cmd(isert_cmd, true);
		break;
	case ISTATE_SEND_NOPIN_WANT_RESPONSE:
		ret = isert_put_nopin(cmd, conn, false);
		break;
	default:
		isert_err("Unknown immediate state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int
isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	switch (state) {
	case ISTATE_SEND_LOGOUTRSP:
		ret = isert_put_logout_rsp(cmd, conn);
		if (!ret)
			isert_conn->logout_posted = true;
		break;
	case ISTATE_SEND_NOPIN:
		ret = isert_put_nopin(cmd, conn, true);
		break;
	case ISTATE_SEND_TASKMGTRSP:
		ret = isert_put_tm_rsp(cmd, conn);
		break;
	case ISTATE_SEND_REJECT:
		ret = isert_put_reject(cmd, conn);
		break;
	case ISTATE_SEND_TEXTRSP:
		ret = isert_put_text_rsp(cmd, conn);
		break;
	case ISTATE_SEND_STATUS:
		/*
		 * Special case for sending non-GOOD SCSI status from TX
		 * thread context during pre se_cmd execution failure.
		 */
		ret = isert_put_response(conn, cmd);
		break;
	default:
		isert_err("Unknown response state: 0x%02x\n", state);
		ret = -EINVAL;
		break;
	}

	return ret;
}
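
/*
 * isert_setup_id() - create an rdma_cm listener bound to the portal
 * address in np->np_sockaddr.
 */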
struct rdma_cm_id *
isert_setup_id(struct isert_np *isert_np)
{
	struct iscsi_np *np = isert_np->np;
	struct rdma_cm_id *id;
	struct sockaddr *sa;
	int ret;

	sa = (struct sockaddr *)&np->np_sockaddr;
	isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);

	id = rdma_create_id(isert_cma_handler, isert_np,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
		ret = PTR_ERR(id);
		goto out;
	}
	isert_dbg("id %p context %p\n", id, id->context);

	ret = rdma_bind_addr(id, sa);
	if (ret) {
		isert_err("rdma_bind_addr() failed: %d\n", ret);
		goto out_id;
	}

	ret = rdma_listen(id, 0);
	if (ret) {
		isert_err("rdma_listen() failed: %d\n", ret);
		goto out_id;
	}

	return id;
out_id:
	rdma_destroy_id(id);
out:
	return ERR_PTR(ret);
}
static int
isert_setup_np(struct iscsi_np *np,
	       struct sockaddr_storage *ksockaddr)
{
	struct isert_np *isert_np;
	struct rdma_cm_id *isert_lid;
	int ret;

	isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
	if (!isert_np) {
		isert_err("Unable to allocate struct isert_np\n");
		return -ENOMEM;
	}
	sema_init(&isert_np->sem, 0);
	mutex_init(&isert_np->mutex);
	INIT_LIST_HEAD(&isert_np->accepted);
	INIT_LIST_HEAD(&isert_np->pending);
	isert_np->np = np;

	/*
	 * Set up np->np_sockaddr from the sockaddr passed in from
	 * the iscsi_target_configfs.c code.
	 */
	memcpy(&np->np_sockaddr, ksockaddr,
	       sizeof(struct sockaddr_storage));

	isert_lid = isert_setup_id(isert_np);
	if (IS_ERR(isert_lid)) {
		ret = PTR_ERR(isert_lid);
		goto out;
	}

	isert_np->cm_id = isert_lid;
	np->np_context = isert_np;

	return 0;

out:
	kfree(isert_np);

	return ret;
}
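
/*
 * isert_rdma_accept() - accept an incoming rdma_cm connection request,
 * echoing the initiator depth negotiated during connect.
 */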
static int
isert_rdma_accept(struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_conn_param cp;
	int ret;

	memset(&cp, 0, sizeof(struct rdma_conn_param));
	cp.initiator_depth = isert_conn->initiator_depth;
	cp.retry_count = 7;
	cp.rnr_retry_count = 7;

	ret = rdma_accept(cm_id, &cp);
	if (ret) {
		isert_err("rdma_accept() failed with: %d\n", ret);
		return ret;
	}

	return 0;
}
static int
isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
{
	struct isert_conn *isert_conn = conn->context;
	int ret;

	isert_info("before login_req comp conn: %p\n", isert_conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
	if (ret) {
		isert_err("isert_conn %p interrupted before got login req\n",
			  isert_conn);
		return ret;
	}
	reinit_completion(&isert_conn->login_req_comp);

	/*
	 * For login requests after the first PDU, isert_rx_login_req() will
	 * kick schedule_delayed_work(&conn->login_work) as the packet is
	 * received, which turns this callback from iscsi_target_do_login_rx()
	 * into a NOP.
	 */
	if (!login->first_request)
		return 0;

	isert_rx_login_req(isert_conn);

	isert_info("before login_comp conn: %p\n", conn);
	ret = wait_for_completion_interruptible(&isert_conn->login_comp);
	if (ret)
		return ret;

	isert_info("processing login->req: %p\n", login->req);

	return 0;
}
static void
isert_set_conn_info(struct iscsi_np *np, struct iscsi_conn *conn,
		    struct isert_conn *isert_conn)
{
	struct rdma_cm_id *cm_id = isert_conn->cm_id;
	struct rdma_route *cm_route = &cm_id->route;

	conn->login_family = np->np_sockaddr.ss_family;

	conn->login_sockaddr = cm_route->addr.dst_addr;
	conn->local_sockaddr = cm_route->addr.src_addr;
}
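
/*
 * isert_accept_np() - block until a pending isert connection is queued
 * by the CM handler, then hand it to the iscsi login thread.
 */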
static int
isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn;
	int ret;

accept_wait:
	ret = down_interruptible(&isert_np->sem);
	if (ret)
		return -ENODEV;

	spin_lock_bh(&np->np_thread_lock);
	if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
		spin_unlock_bh(&np->np_thread_lock);
		isert_dbg("np_thread_state %d\n",
			  np->np_thread_state);
		/*
		 * No point in stalling here when np_thread
		 * is in state RESET/SHUTDOWN/EXIT - bail
		 */
		return -ENODEV;
	}
	spin_unlock_bh(&np->np_thread_lock);

	mutex_lock(&isert_np->mutex);
	if (list_empty(&isert_np->pending)) {
		mutex_unlock(&isert_np->mutex);
		goto accept_wait;
	}
	isert_conn = list_first_entry(&isert_np->pending,
				      struct isert_conn, node);
	list_del_init(&isert_conn->node);
	mutex_unlock(&isert_np->mutex);

	conn->context = isert_conn;
	isert_conn->conn = conn;

	isert_set_conn_info(np, conn, isert_conn);

	isert_dbg("Processing isert_conn: %p\n", isert_conn);

	return 0;
}
static void
isert_free_np(struct iscsi_np *np)
{
	struct isert_np *isert_np = np->np_context;
	struct isert_conn *isert_conn, *n;

	if (isert_np->cm_id)
		rdma_destroy_id(isert_np->cm_id);

	/*
	 * FIXME: At this point we don't have a good way to ensure that
	 * there are no hanging connections that completed RDMA
	 * establishment but didn't start the iscsi login process. Work
	 * around this by cleaning up whatever piled up in the accepted
	 * and pending lists.
	 */
	mutex_lock(&isert_np->mutex);
	if (!list_empty(&isert_np->pending)) {
		isert_info("Still have isert pending connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->pending,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}

	if (!list_empty(&isert_np->accepted)) {
		isert_info("Still have isert accepted connections\n");
		list_for_each_entry_safe(isert_conn, n,
					 &isert_np->accepted,
					 node) {
			isert_info("cleaning isert_conn %p state (%d)\n",
				   isert_conn, isert_conn->state);
			isert_connect_release(isert_conn);
		}
	}
	mutex_unlock(&isert_np->mutex);

	np->np_context = NULL;
	kfree(isert_np);
}
static void isert_release_work(struct work_struct *work)
{
	struct isert_conn *isert_conn = container_of(work,
						     struct isert_conn,
						     release_work);

	isert_info("Starting release conn %p\n", isert_conn);

	wait_for_completion(&isert_conn->wait);

	mutex_lock(&isert_conn->mutex);
	isert_conn->state = ISER_CONN_DOWN;
	mutex_unlock(&isert_conn->mutex);

	isert_info("Destroying conn %p\n", isert_conn);
	isert_put_conn(isert_conn);
}
static void
isert_wait4logout(struct isert_conn *isert_conn)
{
	struct iscsi_conn *conn = isert_conn->conn;

	isert_info("conn %p\n", isert_conn);

	if (isert_conn->logout_posted) {
		isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
		wait_for_completion_timeout(&conn->conn_logout_comp,
					    SECONDS_FOR_LOGOUT_COMP * HZ);
	}
}
static void
isert_wait4cmds(struct iscsi_conn *conn)
{
	isert_info("iscsi_conn %p\n", conn);

	if (conn->sess) {
		target_sess_cmd_list_set_waiting(conn->sess->se_sess);
		target_wait_for_sess_cmds(conn->sess->se_sess);
	}
}
static void
isert_wait4flush(struct isert_conn *isert_conn)
{
	struct ib_recv_wr *bad_wr;

	isert_info("conn %p\n", isert_conn);

	init_completion(&isert_conn->wait_comp_err);
	isert_conn->beacon.wr_id = ISER_BEACON_WRID;
	/* post an indication that all flush errors were consumed */
	if (ib_post_recv(isert_conn->qp, &isert_conn->beacon, &bad_wr)) {
		isert_err("conn %p failed to post beacon", isert_conn);
		return;
	}

	wait_for_completion(&isert_conn->wait_comp_err);
}
/**
 * isert_put_unsol_pending_cmds() - Drop commands waiting for
 *     unsolicited data-out
 * @conn: iscsi connection
 *
 * We might still have commands that are waiting for unsolicited
 * data-out messages. We must put the extra reference on those
 * before blocking on target_wait_for_sess_cmds().
 */
static void
isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
{
	struct iscsi_cmd *cmd, *tmp;
	static LIST_HEAD(drop_cmd_list);

	spin_lock_bh(&conn->cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
		if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
		    (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
		    (cmd->write_data_done < cmd->se_cmd.data_length))
			list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
	}
	spin_unlock_bh(&conn->cmd_lock);

	list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
		list_del_init(&cmd->i_conn_node);
		if (cmd->i_state != ISTATE_REMOVE) {
			struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);

			isert_info("conn %p dropping cmd %p\n", conn, cmd);
			isert_put_cmd(isert_cmd, true);
		}
	}
}
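
/*
 * isert_wait_conn() - iscsit_wait_conn callback: drain in-flight work
 * before handing final teardown to isert_release_work().
 */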
static void isert_wait_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_info("Starting conn %p\n", isert_conn);

	mutex_lock(&isert_conn->mutex);
	/*
	 * Only wait for wait_comp_err if the isert_conn made it
	 * into full feature phase.
	 */
	if (isert_conn->state == ISER_CONN_INIT) {
		mutex_unlock(&isert_conn->mutex);
		return;
	}
	isert_conn_terminate(isert_conn);
	mutex_unlock(&isert_conn->mutex);

	isert_wait4flush(isert_conn);
	isert_put_unsol_pending_cmds(conn);
	isert_wait4cmds(conn);
	isert_wait4logout(isert_conn);

	queue_work(isert_release_wq, &isert_conn->release_work);
}
static void isert_free_conn(struct iscsi_conn *conn)
{
	struct isert_conn *isert_conn = conn->context;

	isert_wait4flush(isert_conn);
	isert_put_conn(isert_conn);
}
static struct iscsit_transport iser_target_transport = {
	.name			= "IB/iSER",
	.transport_type		= ISCSI_INFINIBAND,
	.priv_size		= sizeof(struct isert_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= isert_setup_np,
	.iscsit_accept_np	= isert_accept_np,
	.iscsit_free_np		= isert_free_np,
	.iscsit_wait_conn	= isert_wait_conn,
	.iscsit_free_conn	= isert_free_conn,
	.iscsit_get_login_rx	= isert_get_login_rx,
	.iscsit_put_login_tx	= isert_put_login_tx,
	.iscsit_immediate_queue	= isert_immediate_queue,
	.iscsit_response_queue	= isert_response_queue,
	.iscsit_get_dataout	= isert_get_dataout,
	.iscsit_queue_data_in	= isert_put_datain,
	.iscsit_queue_status	= isert_put_response,
	.iscsit_aborted_task	= isert_aborted_task,
	.iscsit_get_sup_prot_ops = isert_get_sup_prot_ops,
};
static int __init isert_init(void)
{
	int ret;

	isert_comp_wq = alloc_workqueue("isert_comp_wq",
					WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!isert_comp_wq) {
		isert_err("Unable to allocate isert_comp_wq\n");
		return -ENOMEM;
	}

	isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
					   WQ_UNBOUND_MAX_ACTIVE);
	if (!isert_release_wq) {
		isert_err("Unable to allocate isert_release_wq\n");
		ret = -ENOMEM;
		goto destroy_comp_wq;
	}

	iscsit_register_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");

	return 0;

destroy_comp_wq:
	destroy_workqueue(isert_comp_wq);

	return ret;
}
static void __exit isert_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(isert_release_wq);
	destroy_workqueue(isert_comp_wq);
	iscsit_unregister_transport(&iser_target_transport);
	isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
}
MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
MODULE_VERSION("1.0");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(isert_init);
module_exit(isert_exit);