/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/lockdep.h>
#include <rdma/ib_cache.h>

#include <linux/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_tcq.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"2.0"
#define DRV_RELDATE	"July 26, 2015"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_INFO(release_date, DRV_RELDATE);
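
/*
 * Stub out the dynamic debug hooks when CONFIG_DYNAMIC_DEBUG is disabled so
 * that the memory-mapping consistency check in srp_map_data() compiles away.
 */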
#if !defined(CONFIG_DYNAMIC_DEBUG)
#define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt)
#define DYNAMIC_DEBUG_BRANCH(descriptor) false
#endif

static unsigned int srp_sg_tablesize;
static unsigned int cmd_sg_entries;
static unsigned int indirect_sg_entries;
static bool allow_ext_sg;
static bool prefer_fr = true;
static bool register_always = true;
static bool never_register;
static int topspin_workarounds = 1;

module_param(srp_sg_tablesize, uint, 0444);
MODULE_PARM_DESC(srp_sg_tablesize, "Deprecated name for cmd_sg_entries");

module_param(cmd_sg_entries, uint, 0444);
MODULE_PARM_DESC(cmd_sg_entries,
		 "Default number of gather/scatter entries in the SRP command (default is 12, max 255)");

module_param(indirect_sg_entries, uint, 0444);
MODULE_PARM_DESC(indirect_sg_entries,
		 "Default max number of gather/scatter entries (default is 12, max is " __stringify(SG_MAX_SEGMENTS) ")");

module_param(allow_ext_sg, bool, 0444);
MODULE_PARM_DESC(allow_ext_sg,
		 "Default behavior when there are more than cmd_sg_entries S/G entries after mapping; fails the request when false (default false)");

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

module_param(prefer_fr, bool, 0444);
MODULE_PARM_DESC(prefer_fr,
		 "Whether to use fast registration if both FMR and fast registration are supported");

module_param(register_always, bool, 0444);
MODULE_PARM_DESC(register_always,
		 "Use memory registration even for contiguous memory regions");

module_param(never_register, bool, 0444);
MODULE_PARM_DESC(never_register, "Never register memory");

static const struct kernel_param_ops srp_tmo_ops;

static int srp_reconnect_delay = 10;
module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");

static int srp_fast_io_fail_tmo = 15;
module_param_cb(fast_io_fail_tmo, &srp_tmo_ops, &srp_fast_io_fail_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_io_fail_tmo,
		 "Number of seconds between the observation of a transport"
		 " layer error and failing all I/O. \"off\" means that this"
		 " functionality is disabled.");

static int srp_dev_loss_tmo = 600;
module_param_cb(dev_loss_tmo, &srp_tmo_ops, &srp_dev_loss_tmo,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dev_loss_tmo,
		 "Maximum number of seconds that the SRP transport should"
		 " insulate transport layer errors. After this time has been"
		 " exceeded the SCSI host is removed. Should be"
		 " between 1 and " __stringify(SCSI_DEVICE_BLOCK_MAX_TIMEOUT)
		 " if fast_io_fail_tmo has not been set. \"off\" means that"
		 " this functionality is disabled.");

static unsigned ch_count;
module_param(ch_count, uint, 0444);
MODULE_PARM_DESC(ch_count,
		 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device, void *client_data);
static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc);
static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;
static struct workqueue_struct *srp_remove_wq;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;
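
/*
 * sysfs accessors for the reconnect_delay, fast_io_fail_tmo and dev_loss_tmo
 * module parameters. A negative timeout is reported as "off", and
 * srp_tmo_valid() enforces the mutual constraints between the three values.
 */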
static int srp_tmo_get(char *buffer, const struct kernel_param *kp)
{
	int tmo = *(int *)kp->arg;

	if (tmo >= 0)
		return sprintf(buffer, "%d", tmo);
	else
		return sprintf(buffer, "off");
}

static int srp_tmo_set(const char *val, const struct kernel_param *kp)
{
	int tmo, res;

	res = srp_parse_tmo(&tmo, val);
	if (res)
		goto out;

	if (kp->arg == &srp_reconnect_delay)
		res = srp_tmo_valid(tmo, srp_fast_io_fail_tmo,
				    srp_dev_loss_tmo);
	else if (kp->arg == &srp_fast_io_fail_tmo)
		res = srp_tmo_valid(srp_reconnect_delay, tmo, srp_dev_loss_tmo);
	else
		res = srp_tmo_valid(srp_reconnect_delay, srp_fast_io_fail_tmo,
				    tmo);
	if (res)
		goto out;

	*(int *)kp->arg = tmo;

out:
	return res;
}

static const struct kernel_param_ops srp_tmo_ops = {
	.get = srp_tmo_get,
	.set = srp_tmo_set,
};

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}
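
/*
 * Allocate an information unit: a kernel buffer plus the DMA mapping that
 * lets the HCA read or write it directly. IUs are reused for the lifetime
 * of a channel; see srp_alloc_iu_bufs() and srp_free_ch_ib().
 */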
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	pr_debug("QP event %s (%d)\n",
		 ib_event_msg(event->event), event->event);
}
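
/*
 * Move a freshly created QP into the INIT state. The P_Key index is looked
 * up in the cached P_Key table of the local port; remote reads and writes
 * must be allowed because SRP data transfers are initiated by the target.
 */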
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, ch);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (ch->cm_id)
		ib_destroy_cm_id(ch->cm_id);
	ch->cm_id = new_cm_id;
	ch->path.rec_type = SA_PATH_REC_TYPE_IB;
	ch->path.sgid = target->sgid;
	ch->path.dgid = target->orig_dgid;
	ch->path.pkey = target->pkey;
	sa_path_set_service_id(&ch->path, target->service_id);

	return 0;
}
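
/*
 * Create a pool of FMRs (fast memory regions) for the legacy FMR mapping
 * path. The dirty watermark makes the pool flush unmapped FMRs in batches
 * once a quarter of the pool is waiting for invalidation.
 */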
static struct ib_fmr_pool *srp_alloc_fmr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_fmr_pool_param fmr_param;

	memset(&fmr_param, 0, sizeof(fmr_param));
	fmr_param.pool_size	    = target->mr_pool_size;
	fmr_param.dirty_watermark   = fmr_param.pool_size / 4;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = dev->max_pages_per_mr;
	fmr_param.page_shift	    = ilog2(dev->mr_page_size);
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	return ib_create_fmr_pool(dev->pd, &fmr_param);
}

/**
 * srp_destroy_fr_pool() - free the resources owned by a pool
 * @pool: Fast registration pool to be destroyed.
 */
static void srp_destroy_fr_pool(struct srp_fr_pool *pool)
{
	int i;
	struct srp_fr_desc *d;

	if (!pool)
		return;

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		if (d->mr)
			ib_dereg_mr(d->mr);
	}
	kfree(pool);
}

/**
 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
 * @device:            IB device to allocate fast registration descriptors for.
 * @pd:                Protection domain associated with the FR descriptors.
 * @pool_size:         Number of descriptors to allocate.
 * @max_page_list_len: Maximum fast registration work request page list length.
 */
static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
					      struct ib_pd *pd, int pool_size,
					      int max_page_list_len)
{
	struct srp_fr_pool *pool;
	struct srp_fr_desc *d;
	struct ib_mr *mr;
	int i, ret = -EINVAL;

	if (pool_size <= 0)
		goto err;
	ret = -ENOMEM;
	pool = kzalloc(sizeof(struct srp_fr_pool) +
		       pool_size * sizeof(struct srp_fr_desc), GFP_KERNEL);
	if (!pool)
		goto err;
	pool->size = pool_size;
	pool->max_page_list_len = max_page_list_len;
	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free_list);

	for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
		mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
				 max_page_list_len);
		if (IS_ERR(mr)) {
			ret = PTR_ERR(mr);
			if (ret == -ENOMEM)
				pr_info("%s: ib_alloc_mr() failed. Try to reduce max_cmd_per_lun, max_sect or ch_count\n",
					dev_name(&device->dev));
			goto destroy_pool;
		}
		d->mr = mr;
		list_add_tail(&d->entry, &pool->free_list);
	}

out:
	return pool;

destroy_pool:
	srp_destroy_fr_pool(pool);

err:
	pool = ERR_PTR(ret);
	goto out;
}

/**
 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
 * @pool: Pool to obtain descriptor from.
 */
static struct srp_fr_desc *srp_fr_pool_get(struct srp_fr_pool *pool)
{
	struct srp_fr_desc *d = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!list_empty(&pool->free_list)) {
		d = list_first_entry(&pool->free_list, typeof(*d), entry);
		list_del(&d->entry);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return d;
}

/**
 * srp_fr_pool_put() - put an FR descriptor back in the free list
 * @pool: Pool the descriptor was allocated from.
 * @desc: Pointer to an array of fast registration descriptor pointers.
 * @n:    Number of descriptors to put back.
 *
 * Note: The caller must already have queued an invalidation request for
 * desc->mr->rkey before calling this function.
 */
static void srp_fr_pool_put(struct srp_fr_pool *pool, struct srp_fr_desc **desc,
			    int n)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pool->lock, flags);
	for (i = 0; i < n; i++)
		list_add(&desc[i]->entry, &pool->free_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static struct srp_fr_pool *srp_alloc_fr_pool(struct srp_target_port *target)
{
	struct srp_device *dev = target->srp_host->srp_dev;

	return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size,
				  dev->max_pages_per_mr);
}

/**
 * srp_destroy_qp() - destroy an RDMA queue pair
 * @ch: SRP RDMA channel.
 * @qp: RDMA queue pair.
 *
 * Drain the qp before destroying it. This avoids that the receive
 * completion handler can access the queue pair while it is
 * being destroyed.
 */
static void srp_destroy_qp(struct srp_rdma_ch *ch, struct ib_qp *qp)
{
	spin_lock_irq(&ch->lock);
	ib_process_cq_direct(ch->send_cq, -1);
	spin_unlock_irq(&ch->lock);

	ib_drain_qp(qp);
	ib_destroy_qp(qp);
}
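
/*
 * Create the CQs and the RC QP for one RDMA channel. The send queue must
 * hold, per SRP command, one SEND plus (when using fast registration) up to
 * mr_per_cmd REG_MR and LOCAL_INV work requests; that is the factor
 * m = 1 + use_fast_reg * mr_per_cmd * 2 used when sizing the send queue.
 */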
static int srp_create_ch_ib(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_qp_init_attr *init_attr;
	struct ib_cq *recv_cq, *send_cq;
	struct ib_qp *qp;
	struct ib_fmr_pool *fmr_pool = NULL;
	struct srp_fr_pool *fr_pool = NULL;
	const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* queue_size + 1 for ib_drain_rq() */
	recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1,
			      ch->comp_vector, IB_POLL_SOFTIRQ);
	if (IS_ERR(recv_cq)) {
		ret = PTR_ERR(recv_cq);
		goto err;
	}

	send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size,
			      ch->comp_vector, IB_POLL_DIRECT);
	if (IS_ERR(send_cq)) {
		ret = PTR_ERR(send_cq);
		goto err_recv_cq;
	}

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = m * target->queue_size;
	init_attr->cap.max_recv_wr     = target->queue_size + 1;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_REQ_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = send_cq;
	init_attr->recv_cq             = recv_cq;

	qp = ib_create_qp(dev->pd, init_attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, qp);
	if (ret)
		goto err_qp;

	if (dev->use_fast_reg) {
		fr_pool = srp_alloc_fr_pool(target);
		if (IS_ERR(fr_pool)) {
			ret = PTR_ERR(fr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	} else if (dev->use_fmr) {
		fmr_pool = srp_alloc_fmr_pool(target);
		if (IS_ERR(fmr_pool)) {
			ret = PTR_ERR(fmr_pool);
			shost_printk(KERN_WARNING, target->scsi_host, PFX
				     "FMR pool allocation failed (%d)\n", ret);
			goto err_qp;
		}
	}

	if (ch->qp)
		srp_destroy_qp(ch, ch->qp);
	if (ch->recv_cq)
		ib_free_cq(ch->recv_cq);
	if (ch->send_cq)
		ib_free_cq(ch->send_cq);

	ch->qp = qp;
	ch->recv_cq = recv_cq;
	ch->send_cq = send_cq;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
		ch->fr_pool = fr_pool;
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
		ch->fmr_pool = fmr_pool;
	}

	kfree(init_attr);
	return 0;

err_qp:
	srp_destroy_qp(ch, qp);

err_send_cq:
	ib_free_cq(send_cq);

err_recv_cq:
	ib_free_cq(recv_cq);

err:
	kfree(init_attr);
	return ret;
}

/*
 * Note: this function may be called without srp_alloc_iu_bufs() having been
 * invoked. Hence the ch->[rt]x_ring checks.
 */
static void srp_free_ch_ib(struct srp_target_port *target,
			   struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	int i;

	if (!ch->target)
		return;

	if (ch->cm_id) {
		ib_destroy_cm_id(ch->cm_id);
		ch->cm_id = NULL;
	}

	/* If srp_new_cm_id() succeeded but srp_create_ch_ib() not, return. */
	if (!ch->qp)
		return;

	if (dev->use_fast_reg) {
		if (ch->fr_pool)
			srp_destroy_fr_pool(ch->fr_pool);
	} else if (dev->use_fmr) {
		if (ch->fmr_pool)
			ib_destroy_fmr_pool(ch->fmr_pool);
	}

	srp_destroy_qp(ch, ch->qp);
	ib_free_cq(ch->send_cq);
	ib_free_cq(ch->recv_cq);

	/*
	 * Avoid that the SCSI error handler tries to use this channel after
	 * it has been freed. The SCSI error handler can namely continue
	 * trying to perform recovery actions after scsi_remove_host()
	 * returned.
	 */
	ch->target = NULL;

	ch->qp = NULL;
	ch->send_cq = ch->recv_cq = NULL;

	if (ch->rx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->rx_ring[i]);
		kfree(ch->rx_ring);
		ch->rx_ring = NULL;
	}
	if (ch->tx_ring) {
		for (i = 0; i < target->queue_size; ++i)
			srp_free_iu(target->srp_host, ch->tx_ring[i]);
		kfree(ch->tx_ring);
		ch->tx_ring = NULL;
	}
}

static void srp_path_rec_completion(int status,
				    struct sa_path_rec *pathrec,
				    void *ch_ptr)
{
	struct srp_rdma_ch *ch = ch_ptr;
	struct srp_target_port *target = ch->target;

	ch->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		ch->path = *pathrec;
	complete(&ch->done);
}
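
/*
 * Ask the subnet administrator (SA) for a path record from the local port
 * to the target port. The query completes asynchronously via
 * srp_path_rec_completion(), so wait on ch->done here.
 */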
static int srp_lookup_path(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int ret;

	ch->path.numb_path = 1;

	init_completion(&ch->done);

	ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
					       target->srp_host->srp_dev->dev,
					       target->srp_host->port,
					       &ch->path,
					       IB_SA_PATH_REC_SERVICE_ID |
					       IB_SA_PATH_REC_DGID	 |
					       IB_SA_PATH_REC_SGID	 |
					       IB_SA_PATH_REC_NUMB_PATH	 |
					       IB_SA_PATH_REC_PKEY,
					       SRP_PATH_REC_TIMEOUT_MS,
					       GFP_KERNEL,
					       srp_path_rec_completion,
					       ch, &ch->path_query);
	if (ch->path_query_id < 0)
		return ch->path_query_id;

	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;

	if (ch->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return ch->status;
}

static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &ch->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = ch->qp->qp_num;
	req->param.qp_type		      = ch->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count                = target->tl_retry_count;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.tag		= 0;
	req->priv.req_it_iu_len = cpu_to_be32(target->max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	req->priv.req_flags	= (multich ? SRP_MULTICHAN_MULTI :
				   SRP_MULTICHAN_SINGLE);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(ch->cm_id, &req->param);

	kfree(req);

	return status;
}

static bool srp_queue_remove_work(struct srp_target_port *target)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state != SRP_TARGET_REMOVED) {
		target->state = SRP_TARGET_REMOVED;
		changed = true;
	}
	spin_unlock_irq(&target->lock);

	if (changed)
		queue_work(srp_remove_wq, &target->remove_work);

	return changed;
}

static void srp_disconnect_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	/* XXX should send SRP_I_LOGOUT request */

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ch->connected = false;
		if (ch->cm_id && ib_send_cm_dreq(ch->cm_id, NULL, 0)) {
			shost_printk(KERN_DEBUG, target->scsi_host,
				     PFX "Sending CM DREQ failed\n");
		}
	}
}

static void srp_free_req_data(struct srp_target_port *target,
			      struct srp_rdma_ch *ch)
{
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct srp_request *req;
	int i;

	if (!ch->req_ring)
		return;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		if (dev->use_fast_reg) {
			kfree(req->fr_list);
		} else {
			kfree(req->fmr_list);
			kfree(req->map_page);
		}
		if (req->indirect_dma_addr) {
			ib_dma_unmap_single(ibdev, req->indirect_dma_addr,
					    target->indirect_size,
					    DMA_TO_DEVICE);
		}
		kfree(req->indirect_desc);
	}

	kfree(ch->req_ring);
	ch->req_ring = NULL;
}

static int srp_alloc_req_data(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *srp_dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = srp_dev->dev;
	struct srp_request *req;
	void *mr_list;
	dma_addr_t dma_addr;
	int i, ret = -ENOMEM;

	ch->req_ring = kcalloc(target->req_ring_size, sizeof(*ch->req_ring),
			       GFP_KERNEL);
	if (!ch->req_ring)
		goto out;

	for (i = 0; i < target->req_ring_size; ++i) {
		req = &ch->req_ring[i];
		mr_list = kmalloc(target->mr_per_cmd * sizeof(void *),
				  GFP_KERNEL);
		if (!mr_list)
			goto out;
		if (srp_dev->use_fast_reg) {
			req->fr_list = mr_list;
		} else {
			req->fmr_list = mr_list;
			req->map_page = kmalloc(srp_dev->max_pages_per_mr *
						sizeof(void *), GFP_KERNEL);
			if (!req->map_page)
				goto out;
		}
		req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL);
		if (!req->indirect_desc)
			goto out;

		dma_addr = ib_dma_map_single(ibdev, req->indirect_desc,
					     target->indirect_size,
					     DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, dma_addr))
			goto out;

		req->indirect_dma_addr = dma_addr;
	}
	ret = 0;

out:
	return ret;
}

/**
 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
 * @shost: SCSI host whose attributes to remove from sysfs.
 *
 * Note: Any attributes defined in the host template and that did not exist
 * before invocation of this function will be ignored.
 */
static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
{
	struct device_attribute **attr;

	for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
		device_remove_file(&shost->shost_dev, *attr);
}

static void srp_remove_target(struct srp_target_port *target)
{
	struct srp_rdma_ch *ch;
	int i;

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_del_scsi_host_attr(target->scsi_host);
	srp_rport_get(target->rport);
	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	srp_stop_rport_timers(target->rport);
	srp_disconnect_target(target);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_ch_ib(target, ch);
	}
	cancel_work_sync(&target->tl_err_work);
	srp_rport_put(target->rport);
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		srp_free_req_data(target, ch);
	}
	kfree(target->ch);
	target->ch = NULL;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_host_put(target->scsi_host);
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, remove_work);

	WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED);

	srp_remove_target(target);
}

static void srp_rport_delete(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;

	srp_queue_remove_work(target);
}

/**
 * srp_connected_ch() - number of connected channels
 * @target: SRP target port.
 */
static int srp_connected_ch(struct srp_target_port *target)
{
	int i, c = 0;

	for (i = 0; i < target->ch_count; i++)
		c += target->ch[i].connected;

	return c;
}

static int srp_connect_ch(struct srp_rdma_ch *ch, bool multich)
{
	struct srp_target_port *target = ch->target;
	int ret;

	WARN_ON_ONCE(!multich && srp_connected_ch(target) > 0);

	ret = srp_lookup_path(ch);
	if (ret)
		goto out;

	while (1) {
		init_completion(&ch->done);
		ret = srp_send_req(ch, multich);
		if (ret)
			goto out;
		ret = wait_for_completion_interruptible(&ch->done);
		if (ret < 0)
			goto out;

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		ret = ch->status;
		switch (ret) {
		case 0:
			ch->connected = true;
			goto out;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(ch);
			if (ret)
				goto out;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "giving up on stale connection\n");
			ret = -ECONNRESET;
			goto out;

		default:
			goto out;
		}
	}

out:
	return ret <= 0 ? ret : -ENODEV;
}
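
/*
 * Post a LOCAL_INV work request to invalidate an rkey after I/O completes.
 * Only the error path needs a completion callback; successful invalidations
 * complete silently because the WR is posted unsignaled.
 */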
static void srp_inv_rkey_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "INV RKEY");
}

static int srp_inv_rkey(struct srp_request *req, struct srp_rdma_ch *ch,
		u32 rkey)
{
	struct ib_send_wr *bad_wr;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.next		    = NULL,
		.num_sge	    = 0,
		.send_flags	    = 0,
		.ex.invalidate_rkey = rkey,
	};

	wr.wr_cqe = &req->reg_cqe;
	req->reg_cqe.done = srp_inv_rkey_err_done;
	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_rdma_ch *ch,
			   struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	int i, res;

	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (dev->use_fast_reg) {
		struct srp_fr_desc **pfr;

		for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) {
			res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey);
			if (res < 0) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "Queueing INV WR for rkey %#x failed (%d)\n",
					     (*pfr)->mr->rkey, res);
				queue_work(system_long_wq,
					   &target->tl_err_work);
			}
		}
		if (req->nmdesc)
			srp_fr_pool_put(ch->fr_pool, req->fr_list,
					req->nmdesc);
	} else if (dev->use_fmr) {
		struct ib_pool_fmr **pfmr;

		for (i = req->nmdesc, pfmr = req->fmr_list; i > 0; i--, pfmr++)
			ib_fmr_pool_unmap(*pfmr);
	}

	ib_dma_unmap_sg(ibdev, scsi_sglist(scmnd), scsi_sg_count(scmnd),
			scmnd->sc_data_direction);
}

/**
 * srp_claim_req - Take ownership of the scmnd associated with a request.
 * @ch: SRP RDMA channel.
 * @req: SRP request.
 * @sdev: If not NULL, only take ownership for this SCSI device.
 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
 *         ownership of @req->scmnd if it equals @scmnd.
 *
 * Return value:
 * Either NULL or a pointer to the SCSI command the caller became owner of.
 */
static struct scsi_cmnd *srp_claim_req(struct srp_rdma_ch *ch,
				       struct srp_request *req,
				       struct scsi_device *sdev,
				       struct scsi_cmnd *scmnd)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	if (req->scmnd &&
	    (!sdev || req->scmnd->device == sdev) &&
	    (!scmnd || req->scmnd == scmnd)) {
		scmnd = req->scmnd;
		req->scmnd = NULL;
	} else {
		scmnd = NULL;
	}
	spin_unlock_irqrestore(&ch->lock, flags);

	return scmnd;
}

/**
 * srp_free_req() - Unmap data and adjust ch->req_lim.
 * @ch:     SRP RDMA channel.
 * @req:    Request to be freed.
 * @scmnd:  SCSI command associated with @req.
 * @req_lim_delta: Amount to be added to @target->req_lim.
 */
static void srp_free_req(struct srp_rdma_ch *ch, struct srp_request *req,
			 struct scsi_cmnd *scmnd, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(scmnd, ch, req);

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_lim_delta;
	spin_unlock_irqrestore(&ch->lock, flags);
}

static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req,
			   struct scsi_device *sdev, int result)
{
	struct scsi_cmnd *scmnd = srp_claim_req(ch, req, sdev, NULL);

	if (scmnd) {
		srp_free_req(ch, req, scmnd, 0);
		scmnd->result = result;
		scmnd->scsi_done(scmnd);
	}
}

static void srp_terminate_io(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	struct Scsi_Host *shost = target->scsi_host;
	struct scsi_device *sdev;
	int i, j;

	/*
	 * Invoking srp_terminate_io() while srp_queuecommand() is running
	 * is not safe. Hence the warning statement below.
	 */
	shost_for_each_device(sdev, shost)
		WARN_ON_ONCE(sdev->request_queue->request_fn_active);

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];

		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL,
				       DID_TRANSPORT_FAILFAST << 16);
		}
	}
}

/*
 * It is up to the caller to ensure that srp_rport_reconnect() calls are
 * serialized and that no concurrent srp_queuecommand(), srp_abort(),
 * srp_reset_device() or srp_reset_host() calls will occur while this function
 * is in progress. One way to realize that is not to call this function
 * directly but to call srp_reconnect_rport() instead since that last function
 * serializes calls of this function via rport->mutex and also blocks
 * srp_queuecommand() calls before invoking this function.
 */
static int srp_rport_reconnect(struct srp_rport *rport)
{
	struct srp_target_port *target = rport->lld_data;
	struct srp_rdma_ch *ch;
	int i, j, ret = 0;
	bool multich = false;

	srp_disconnect_target(target);

	if (target->state == SRP_TARGET_SCANNING)
		return -ENODEV;

	/*
	 * Now get a new local CM ID so that we avoid confusing the target in
	 * case things are really fouled up. Doing so also ensures that all CM
	 * callbacks will have finished before a new QP is allocated.
	 */
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		ret += srp_new_cm_id(ch);
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		for (j = 0; j < target->req_ring_size; ++j) {
			struct srp_request *req = &ch->req_ring[j];

			srp_finish_req(ch, req, NULL, DID_RESET << 16);
		}
	}
	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		/*
		 * Whether or not creating a new CM ID succeeded, create a new
		 * QP. This guarantees that all completion callback function
		 * invocations have finished before request resetting starts.
		 */
		ret += srp_create_ch_ib(ch);

		INIT_LIST_HEAD(&ch->free_tx);
		for (j = 0; j < target->queue_size; ++j)
			list_add(&ch->tx_ring[j]->list, &ch->free_tx);
	}

	target->qp_in_error = false;

	for (i = 0; i < target->ch_count; i++) {
		ch = &target->ch[i];
		if (ret)
			break;
		ret = srp_connect_ch(ch, multich);
		multich = true;
	}

	if (ret == 0)
		shost_printk(KERN_INFO, target->scsi_host,
			     PFX "reconnect succeeded\n");

	return ret;
}

static void srp_map_desc(struct srp_map_state *state, dma_addr_t dma_addr,
			 unsigned int dma_len, u32 rkey)
{
	struct srp_direct_buf *desc = state->desc;

	WARN_ON_ONCE(!dma_len);

	desc->va = cpu_to_be64(dma_addr);
	desc->key = cpu_to_be32(rkey);
	desc->len = cpu_to_be32(dma_len);

	state->total_len += dma_len;
	state->desc++;
	state->ndesc++;
}

static int srp_map_finish_fmr(struct srp_map_state *state,
			      struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pd *pd = target->pd;
	struct ib_pool_fmr *fmr;
	u64 io_addr = 0;

	if (state->fmr.next >= state->fmr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fmr);

	if (state->npages == 0)
		return 0;

	if (state->npages == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		srp_map_desc(state, state->base_dma_addr, state->dma_len,
			     pd->unsafe_global_rkey);
		goto reset_state;
	}

	fmr = ib_fmr_pool_map_phys(ch->fmr_pool, state->pages,
				   state->npages, io_addr);
	if (IS_ERR(fmr))
		return PTR_ERR(fmr);

	*state->fmr.next++ = fmr;
	state->nmdesc++;

	srp_map_desc(state, state->base_dma_addr & ~dev->mr_page_mask,
		     state->dma_len, fmr->fmr->rkey);

reset_state:
	state->npages = 0;
	state->dma_len = 0;

	return 0;
}

static void srp_reg_mr_err_done(struct ib_cq *cq, struct ib_wc *wc)
{
	srp_handle_qp_err(cq, wc, "FAST REG");
}

/*
 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
 * where to start in the first element. If sg_offset_p != NULL then
 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
 * byte that has not yet been mapped.
 */
static int srp_map_finish_fr(struct srp_map_state *state,
			     struct srp_request *req,
			     struct srp_rdma_ch *ch, int sg_nents,
			     unsigned int *sg_offset_p)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_pd *pd = target->pd;
	struct ib_send_wr *bad_wr;
	struct ib_reg_wr wr;
	struct srp_fr_desc *desc;
	u32 rkey;
	int n, err;

	if (state->fr.next >= state->fr.end) {
		shost_printk(KERN_ERR, ch->target->scsi_host,
			     PFX "Out of MRs (mr_per_cmd = %d)\n",
			     ch->target->mr_per_cmd);
		return -ENOMEM;
	}

	WARN_ON_ONCE(!dev->use_fast_reg);

	if (sg_nents == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;

		srp_map_desc(state, sg_dma_address(state->sg) + sg_offset,
			     sg_dma_len(state->sg) - sg_offset,
			     pd->unsafe_global_rkey);
		if (sg_offset_p)
			*sg_offset_p = 0;
		return 1;
	}

	desc = srp_fr_pool_get(ch->fr_pool);
	if (!desc)
		return -ENOMEM;

	rkey = ib_inc_rkey(desc->mr->rkey);
	ib_update_fast_reg_key(desc->mr, rkey);

	n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p,
			 dev->mr_page_size);
	if (unlikely(n < 0)) {
		srp_fr_pool_put(ch->fr_pool, &desc, 1);
		pr_debug("%s: ib_map_mr_sg(%d, %d) returned %d.\n",
			 dev_name(&req->scmnd->device->sdev_gendev), sg_nents,
			 sg_offset_p ? *sg_offset_p : -1, n);
		return n;
	}

	WARN_ON_ONCE(desc->mr->length == 0);

	req->reg_cqe.done = srp_reg_mr_err_done;

	wr.wr.next = NULL;
	wr.wr.opcode = IB_WR_REG_MR;
	wr.wr.wr_cqe = &req->reg_cqe;
	wr.wr.num_sge = 0;
	wr.wr.send_flags = 0;
	wr.mr = desc->mr;
	wr.key = desc->mr->rkey;
	wr.access = (IB_ACCESS_LOCAL_WRITE |
		     IB_ACCESS_REMOTE_READ |
		     IB_ACCESS_REMOTE_WRITE);

	*state->fr.next++ = desc;
	state->nmdesc++;

	srp_map_desc(state, desc->mr->iova,
		     desc->mr->length, desc->mr->rkey);

	err = ib_post_send(ch->qp, &wr.wr, &bad_wr);
	if (unlikely(err)) {
		WARN_ON_ONCE(err == -ENOMEM);
		return err;
	}

	return n;
}
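
/*
 * Add one scatterlist entry to the current FMR mapping state, splitting it
 * at mr_page_size boundaries. Entries can only be merged into one FMR when
 * they abut on page boundaries, so a misaligned end closes the current MR.
 */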
static int srp_map_sg_entry(struct srp_map_state *state,
			    struct srp_rdma_ch *ch,
			    struct scatterlist *sg)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	dma_addr_t dma_addr = ib_sg_dma_address(ibdev, sg);
	unsigned int dma_len = ib_sg_dma_len(ibdev, sg);
	unsigned int len = 0;
	int ret;

	WARN_ON_ONCE(!dma_len);

	while (dma_len) {
		unsigned offset = dma_addr & ~dev->mr_page_mask;

		if (state->npages == dev->max_pages_per_mr ||
		    (state->npages > 0 && offset != 0)) {
			ret = srp_map_finish_fmr(state, ch);
			if (ret)
				return ret;
		}

		len = min_t(unsigned int, dma_len, dev->mr_page_size - offset);

		if (!state->npages)
			state->base_dma_addr = dma_addr;
		state->pages[state->npages++] = dma_addr & dev->mr_page_mask;
		state->dma_len += len;
		dma_addr += len;
		dma_len -= len;
	}

	/*
	 * If the end of the MR is not on a page boundary then we need to
	 * close it out and start a new one -- we can only merge at page
	 * boundaries.
	 */
	ret = 0;
	if ((dma_addr & ~dev->mr_page_mask) != 0)
		ret = srp_map_finish_fmr(state, ch);
	return ret;
}

static int srp_map_sg_fmr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct scatterlist *sg;
	int i, ret;

	state->pages = req->map_page;
	state->fmr.next = req->fmr_list;
	state->fmr.end = req->fmr_list + ch->target->mr_per_cmd;

	for_each_sg(scat, sg, count, i) {
		ret = srp_map_sg_entry(state, ch, sg);
		if (ret)
			return ret;
	}

	ret = srp_map_finish_fmr(state, ch);
	if (ret)
		return ret;

	return 0;
}

static int srp_map_sg_fr(struct srp_map_state *state, struct srp_rdma_ch *ch,
			 struct srp_request *req, struct scatterlist *scat,
			 int count)
{
	unsigned int sg_offset = 0;

	state->fr.next = req->fr_list;
	state->fr.end = req->fr_list + ch->target->mr_per_cmd;
	state->sg = scat;

	if (count == 0)
		return 0;

	while (count) {
		int i, n;

		n = srp_map_finish_fr(state, req, ch, count, &sg_offset);
		if (unlikely(n < 0))
			return n;

		count -= n;
		for (i = 0; i < n; i++)
			state->sg = sg_next(state->sg);
	}

	return 0;
}

static int srp_map_sg_dma(struct srp_map_state *state, struct srp_rdma_ch *ch,
			  struct srp_request *req, struct scatterlist *scat,
			  int count)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct scatterlist *sg;
	int i;

	for_each_sg(scat, sg, count, i) {
		srp_map_desc(state, ib_sg_dma_address(dev->dev, sg),
			     ib_sg_dma_len(dev->dev, sg),
			     target->pd->unsafe_global_rkey);
	}

	return 0;
}

/*
 * Register the indirect data buffer descriptor with the HCA.
 *
 * Note: since the indirect data buffer descriptor has been allocated with
 * kmalloc() it is guaranteed that this buffer is a physically contiguous
 * memory buffer.
 */
static int srp_map_idb(struct srp_rdma_ch *ch, struct srp_request *req,
		       void **next_mr, void **end_mr, u32 idb_len,
		       __be32 *idb_rkey)
{
	struct srp_target_port *target = ch->target;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct srp_map_state state;
	struct srp_direct_buf idb_desc;
	u64 idb_pages[1];
	struct scatterlist idb_sg[1];
	int ret;

	memset(&state, 0, sizeof(state));
	memset(&idb_desc, 0, sizeof(idb_desc));
	state.gen.next = next_mr;
	state.gen.end = end_mr;
	state.desc = &idb_desc;
	state.base_dma_addr = req->indirect_dma_addr;
	state.dma_len = idb_len;

	if (dev->use_fast_reg) {
		state.sg = idb_sg;
		sg_init_one(idb_sg, req->indirect_desc, idb_len);
		idb_sg->dma_address = req->indirect_dma_addr; /* hack! */
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		idb_sg->dma_length = idb_sg->length;	      /* hack^2 */
#endif
		ret = srp_map_finish_fr(&state, req, ch, 1, NULL);
		if (ret < 0)
			return ret;
		WARN_ON_ONCE(ret < 1);
	} else if (dev->use_fmr) {
		state.pages = idb_pages;
		state.pages[0] = (req->indirect_dma_addr &
				  dev->mr_page_mask);
		state.npages = 1;
		ret = srp_map_finish_fmr(&state, ch);
		if (ret < 0)
			return ret;
	} else {
		return -EINVAL;
	}

	*idb_rkey = idb_desc.key;

	return 0;
}

static void srp_check_mapping(struct srp_map_state *state,
			      struct srp_rdma_ch *ch, struct srp_request *req,
			      struct scatterlist *scat, int count)
{
	struct srp_device *dev = ch->target->srp_host->srp_dev;
	struct srp_fr_desc **pfr;
	u64 desc_len = 0, mr_len = 0;
	int i;

	for (i = 0; i < state->ndesc; i++)
		desc_len += be32_to_cpu(req->indirect_desc[i].len);
	if (dev->use_fast_reg)
		for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++)
			mr_len += (*pfr)->mr->length;
	else if (dev->use_fmr)
		for (i = 0; i < state->nmdesc; i++)
			mr_len += be32_to_cpu(req->indirect_desc[i].len);
	if (desc_len != scsi_bufflen(req->scmnd) ||
	    mr_len > scsi_bufflen(req->scmnd))
		pr_err("Inconsistent: scsi len %d <> desc len %lld <> mr len %lld; ndesc %d; nmdesc = %d\n",
		       scsi_bufflen(req->scmnd), desc_len, mr_len,
		       state->ndesc, state->nmdesc);
}

/**
 * srp_map_data() - map SCSI data buffer onto an SRP request
 * @scmnd: SCSI command to map
 * @ch: SRP RDMA channel
 * @req: SRP request
 *
 * Returns the length in bytes of the SRP_CMD IU or a negative value if
 * mapping failed.
 */
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_rdma_ch *ch,
			struct srp_request *req)
{
	struct srp_target_port *target = ch->target;
	struct ib_pd *pd = target->pd;
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count, ret;
	struct srp_device *dev;
	struct ib_device *ibdev;
	struct srp_map_state state;
	struct srp_indirect_buf *indirect_hdr;
	u32 idb_len, table_len;
	__be32 idb_rkey;
	u8 fmt;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);
	if (unlikely(count == 0))
		return -EIO;

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1 && (pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(pd->unsafe_global_rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));

		req->nmdesc = 0;
		goto map_complete;
	}

	/*
	 * We have more than one scatter/gather entry, so build our indirect
	 * descriptor table, trying to merge as many entries as we can.
	 */
	indirect_hdr = (void *) cmd->add_data;

	ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr,
				   target->indirect_size, DMA_TO_DEVICE);

	memset(&state, 0, sizeof(state));
	state.desc = req->indirect_desc;
	if (dev->use_fast_reg)
		ret = srp_map_sg_fr(&state, ch, req, scat, count);
	else if (dev->use_fmr)
		ret = srp_map_sg_fmr(&state, ch, req, scat, count);
	else
		ret = srp_map_sg_dma(&state, ch, req, scat, count);
	req->nmdesc = state.nmdesc;
	if (ret < 0)
		goto unmap;

	{
		DEFINE_DYNAMIC_DEBUG_METADATA(ddm,
			"Memory mapping consistency check");
		if (DYNAMIC_DEBUG_BRANCH(ddm))
			srp_check_mapping(&state, ch, req, scat, count);
	}

	/* We've mapped the request, now pull as much of the indirect
	 * descriptor table as we can into the command buffer. If this
	 * target is not using an external indirect table, we are
	 * guaranteed to fit into the command, as the SCSI layer won't
	 * give us more S/G entries than we allow.
	 */
	if (state.ndesc == 1) {
		/*
		 * Memory registration collapsed the sg-list into one entry,
		 * so use a direct descriptor.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		*buf = req->indirect_desc[0];
		goto map_complete;
	}

	if (unlikely(target->cmd_sg_cnt < state.ndesc &&
						!target->allow_ext_sg)) {
		shost_printk(KERN_ERR, target->scsi_host,
			     "Could not fit S/G list into SRP_CMD\n");
		ret = -EIO;
		goto unmap;
	}

	count = min(state.ndesc, target->cmd_sg_cnt);
	table_len = state.ndesc * sizeof (struct srp_direct_buf);
	idb_len = sizeof(struct srp_indirect_buf) + table_len;

	fmt = SRP_DATA_DESC_INDIRECT;
	len = sizeof(struct srp_cmd) + sizeof (struct srp_indirect_buf);
	len += count * sizeof (struct srp_direct_buf);

	memcpy(indirect_hdr->desc_list, req->indirect_desc,
	       count * sizeof (struct srp_direct_buf));

	if (!(pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)) {
		ret = srp_map_idb(ch, req, state.gen.next, state.gen.end,
				  idb_len, &idb_rkey);
		if (ret < 0)
			goto unmap;
		req->nmdesc++;
	} else {
		idb_rkey = cpu_to_be32(pd->unsafe_global_rkey);
	}

	indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr);
	indirect_hdr->table_desc.key = idb_rkey;
	indirect_hdr->table_desc.len = cpu_to_be32(table_len);
	indirect_hdr->len = cpu_to_be32(state.total_len);

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->data_out_desc_cnt = count;
	else
		cmd->data_in_desc_cnt = count;

	ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len,
				      DMA_TO_DEVICE);

map_complete:
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;

unmap:
	srp_unmap_data(scmnd, ch, req);
	if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size)
		ret = -E2BIG;
	return ret;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_rdma_ch *ch, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	list_add(&iu->list, &ch->free_tx);
	if (iu_type != SRP_IU_RSP)
		++ch->req_lim;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * Must be called with ch->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_rdma_ch *ch,
				      enum srp_iu_type iu_type)
{
	struct srp_target_port *target = ch->target;
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	lockdep_assert_held(&ch->lock);

	ib_process_cq_direct(ch->send_cq, -1);

	if (list_empty(&ch->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (ch->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--ch->req_lim;
	}

	iu = list_first_entry(&ch->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

/*
 * Note: if this function is called from inside ib_drain_sq() then it will
 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
 * with status IB_WC_SUCCESS then that's a bug.
 */
static void srp_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "SEND");
		return;
	}

	lockdep_assert_held(&ch->lock);

	list_add(&iu->list, &ch->free_tx);
}

static int srp_post_send(struct srp_rdma_ch *ch, struct srp_iu *iu, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	iu->cqe.done = srp_send_done;

	wr.next       = NULL;
	wr.wr_cqe     = &iu->cqe;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(ch->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_rdma_ch *ch, struct srp_iu *iu)
{
	struct srp_target_port *target = ch->target;
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	iu->cqe.done = srp_recv_done;

	wr.next     = NULL;
	wr.wr_cqe   = &iu->cqe;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(ch->qp, &wr, &bad_wr);
}

static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp)
{
	struct srp_target_port *target = ch->target;
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&ch->lock, flags);
		ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
		if (rsp->tag == ch->tsk_mgmt_tag) {
			ch->tsk_mgmt_status = -1;
			if (be32_to_cpu(rsp->resp_data_len) >= 4)
				ch->tsk_mgmt_status = rsp->data[3];
			complete(&ch->tsk_mgmt_done);
		} else {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Received tsk mgmt response too late for tag %#llx\n",
				     rsp->tag);
		}
		spin_unlock_irqrestore(&ch->lock, flags);
	} else {
		scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag);
		if (scmnd && scmnd->host_scribble) {
			req = (void *)scmnd->host_scribble;
			scmnd = srp_claim_req(ch, req, NULL, scmnd);
		} else {
			scmnd = NULL;
		}
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %#016llx received on ch %td / QP %#x\n",
				     rsp->tag, ch - target->ch, ch->qp->qp_num);

			spin_lock_irqsave(&ch->lock, flags);
			ch->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&ch->lock, flags);

			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DIOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_in_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOOVER))
			scsi_set_resid(scmnd, -be32_to_cpu(rsp->data_out_res_cnt));

		srp_free_req(ch, req, scmnd,
			     be32_to_cpu(rsp->req_lim_delta));

		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}
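
/*
 * Send an initiator response (e.g. SRP_CRED_RSP or SRP_AER_RSP) back to the
 * target. The req_lim delta granted by the target request is banked before
 * allocating the IU; SRP_IU_RSP allocations do not consume a credit.
 */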
static int srp_response_common(struct srp_rdma_ch *ch, s32 req_delta,
			       void *rsp, int len)
{
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err;

	spin_lock_irqsave(&ch->lock, flags);
	ch->req_lim += req_delta;
	iu = __srp_get_tx_iu(ch, SRP_IU_RSP);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		return 1;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(ch, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(ch, iu, SRP_IU_RSP);
	}

	return err;
}

static void srp_process_cred_req(struct srp_rdma_ch *ch,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, ch->target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_rdma_ch *ch,
				struct srp_aer_req *req)
{
	struct srp_target_port *target = ch->target;
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun));

	if (srp_response_common(ch, delta, &rsp, sizeof(rsp)))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	int res;
	u8 opcode;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		srp_handle_qp_err(cq, wc, "RECV");
		return;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(ch, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(ch, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(ch, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(ch, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

/**
 * srp_tl_err_work() - handle a transport layer error
 * @work: Work structure embedded in an SRP target port.
 *
 * Note: This function may get invoked before the rport has been created,
 * hence the target->rport test.
 */
static void srp_tl_err_work(struct work_struct *work)
{
	struct srp_target_port *target;

	target = container_of(work, struct srp_target_port, tl_err_work);
	if (target->rport)
		srp_start_tl_fail_timers(target->rport);
}

static void srp_handle_qp_err(struct ib_cq *cq, struct ib_wc *wc,
		const char *opname)
{
	struct srp_rdma_ch *ch = cq->cq_context;
	struct srp_target_port *target = ch->target;

	if (ch->connected && !target->qp_in_error) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "failed %s status %s (%d) for CQE %p\n",
			     opname, ib_wc_status_msg(wc->status), wc->status,
			     wc->wr_cqe);
		queue_work(system_long_wq, &target->tl_err_work);
	}
	target->qp_in_error = true;
}

static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_rport *rport = target->rport;
	struct srp_rdma_ch *ch;
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	u32 tag;
	u16 idx;
	int len, ret;
	const bool in_scsi_eh = !in_interrupt() && current == shost->ehandler;

	/*
	 * The SCSI EH thread is the only context from which srp_queuecommand()
	 * can get invoked for blocked devices (SDEV_BLOCK /
	 * SDEV_CREATED_BLOCK). Avoid racing with srp_reconnect_rport() by
	 * locking the rport mutex if invoked from inside the SCSI EH.
	 */
	if (in_scsi_eh)
		mutex_lock(&rport->mutex);

	scmnd->result = srp_chkready(target->rport);
	if (unlikely(scmnd->result))
		goto err;

	WARN_ON_ONCE(scmnd->request->tag < 0);
	tag = blk_mq_unique_tag(scmnd->request);
	ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)];
	idx = blk_mq_unique_tag_to_tag(tag);
	WARN_ONCE(idx >= target->req_ring_size, "%s: tag %#x: idx %d >= %d\n",
		  dev_name(&shost->shost_gendev), tag, idx,
		  target->req_ring_size);

	spin_lock_irqsave(&ch->lock, flags);
	iu = __srp_get_tx_iu(ch, SRP_IU_CMD);
	spin_unlock_irqrestore(&ch->lock, flags);

	if (!iu)
		goto err;

	req = &ch->req_ring[idx];
	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	int_to_scsilun(scmnd->device->lun, &cmd->lun);
	cmd->tag    = tag;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;

	len = srp_map_data(scmnd, ch, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data (%d)\n", len);
		/*
		 * If we ran out of memory descriptors (-ENOMEM) because an
		 * application is queuing many requests with more than
		 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer
		 * to reduce queue depth temporarily.
		 */
		scmnd->result = len == -ENOMEM ?
			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(ch, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	ret = 0;

unlock_rport:
	if (in_scsi_eh)
		mutex_unlock(&rport->mutex);

	return ret;

err_unmap:
	srp_unmap_data(scmnd, ch, req);

err_iu:
	srp_put_tx_iu(ch, iu, SRP_IU_CMD);

	/*
	 * Avoid that the loops that iterate over the request ring can
	 * encounter a dangling SCSI command pointer.
	 */
	req->scmnd = NULL;

err:
	if (scmnd->result) {
		scmnd->scsi_done(scmnd);
		ret = 0;
	} else {
		ret = SCSI_MLQUEUE_HOST_BUSY;
	}

	goto unlock_rport;
}

/*
 * Note: the resources allocated in this function are freed in
 * srp_free_ch_ib().
 */
static int srp_alloc_iu_bufs(struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	int i;

	ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring),
			      GFP_KERNEL);
	if (!ch->rx_ring)
		goto err_no_ring;
	ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring),
			      GFP_KERNEL);
	if (!ch->tx_ring)
		goto err_no_ring;

	for (i = 0; i < target->queue_size; ++i) {
		ch->rx_ring[i] = srp_alloc_iu(target->srp_host,
					      ch->max_ti_iu_len,
					      GFP_KERNEL, DMA_FROM_DEVICE);
		if (!ch->rx_ring[i])
			goto err;
	}

	for (i = 0; i < target->queue_size; ++i) {
		ch->tx_ring[i] = srp_alloc_iu(target->srp_host,
					      target->max_iu_len,
					      GFP_KERNEL, DMA_TO_DEVICE);
		if (!ch->tx_ring[i])
			goto err;

		list_add(&ch->tx_ring[i]->list, &ch->free_tx);
	}

	return 0;

err:
	for (i = 0; i < target->queue_size; ++i) {
		srp_free_iu(target->srp_host, ch->rx_ring[i]);
		srp_free_iu(target->srp_host, ch->tx_ring[i]);
	}

err_no_ring:
	kfree(ch->tx_ring);
	ch->tx_ring = NULL;
	kfree(ch->rx_ring);
	ch->rx_ring = NULL;

	return -ENOMEM;
}
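
/*
 * Example of the computation below: with a typical local ACK timeout of 14
 * and retry_cnt = 7, T_tr = 4096 ns * 2^14 = 67.1 ms, the worst-case
 * completion time is 7 * 4 * 67.1 ms = 1.88 s, and the request queue timeout
 * becomes roughly 2.9 s after adding the one second margin.
 */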
static uint32_t srp_compute_rq_tmo(struct ib_qp_attr *qp_attr, int attr_mask)
{
	uint64_t T_tr_ns, max_compl_time_ms;
	uint32_t rq_tmo_jiffies;

	/*
	 * According to section 11.2.4.2 in the IBTA spec (Modify Queue Pair,
	 * table 91), both the QP timeout and the retry count have to be set
	 * for RC QP's during the RTR to RTS transition.
	 */
	WARN_ON_ONCE((attr_mask & (IB_QP_TIMEOUT | IB_QP_RETRY_CNT)) !=
		     (IB_QP_TIMEOUT | IB_QP_RETRY_CNT));

	/*
	 * Set target->rq_tmo_jiffies to one second more than the largest time
	 * it can take before an error completion is generated. See also
	 * C9-140..142 in the IBTA spec for more information about how to
	 * convert the QP Local ACK Timeout value to nanoseconds.
	 */
	T_tr_ns = 4096 * (1ULL << qp_attr->timeout);
	max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns;
	do_div(max_compl_time_ms, NSEC_PER_MSEC);
	rq_tmo_jiffies = msecs_to_jiffies(max_compl_time_ms + 1000);

	return rq_tmo_jiffies;
}
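
/*
 * Handle a CM REP: adopt the target's req_lim and maximum IU length from
 * the SRP_LOGIN_RSP, allocate the IU rings, transition the QP through
 * RTR and RTS, post the initial receives and acknowledge with an RTU.
 */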
static void srp_cm_rep_handler(struct ib_cm_id *cm_id,
			       const struct srp_login_rsp *lrsp,
			       struct srp_rdma_ch *ch)
{
	struct srp_target_port *target = ch->target;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int ret;
	int i;

	if (lrsp->opcode == SRP_LOGIN_RSP) {
		ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len);
		ch->req_lim       = be32_to_cpu(lrsp->req_lim_delta);

		/*
		 * Reserve credits for task management so we don't
		 * bounce requests back to the SCSI mid-layer.
		 */
		target->scsi_host->can_queue
			= min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE,
			      target->scsi_host->can_queue);
		target->scsi_host->cmd_per_lun
			= min_t(int, target->scsi_host->can_queue,
				target->scsi_host->cmd_per_lun);
	} else {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled RSP opcode %#x\n", lrsp->opcode);
		ret = -ECONNRESET;
		goto error;
	}

	if (!ch->rx_ring) {
		ret = srp_alloc_iu_bufs(ch);
		if (ret)
			goto error;
	}

	ret = -ENOMEM;
	qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
	if (!qp_attr)
		goto error;

	qp_attr->qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	for (i = 0; i < target->queue_size; i++) {
		struct srp_iu *iu = ch->rx_ring[i];

		ret = srp_post_recv(ch, iu);
		if (ret)
			goto error_free;
	}

	qp_attr->qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
	if (ret)
		goto error_free;

	target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask);

	ret = ib_modify_qp(ch->qp, qp_attr, attr_mask);
	if (ret)
		goto error_free;

	ret = ib_send_cm_rtu(cm_id, NULL, 0);

error_free:
	kfree(qp_attr);

error:
	ch->status = ret;
}
2391 static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
2392 struct ib_cm_event *event,
2393 struct srp_rdma_ch *ch)
2395 struct srp_target_port *target = ch->target;
2396 struct Scsi_Host *shost = target->scsi_host;
2397 struct ib_class_port_info *cpi;
2400 switch (event->param.rej_rcvd.reason) {
2401 case IB_CM_REJ_PORT_CM_REDIRECT:
2402 cpi = event->param.rej_rcvd.ari;
2403 sa_path_set_dlid(&ch->path, htonl(ntohs(cpi->redirect_lid)));
2404 ch->path.pkey = cpi->redirect_pkey;
2405 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
2406 memcpy(ch->path.dgid.raw, cpi->redirect_gid, 16);
2408 ch->status = sa_path_get_dlid(&ch->path) ?
2409 SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
2412 case IB_CM_REJ_PORT_REDIRECT:
2413 if (srp_target_is_topspin(target)) {
2415 * Topspin/Cisco SRP gateways incorrectly send
2416 * reject reason code 25 when they mean 24
2419 memcpy(ch->path.dgid.raw,
2420 event->param.rej_rcvd.ari, 16);
2422 shost_printk(KERN_DEBUG, shost,
2423 PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
2424 be64_to_cpu(ch->path.dgid.global.subnet_prefix),
2425 be64_to_cpu(ch->path.dgid.global.interface_id));
2427 ch->status = SRP_PORT_REDIRECT;
2429 shost_printk(KERN_WARNING, shost,
2430 " REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
2431 ch->status = -ECONNRESET;
2435 case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
2436 shost_printk(KERN_WARNING, shost,
2437 " REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
2438 ch->status = -ECONNRESET;
2441 case IB_CM_REJ_CONSUMER_DEFINED:
2442 opcode = *(u8 *) event->private_data;
2443 if (opcode == SRP_LOGIN_REJ) {
2444 struct srp_login_rej *rej = event->private_data;
2445 u32 reason = be32_to_cpu(rej->reason);
2447 if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
2448 shost_printk(KERN_WARNING, shost,
2449 PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
2451 shost_printk(KERN_WARNING, shost, PFX
2452 "SRP LOGIN from %pI6 to %pI6 REJECTED, reason 0x%08x\n",
2454 target->orig_dgid.raw, reason);
2456 shost_printk(KERN_WARNING, shost,
2457 " REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
2458 " opcode 0x%02x\n", opcode);
2459 ch->status = -ECONNRESET;
2462 case IB_CM_REJ_STALE_CONN:
2463 shost_printk(KERN_WARNING, shost, " REJ reason: stale connection\n");
2464 ch->status = SRP_STALE_CONN;
2468 shost_printk(KERN_WARNING, shost, " REJ reason 0x%x\n",
2469 event->param.rej_rcvd.reason);
2470 ch->status = -ECONNRESET;
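/*
 * srp_cm_handler() below dispatches IB CM events for a single RDMA channel.
 * Each event path records its outcome in ch->status; completing ch->done
 * lets the code that initiated the connection attempt observe that result.
 */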
2474 static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
2476 struct srp_rdma_ch *ch = cm_id->context;
2477 struct srp_target_port *target = ch->target;
2480 switch (event->event) {
2481 case IB_CM_REQ_ERROR:
2482 shost_printk(KERN_DEBUG, target->scsi_host,
2483 PFX "Sending CM REQ failed\n");
2485 ch->status = -ECONNRESET;
2488 case IB_CM_REP_RECEIVED:
2490 srp_cm_rep_handler(cm_id, event->private_data, ch);
2493 case IB_CM_REJ_RECEIVED:
2494 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
2497 srp_cm_rej_handler(cm_id, event, ch);
2500 case IB_CM_DREQ_RECEIVED:
2501 shost_printk(KERN_WARNING, target->scsi_host,
2502 PFX "DREQ received - connection closed\n");
2503 ch->connected = false;
2504 if (ib_send_cm_drep(cm_id, NULL, 0))
2505 shost_printk(KERN_ERR, target->scsi_host,
2506 PFX "Sending CM DREP failed\n");
2507 queue_work(system_long_wq, &target->tl_err_work);
2510 case IB_CM_TIMEWAIT_EXIT:
2511 shost_printk(KERN_ERR, target->scsi_host,
2512 PFX "connection closed\n");
2518 case IB_CM_MRA_RECEIVED:
2519 case IB_CM_DREQ_ERROR:
2520 case IB_CM_DREP_RECEIVED:
2524 shost_printk(KERN_WARNING, target->scsi_host,
2525 PFX "Unhandled CM event %d\n", event->event);
2530 complete(&ch->done);
2536 * srp_change_queue_depth - set the queue depth of a SCSI device
2537 * @sdev: SCSI device struct.
2538 * @qdepth: Requested queue depth.
2540 * Returns the new queue depth.
2543 srp_change_queue_depth(struct scsi_device *sdev, int qdepth)
2545 if (!sdev->tagged_supported)
2547 return scsi_change_queue_depth(sdev, qdepth);
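/*
 * srp_send_tsk_mgmt() below sends a single SRP task management request and
 * waits up to SRP_ABORT_TIMEOUT_MS for the response. It returns 0 on
 * success and -1 if the send failed or no response arrived in time.
 */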
2550 static int srp_send_tsk_mgmt(struct srp_rdma_ch *ch, u64 req_tag, u64 lun,
2551 u8 func, u8 *status)
2553 struct srp_target_port *target = ch->target;
2554 struct srp_rport *rport = target->rport;
2555 struct ib_device *dev = target->srp_host->srp_dev->dev;
2557 struct srp_tsk_mgmt *tsk_mgmt;
2560 if (!ch->connected || target->qp_in_error)
2564 * Lock the rport mutex to prevent srp_create_ch_ib() from being
2565 * invoked while a task management function is being sent.
2567 mutex_lock(&rport->mutex);
2568 spin_lock_irq(&ch->lock);
2569 iu = __srp_get_tx_iu(ch, SRP_IU_TSK_MGMT);
2570 spin_unlock_irq(&ch->lock);
2573 mutex_unlock(&rport->mutex);
2578 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
2581 memset(tsk_mgmt, 0, sizeof *tsk_mgmt);
2583 tsk_mgmt->opcode = SRP_TSK_MGMT;
2584 int_to_scsilun(lun, &tsk_mgmt->lun);
2585 tsk_mgmt->tsk_mgmt_func = func;
2586 tsk_mgmt->task_tag = req_tag;
2588 spin_lock_irq(&ch->lock);
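	/*
	 * Advance the tag under the channel lock; the SRP_TAG_TSK_MGMT bit
	 * marks the tag as belonging to a task management request so that
	 * its response can be told apart from SCSI command completions.
	 */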
2589 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT;
2590 tsk_mgmt->tag = ch->tsk_mgmt_tag;
2591 spin_unlock_irq(&ch->lock);
2593 init_completion(&ch->tsk_mgmt_done);
2595 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
2597 if (srp_post_send(ch, iu, sizeof(*tsk_mgmt))) {
2598 srp_put_tx_iu(ch, iu, SRP_IU_TSK_MGMT);
2599 mutex_unlock(&rport->mutex);
2603 res = wait_for_completion_timeout(&ch->tsk_mgmt_done,
2604 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS));
2605 if (res > 0 && status)
2606 *status = ch->tsk_mgmt_status;
2607 mutex_unlock(&rport->mutex);
2609 WARN_ON_ONCE(res < 0);
2611 return res > 0 ? 0 : -1;
2614 static int srp_abort(struct scsi_cmnd *scmnd)
2616 struct srp_target_port *target = host_to_target(scmnd->device->host);
2617 struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
2620 struct srp_rdma_ch *ch;
2623 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
2627 tag = blk_mq_unique_tag(scmnd->request);
2628 ch_idx = blk_mq_unique_tag_to_hwq(tag);
2629 if (WARN_ON_ONCE(ch_idx >= target->ch_count))
2631 ch = &target->ch[ch_idx];
2632 if (!srp_claim_req(ch, req, NULL, scmnd))
2634 shost_printk(KERN_ERR, target->scsi_host,
2635 "Sending SRP abort for tag %#x\n", tag);
2636 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun,
2637 SRP_TSK_ABORT_TASK, NULL) == 0)
2639 else if (target->rport->state == SRP_RPORT_LOST)
2643 srp_free_req(ch, req, scmnd, 0);
2644 scmnd->result = DID_ABORT << 16;
2645 scmnd->scsi_done(scmnd);
2650 static int srp_reset_device(struct scsi_cmnd *scmnd)
2652 struct srp_target_port *target = host_to_target(scmnd->device->host);
2653 struct srp_rdma_ch *ch;
2657 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
2659 ch = &target->ch[0];
2660 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun,
2661 SRP_TSK_LUN_RESET, &status))
2666 for (i = 0; i < target->ch_count; i++) {
2667 ch = &target->ch[i];
2668 for (j = 0; j < target->req_ring_size; ++j) {
2669 struct srp_request *req = &ch->req_ring[j];
2671 srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
2678 static int srp_reset_host(struct scsi_cmnd *scmnd)
2680 struct srp_target_port *target = host_to_target(scmnd->device->host);
2682 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");
2684 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED;
2687 static int srp_slave_alloc(struct scsi_device *sdev)
2689 struct Scsi_Host *shost = sdev->host;
2690 struct srp_target_port *target = host_to_target(shost);
2691 struct srp_device *srp_dev = target->srp_host->srp_dev;
2694 blk_queue_virt_boundary(sdev->request_queue,
2695 ~srp_dev->mr_page_mask);
2700 static int srp_slave_configure(struct scsi_device *sdev)
2702 struct Scsi_Host *shost = sdev->host;
2703 struct srp_target_port *target = host_to_target(shost);
2704 struct request_queue *q = sdev->request_queue;
2705 unsigned long timeout;
2707 if (sdev->type == TYPE_DISK) {
2708 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies);
2709 blk_queue_rq_timeout(q, timeout);
2715 static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
2718 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2720 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
2723 static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
2726 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2728 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid));
2731 static ssize_t show_service_id(struct device *dev,
2732 struct device_attribute *attr, char *buf)
2734 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2736 return sprintf(buf, "0x%016llx\n", be64_to_cpu(target->service_id));
2739 static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
2742 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2744 return sprintf(buf, "0x%04x\n", be16_to_cpu(target->pkey));
2747 static ssize_t show_sgid(struct device *dev, struct device_attribute *attr,
2750 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2752 return sprintf(buf, "%pI6\n", target->sgid.raw);
2755 static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
2758 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2759 struct srp_rdma_ch *ch = &target->ch[0];
2761 return sprintf(buf, "%pI6\n", ch->path.dgid.raw);
2764 static ssize_t show_orig_dgid(struct device *dev,
2765 struct device_attribute *attr, char *buf)
2767 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2769 return sprintf(buf, "%pI6\n", target->orig_dgid.raw);
2772 static ssize_t show_req_lim(struct device *dev,
2773 struct device_attribute *attr, char *buf)
2775 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2776 struct srp_rdma_ch *ch;
2777 int i, req_lim = INT_MAX;
2779 for (i = 0; i < target->ch_count; i++) {
2780 ch = &target->ch[i];
2781 req_lim = min(req_lim, ch->req_lim);
2783 return sprintf(buf, "%d\n", req_lim);
2786 static ssize_t show_zero_req_lim(struct device *dev,
2787 struct device_attribute *attr, char *buf)
2789 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2791 return sprintf(buf, "%d\n", target->zero_req_lim);
2794 static ssize_t show_local_ib_port(struct device *dev,
2795 struct device_attribute *attr, char *buf)
2797 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2799 return sprintf(buf, "%d\n", target->srp_host->port);
2802 static ssize_t show_local_ib_device(struct device *dev,
2803 struct device_attribute *attr, char *buf)
2805 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2807 return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
2810 static ssize_t show_ch_count(struct device *dev, struct device_attribute *attr,
2813 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2815 return sprintf(buf, "%d\n", target->ch_count);
2818 static ssize_t show_comp_vector(struct device *dev,
2819 struct device_attribute *attr, char *buf)
2821 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2823 return sprintf(buf, "%d\n", target->comp_vector);
2826 static ssize_t show_tl_retry_count(struct device *dev,
2827 struct device_attribute *attr, char *buf)
2829 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2831 return sprintf(buf, "%d\n", target->tl_retry_count);
2834 static ssize_t show_cmd_sg_entries(struct device *dev,
2835 struct device_attribute *attr, char *buf)
2837 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2839 return sprintf(buf, "%u\n", target->cmd_sg_cnt);
2842 static ssize_t show_allow_ext_sg(struct device *dev,
2843 struct device_attribute *attr, char *buf)
2845 struct srp_target_port *target = host_to_target(class_to_shost(dev));
2847 return sprintf(buf, "%s\n", target->allow_ext_sg ? "true" : "false");
2850 static DEVICE_ATTR(id_ext, S_IRUGO, show_id_ext, NULL);
2851 static DEVICE_ATTR(ioc_guid, S_IRUGO, show_ioc_guid, NULL);
2852 static DEVICE_ATTR(service_id, S_IRUGO, show_service_id, NULL);
2853 static DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
2854 static DEVICE_ATTR(sgid, S_IRUGO, show_sgid, NULL);
2855 static DEVICE_ATTR(dgid, S_IRUGO, show_dgid, NULL);
2856 static DEVICE_ATTR(orig_dgid, S_IRUGO, show_orig_dgid, NULL);
2857 static DEVICE_ATTR(req_lim, S_IRUGO, show_req_lim, NULL);
2858 static DEVICE_ATTR(zero_req_lim, S_IRUGO, show_zero_req_lim, NULL);
2859 static DEVICE_ATTR(local_ib_port, S_IRUGO, show_local_ib_port, NULL);
2860 static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);
2861 static DEVICE_ATTR(ch_count, S_IRUGO, show_ch_count, NULL);
2862 static DEVICE_ATTR(comp_vector, S_IRUGO, show_comp_vector, NULL);
2863 static DEVICE_ATTR(tl_retry_count, S_IRUGO, show_tl_retry_count, NULL);
2864 static DEVICE_ATTR(cmd_sg_entries, S_IRUGO, show_cmd_sg_entries, NULL);
2865 static DEVICE_ATTR(allow_ext_sg, S_IRUGO, show_allow_ext_sg, NULL);
2867 static struct device_attribute *srp_host_attrs[] = {
2870 &dev_attr_service_id,
2874 &dev_attr_orig_dgid,
2876 &dev_attr_zero_req_lim,
2877 &dev_attr_local_ib_port,
2878 &dev_attr_local_ib_device,
2880 &dev_attr_comp_vector,
2881 &dev_attr_tl_retry_count,
2882 &dev_attr_cmd_sg_entries,
2883 &dev_attr_allow_ext_sg,
2887 static struct scsi_host_template srp_template = {
2888 .module = THIS_MODULE,
2889 .name = "InfiniBand SRP initiator",
2890 .proc_name = DRV_NAME,
2891 .slave_alloc = srp_slave_alloc,
2892 .slave_configure = srp_slave_configure,
2893 .info = srp_target_info,
2894 .queuecommand = srp_queuecommand,
2895 .change_queue_depth = srp_change_queue_depth,
2896 .eh_timed_out = srp_timed_out,
2897 .eh_abort_handler = srp_abort,
2898 .eh_device_reset_handler = srp_reset_device,
2899 .eh_host_reset_handler = srp_reset_host,
2900 .skip_settle_delay = true,
2901 .sg_tablesize = SRP_DEF_SG_TABLESIZE,
2902 .can_queue = SRP_DEFAULT_CMD_SQ_SIZE,
2904 .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE,
2905 .use_clustering = ENABLE_CLUSTERING,
2906 .shost_attrs = srp_host_attrs,
2907 .track_queue_depth = 1,
2910 static int srp_sdev_count(struct Scsi_Host *host)
2912 struct scsi_device *sdev;
2915 shost_for_each_device(sdev, host)
2923 * < 0 upon failure. Caller is responsible for SRP target port cleanup.
2924 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
2925 * removal has been scheduled.
2926 * 0 and target->state != SRP_TARGET_REMOVED upon success.
2928 static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
2930 struct srp_rport_identifiers ids;
2931 struct srp_rport *rport;
2933 target->state = SRP_TARGET_SCANNING;
2934 sprintf(target->target_name, "SRP.T10:%016llX",
2935 be64_to_cpu(target->id_ext));
2937 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent))
2940 memcpy(ids.port_id, &target->id_ext, 8);
2941 memcpy(ids.port_id + 8, &target->ioc_guid, 8);
2942 ids.roles = SRP_RPORT_ROLE_TARGET;
2943 rport = srp_rport_add(target->scsi_host, &ids);
2944 if (IS_ERR(rport)) {
2945 scsi_remove_host(target->scsi_host);
2946 return PTR_ERR(rport);
2949 rport->lld_data = target;
2950 target->rport = rport;
2952 spin_lock(&host->target_lock);
2953 list_add_tail(&target->list, &host->target_list);
2954 spin_unlock(&host->target_lock);
2956 scsi_scan_target(&target->scsi_host->shost_gendev,
2957 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
2959 if (srp_connected_ch(target) < target->ch_count ||
2960 target->qp_in_error) {
2961 shost_printk(KERN_INFO, target->scsi_host,
2962 PFX "SCSI scan failed - removing SCSI host\n");
2963 srp_queue_remove_work(target);
2967 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n",
2968 dev_name(&target->scsi_host->shost_gendev),
2969 srp_sdev_count(target->scsi_host));
2971 spin_lock_irq(&target->lock);
2972 if (target->state == SRP_TARGET_SCANNING)
2973 target->state = SRP_TARGET_LIVE;
2974 spin_unlock_irq(&target->lock);
2980 static void srp_release_dev(struct device *dev)
2982 struct srp_host *host =
2983 container_of(dev, struct srp_host, dev);
2985 complete(&host->released);
2988 static struct class srp_class = {
2989 .name = "infiniband_srp",
2990 .dev_release = srp_release_dev
2994 * srp_conn_unique() - check whether the connection to a target is unique
2996 * @target: SRP target port.
2998 static bool srp_conn_unique(struct srp_host *host,
2999 struct srp_target_port *target)
3001 struct srp_target_port *t;
3004 if (target->state == SRP_TARGET_REMOVED)
3009 spin_lock(&host->target_lock);
3010 list_for_each_entry(t, &host->target_list, list) {
3012 target->id_ext == t->id_ext &&
3013 target->ioc_guid == t->ioc_guid &&
3014 target->initiator_ext == t->initiator_ext) {
3019 spin_unlock(&host->target_lock);
3026 * Target ports are added by writing
3028 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
3029 * pkey=<P_Key>,service_id=<service ID>
3031 * to the add_target sysfs attribute.
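 *
 * For example (all parameter values and the "mthca0" device name below are
 * illustrative only; the host name follows the srp-<ibdev>-<port> pattern
 * used by srp_add_port()):
 *
 *     echo "id_ext=200100e08b000001,ioc_guid=00a0b80200402bd5,dgid=fe8000000000000000a0b80200402bd5,pkey=ffff,service_id=00a0b80200402bd5" > /sys/class/infiniband_srp/srp-mthca0-1/add_target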
3035 SRP_OPT_ID_EXT = 1 << 0,
3036 SRP_OPT_IOC_GUID = 1 << 1,
3037 SRP_OPT_DGID = 1 << 2,
3038 SRP_OPT_PKEY = 1 << 3,
3039 SRP_OPT_SERVICE_ID = 1 << 4,
3040 SRP_OPT_MAX_SECT = 1 << 5,
3041 SRP_OPT_MAX_CMD_PER_LUN = 1 << 6,
3042 SRP_OPT_IO_CLASS = 1 << 7,
3043 SRP_OPT_INITIATOR_EXT = 1 << 8,
3044 SRP_OPT_CMD_SG_ENTRIES = 1 << 9,
3045 SRP_OPT_ALLOW_EXT_SG = 1 << 10,
3046 SRP_OPT_SG_TABLESIZE = 1 << 11,
3047 SRP_OPT_COMP_VECTOR = 1 << 12,
3048 SRP_OPT_TL_RETRY_COUNT = 1 << 13,
3049 SRP_OPT_QUEUE_SIZE = 1 << 14,
3050 SRP_OPT_ALL = (SRP_OPT_ID_EXT |
3054 SRP_OPT_SERVICE_ID),
3057 static const match_table_t srp_opt_tokens = {
3058 { SRP_OPT_ID_EXT, "id_ext=%s" },
3059 { SRP_OPT_IOC_GUID, "ioc_guid=%s" },
3060 { SRP_OPT_DGID, "dgid=%s" },
3061 { SRP_OPT_PKEY, "pkey=%x" },
3062 { SRP_OPT_SERVICE_ID, "service_id=%s" },
3063 { SRP_OPT_MAX_SECT, "max_sect=%d" },
3064 { SRP_OPT_MAX_CMD_PER_LUN, "max_cmd_per_lun=%d" },
3065 { SRP_OPT_IO_CLASS, "io_class=%x" },
3066 { SRP_OPT_INITIATOR_EXT, "initiator_ext=%s" },
3067 { SRP_OPT_CMD_SG_ENTRIES, "cmd_sg_entries=%u" },
3068 { SRP_OPT_ALLOW_EXT_SG, "allow_ext_sg=%u" },
3069 { SRP_OPT_SG_TABLESIZE, "sg_tablesize=%u" },
3070 { SRP_OPT_COMP_VECTOR, "comp_vector=%u" },
3071 { SRP_OPT_TL_RETRY_COUNT, "tl_retry_count=%u" },
3072 { SRP_OPT_QUEUE_SIZE, "queue_size=%d" },
3073 { SRP_OPT_ERR, NULL }
3076 static int srp_parse_options(const char *buf, struct srp_target_port *target)
3078 char *options, *sep_opt;
3081 substring_t args[MAX_OPT_ARGS];
3087 options = kstrdup(buf, GFP_KERNEL);
3092 while ((p = strsep(&sep_opt, ",\n")) != NULL) {
3096 token = match_token(p, srp_opt_tokens, args);
3100 case SRP_OPT_ID_EXT:
3101 p = match_strdup(args);
3106 target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3110 case SRP_OPT_IOC_GUID:
3111 p = match_strdup(args);
3116 target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
3121 p = match_strdup(args);
3126 if (strlen(p) != 32) {
3127 pr_warn("bad dest GID parameter '%s'\n", p);
3132 for (i = 0; i < 16; ++i) {
3133 strlcpy(dgid, p + i * 2, sizeof(dgid));
3134 if (sscanf(dgid, "%hhx",
3135 &target->orig_dgid.raw[i]) < 1) {
3145 if (match_hex(args, &token)) {
3146 pr_warn("bad P_Key parameter '%s'\n", p);
3149 target->pkey = cpu_to_be16(token);
3152 case SRP_OPT_SERVICE_ID:
3153 p = match_strdup(args);
3158 target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
3162 case SRP_OPT_MAX_SECT:
3163 if (match_int(args, &token)) {
3164 pr_warn("bad max sect parameter '%s'\n", p);
3167 target->scsi_host->max_sectors = token;
3170 case SRP_OPT_QUEUE_SIZE:
3171 if (match_int(args, &token) || token < 1) {
3172 pr_warn("bad queue_size parameter '%s'\n", p);
3175 target->scsi_host->can_queue = token;
3176 target->queue_size = token + SRP_RSP_SQ_SIZE +
3177 SRP_TSK_MGMT_SQ_SIZE;
3178 if (!(opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3179 target->scsi_host->cmd_per_lun = token;
3182 case SRP_OPT_MAX_CMD_PER_LUN:
3183 if (match_int(args, &token) || token < 1) {
3184 pr_warn("bad max cmd_per_lun parameter '%s'\n",
3188 target->scsi_host->cmd_per_lun = token;
3191 case SRP_OPT_IO_CLASS:
3192 if (match_hex(args, &token)) {
3193 pr_warn("bad IO class parameter '%s'\n", p);
3196 if (token != SRP_REV10_IB_IO_CLASS &&
3197 token != SRP_REV16A_IB_IO_CLASS) {
3198 pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
3199 token, SRP_REV10_IB_IO_CLASS,
3200 SRP_REV16A_IB_IO_CLASS);
3203 target->io_class = token;
3206 case SRP_OPT_INITIATOR_EXT:
3207 p = match_strdup(args);
3212 target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
3216 case SRP_OPT_CMD_SG_ENTRIES:
3217 if (match_int(args, &token) || token < 1 || token > 255) {
3218 pr_warn("bad max cmd_sg_entries parameter '%s'\n",
3222 target->cmd_sg_cnt = token;
3225 case SRP_OPT_ALLOW_EXT_SG:
3226 if (match_int(args, &token)) {
3227 pr_warn("bad allow_ext_sg parameter '%s'\n", p);
3230 target->allow_ext_sg = !!token;
3233 case SRP_OPT_SG_TABLESIZE:
3234 if (match_int(args, &token) || token < 1 ||
3235 token > SG_MAX_SEGMENTS) {
3236 pr_warn("bad max sg_tablesize parameter '%s'\n",
3240 target->sg_tablesize = token;
3243 case SRP_OPT_COMP_VECTOR:
3244 if (match_int(args, &token) || token < 0) {
3245 pr_warn("bad comp_vector parameter '%s'\n", p);
3248 target->comp_vector = token;
3251 case SRP_OPT_TL_RETRY_COUNT:
3252 if (match_int(args, &token) || token < 2 || token > 7) {
3253 pr_warn("bad tl_retry_count parameter '%s' (must be a number between 2 and 7)\n",
3257 target->tl_retry_count = token;
3261 pr_warn("unknown parameter or missing value '%s' in target creation request\n",
3267 if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
3270 for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
3271 if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
3272 !(srp_opt_tokens[i].token & opt_mask))
3273 pr_warn("target creation request is missing parameter '%s'\n",
3274 srp_opt_tokens[i].pattern);
3276 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue
3277 && (opt_mask & SRP_OPT_MAX_CMD_PER_LUN))
3278 pr_warn("cmd_per_lun = %d > queue_size = %d\n",
3279 target->scsi_host->cmd_per_lun,
3280 target->scsi_host->can_queue);
3287 static ssize_t srp_create_target(struct device *dev,
3288 struct device_attribute *attr,
3289 const char *buf, size_t count)
3291 struct srp_host *host =
3292 container_of(dev, struct srp_host, dev);
3293 struct Scsi_Host *target_host;
3294 struct srp_target_port *target;
3295 struct srp_rdma_ch *ch;
3296 struct srp_device *srp_dev = host->srp_dev;
3297 struct ib_device *ibdev = srp_dev->dev;
3298 int ret, node_idx, node, cpu, i;
3299 unsigned int max_sectors_per_mr, mr_per_cmd = 0;
3300 bool multich = false;
3302 target_host = scsi_host_alloc(&srp_template,
3303 sizeof (struct srp_target_port));
3307 target_host->transportt = ib_srp_transport_template;
3308 target_host->max_channel = 0;
3309 target_host->max_id = 1;
3310 target_host->max_lun = -1LL;
3311 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
3313 target = host_to_target(target_host);
3315 target->io_class = SRP_REV16A_IB_IO_CLASS;
3316 target->scsi_host = target_host;
3317 target->srp_host = host;
3318 target->pd = host->srp_dev->pd;
3319 target->lkey = host->srp_dev->pd->local_dma_lkey;
3320 target->cmd_sg_cnt = cmd_sg_entries;
3321 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries;
3322 target->allow_ext_sg = allow_ext_sg;
3323 target->tl_retry_count = 7;
3324 target->queue_size = SRP_DEFAULT_QUEUE_SIZE;
3327 * Prevent the SCSI host from being removed by srp_remove_target()
3328 * before this function returns.
3330 scsi_host_get(target->scsi_host);
3332 ret = mutex_lock_interruptible(&host->add_target_mutex);
3336 ret = srp_parse_options(buf, target);
3340 target->req_ring_size = target->queue_size - SRP_TSK_MGMT_SQ_SIZE;
3342 if (!srp_conn_unique(target->srp_host, target)) {
3343 shost_printk(KERN_INFO, target->scsi_host,
3344 PFX "Already connected to target port with id_ext=%016llx;ioc_guid=%016llx;initiator_ext=%016llx\n",
3345 be64_to_cpu(target->id_ext),
3346 be64_to_cpu(target->ioc_guid),
3347 be64_to_cpu(target->initiator_ext));
3352 if (!srp_dev->has_fmr && !srp_dev->has_fr && !target->allow_ext_sg &&
3353 target->cmd_sg_cnt < target->sg_tablesize) {
3354 pr_warn("No MR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
3355 target->sg_tablesize = target->cmd_sg_cnt;
3358 if (srp_dev->use_fast_reg || srp_dev->use_fmr) {
3360 * FR and FMR can only map one HCA page per entry. If the
3361 * start address is not aligned on an HCA page boundary, two
3362 * entries will be used for the head and the tail although
3363 * these two entries combined contain at most one HCA page of
3364 * data. Hence the "+ 1" in the calculation below.
3366 * The indirect data buffer descriptor is contiguous so the
3367 * memory for that buffer will only be registered if
3368 * register_always is true. Hence add one to mr_per_cmd if
3369 * register_always has been set.
3371 max_sectors_per_mr = srp_dev->max_pages_per_mr <<
3372 (ilog2(srp_dev->mr_page_size) - 9);
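		/*
		 * As an illustration: with 4 KiB MR pages and 256 pages per
		 * MR, each MR covers 1 MiB, i.e. max_sectors_per_mr =
		 * 256 << (12 - 9) = 2048 512-byte sectors.
		 */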
3373 mr_per_cmd = register_always +
3374 (target->scsi_host->max_sectors + 1 +
3375 max_sectors_per_mr - 1) / max_sectors_per_mr;
3376 pr_debug("max_sectors = %u; max_pages_per_mr = %u; mr_page_size = %u; max_sectors_per_mr = %u; mr_per_cmd = %u\n",
3377 target->scsi_host->max_sectors,
3378 srp_dev->max_pages_per_mr, srp_dev->mr_page_size,
3379 max_sectors_per_mr, mr_per_cmd);
3382 target_host->sg_tablesize = target->sg_tablesize;
3383 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd;
3384 target->mr_per_cmd = mr_per_cmd;
3385 target->indirect_size = target->sg_tablesize *
3386 sizeof (struct srp_direct_buf);
3387 target->max_iu_len = sizeof (struct srp_cmd) +
3388 sizeof (struct srp_indirect_buf) +
3389 target->cmd_sg_cnt * sizeof (struct srp_direct_buf);
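	/*
	 * With the default cmd_sg_entries of 12, and assuming the usual sizes
	 * of struct srp_cmd (48 bytes), struct srp_indirect_buf (20 bytes)
	 * and struct srp_direct_buf (16 bytes), this works out to
	 * 48 + 20 + 12 * 16 = 260 bytes per information unit.
	 */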
3391 INIT_WORK(&target->tl_err_work, srp_tl_err_work);
3392 INIT_WORK(&target->remove_work, srp_remove_work);
3393 spin_lock_init(&target->lock);
3394 ret = ib_query_gid(ibdev, host->port, 0, &target->sgid, NULL);
3399 target->ch_count = max_t(unsigned, num_online_nodes(),
3401 min(4 * num_online_nodes(),
3402 ibdev->num_comp_vectors),
3403 num_online_cpus()));
3404 target->ch = kcalloc(target->ch_count, sizeof(*target->ch),
3410 for_each_online_node(node) {
3411 const int ch_start = (node_idx * target->ch_count /
3412 num_online_nodes());
3413 const int ch_end = ((node_idx + 1) * target->ch_count /
3414 num_online_nodes());
3415 const int cv_start = (node_idx * ibdev->num_comp_vectors /
3416 num_online_nodes() + target->comp_vector)
3417 % ibdev->num_comp_vectors;
3418 const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
3419 num_online_nodes() + target->comp_vector)
3420 % ibdev->num_comp_vectors;
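		/*
		 * Sketch of the resulting distribution, for illustration:
		 * with two online NUMA nodes, eight channels, eight
		 * completion vectors, comp_vector == 0 and four CPUs per
		 * node, node 0 is assigned channels 0..3 and completion
		 * vectors 0..3 while node 1 gets channels 4..7 and vectors
		 * 4..7.
		 */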
3423 for_each_online_cpu(cpu) {
3424 if (cpu_to_node(cpu) != node)
3426 if (ch_start + cpu_idx >= ch_end)
3428 ch = &target->ch[ch_start + cpu_idx];
3429 ch->target = target;
3430 ch->comp_vector = cv_start == cv_end ? cv_start :
3431 cv_start + cpu_idx % (cv_end - cv_start);
3432 spin_lock_init(&ch->lock);
3433 INIT_LIST_HEAD(&ch->free_tx);
3434 ret = srp_new_cm_id(ch);
3436 goto err_disconnect;
3438 ret = srp_create_ch_ib(ch);
3440 goto err_disconnect;
3442 ret = srp_alloc_req_data(ch);
3444 goto err_disconnect;
3446 ret = srp_connect_ch(ch, multich);
3448 shost_printk(KERN_ERR, target->scsi_host,
3449 PFX "Connection %d/%d to %pI6 failed\n",
3452 ch->target->orig_dgid.raw);
3453 if (node_idx == 0 && cpu_idx == 0) {
3456 srp_free_ch_ib(target, ch);
3457 srp_free_req_data(target, ch);
3458 target->ch_count = ch - target->ch;
3470 target->scsi_host->nr_hw_queues = target->ch_count;
3472 ret = srp_add_target(host, target);
3474 goto err_disconnect;
3476 if (target->state != SRP_TARGET_REMOVED) {
3477 shost_printk(KERN_DEBUG, target->scsi_host, PFX
3478 "new target: id_ext %016llx ioc_guid %016llx pkey %04x service_id %016llx sgid %pI6 dgid %pI6\n",
3479 be64_to_cpu(target->id_ext),
3480 be64_to_cpu(target->ioc_guid),
3481 be16_to_cpu(target->pkey),
3482 be64_to_cpu(target->service_id),
3483 target->sgid.raw, target->orig_dgid.raw);
3489 mutex_unlock(&host->add_target_mutex);
3492 scsi_host_put(target->scsi_host);
3494 scsi_host_put(target->scsi_host);
3499 srp_disconnect_target(target);
3502 for (i = 0; i < target->ch_count; i++) {
3503 ch = &target->ch[i];
3504 srp_free_ch_ib(target, ch);
3505 srp_free_req_data(target, ch);
3512 static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);
3514 static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
3517 struct srp_host *host = container_of(dev, struct srp_host, dev);
3519 return sprintf(buf, "%s\n", host->srp_dev->dev->name);
3522 static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
3524 static ssize_t show_port(struct device *dev, struct device_attribute *attr,
3527 struct srp_host *host = container_of(dev, struct srp_host, dev);
3529 return sprintf(buf, "%d\n", host->port);
3532 static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
3534 static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
3536 struct srp_host *host;
3538 host = kzalloc(sizeof *host, GFP_KERNEL);
3542 INIT_LIST_HEAD(&host->target_list);
3543 spin_lock_init(&host->target_lock);
3544 init_completion(&host->released);
3545 mutex_init(&host->add_target_mutex);
3546 host->srp_dev = device;
3549 host->dev.class = &srp_class;
3550 host->dev.parent = device->dev->dev.parent;
3551 dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);
3553 if (device_register(&host->dev))
3555 if (device_create_file(&host->dev, &dev_attr_add_target))
3557 if (device_create_file(&host->dev, &dev_attr_ibdev))
3559 if (device_create_file(&host->dev, &dev_attr_port))
3565 device_unregister(&host->dev);
3573 static void srp_add_one(struct ib_device *device)
3575 struct srp_device *srp_dev;
3576 struct ib_device_attr *attr = &device->attrs;
3577 struct srp_host *host;
3578 int mr_page_shift, p;
3579 u64 max_pages_per_mr;
3580 unsigned int flags = 0;
3582 srp_dev = kzalloc(sizeof(*srp_dev), GFP_KERNEL);
3587 * Use the smallest page size supported by the HCA, down to a
3588 * minimum of 4096 bytes. We're unlikely to build large sglists
3589 * out of smaller entries.
3591 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1);
3592 srp_dev->mr_page_size = 1 << mr_page_shift;
3593 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1);
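	/*
	 * For example, a hypothetical page_size_cap of 0x001ff000 (4 KiB
	 * through 1 MiB pages) yields ffs() == 13, hence mr_page_shift = 12
	 * and mr_page_size = 4096.
	 */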
3594 max_pages_per_mr = attr->max_mr_size;
3595 do_div(max_pages_per_mr, srp_dev->mr_page_size);
3596 pr_debug("%s: %llu / %u = %llu <> %u\n", __func__,
3597 attr->max_mr_size, srp_dev->mr_page_size,
3598 max_pages_per_mr, SRP_MAX_PAGES_PER_MR);
3599 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR,
3602 srp_dev->has_fmr = (device->alloc_fmr && device->dealloc_fmr &&
3603 device->map_phys_fmr && device->unmap_fmr);
3604 srp_dev->has_fr = (attr->device_cap_flags &
3605 IB_DEVICE_MEM_MGT_EXTENSIONS);
3606 if (!never_register && !srp_dev->has_fmr && !srp_dev->has_fr) {
3607 dev_warn(&device->dev, "neither FMR nor FR is supported\n");
3608 } else if (!never_register &&
3609 attr->max_mr_size >= 2 * srp_dev->mr_page_size) {
3610 srp_dev->use_fast_reg = (srp_dev->has_fr &&
3611 (!srp_dev->has_fmr || prefer_fr));
3612 srp_dev->use_fmr = !srp_dev->use_fast_reg && srp_dev->has_fmr;
3615 if (never_register || !register_always ||
3616 (!srp_dev->has_fmr && !srp_dev->has_fr))
3617 flags |= IB_PD_UNSAFE_GLOBAL_RKEY;
3619 if (srp_dev->use_fast_reg) {
3620 srp_dev->max_pages_per_mr =
3621 min_t(u32, srp_dev->max_pages_per_mr,
3622 attr->max_fast_reg_page_list_len);
3624 srp_dev->mr_max_size = srp_dev->mr_page_size *
3625 srp_dev->max_pages_per_mr;
3626 pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len = %u, max_pages_per_mr = %d, mr_max_size = %#x\n",
3627 device->name, mr_page_shift, attr->max_mr_size,
3628 attr->max_fast_reg_page_list_len,
3629 srp_dev->max_pages_per_mr, srp_dev->mr_max_size);
3631 INIT_LIST_HEAD(&srp_dev->dev_list);
3633 srp_dev->dev = device;
3634 srp_dev->pd = ib_alloc_pd(device, flags);
3635 if (IS_ERR(srp_dev->pd))
3639 for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) {
3640 host = srp_add_port(srp_dev, p);
3642 list_add_tail(&host->list, &srp_dev->dev_list);
3645 ib_set_client_data(device, &srp_client, srp_dev);
3652 static void srp_remove_one(struct ib_device *device, void *client_data)
3654 struct srp_device *srp_dev;
3655 struct srp_host *host, *tmp_host;
3656 struct srp_target_port *target;
3658 srp_dev = client_data;
3662 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
3663 device_unregister(&host->dev);
3665 * Wait for the sysfs entry to go away, so that no new
3666 * target ports can be created.
3668 wait_for_completion(&host->released);
3671 * Remove all target ports.
3673 spin_lock(&host->target_lock);
3674 list_for_each_entry(target, &host->target_list, list)
3675 srp_queue_remove_work(target);
3676 spin_unlock(&host->target_lock);
3679 * Wait for tl_err and target port removal tasks.
3681 flush_workqueue(system_long_wq);
3682 flush_workqueue(srp_remove_wq);
3687 ib_dealloc_pd(srp_dev->pd);
3692 static struct srp_function_template ib_srp_transport_functions = {
3693 .has_rport_state = true,
3694 .reset_timer_if_blocked = true,
3695 .reconnect_delay = &srp_reconnect_delay,
3696 .fast_io_fail_tmo = &srp_fast_io_fail_tmo,
3697 .dev_loss_tmo = &srp_dev_loss_tmo,
3698 .reconnect = srp_rport_reconnect,
3699 .rport_delete = srp_rport_delete,
3700 .terminate_rport_io = srp_terminate_io,
3703 static int __init srp_init_module(void)
3707 if (srp_sg_tablesize) {
3708 pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
3709 if (!cmd_sg_entries)
3710 cmd_sg_entries = srp_sg_tablesize;
3713 if (!cmd_sg_entries)
3714 cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
3716 if (cmd_sg_entries > 255) {
3717 pr_warn("Clamping cmd_sg_entries to 255\n");
3718 cmd_sg_entries = 255;
3721 if (!indirect_sg_entries)
3722 indirect_sg_entries = cmd_sg_entries;
3723 else if (indirect_sg_entries < cmd_sg_entries) {
3724 pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
3726 indirect_sg_entries = cmd_sg_entries;
3729 if (indirect_sg_entries > SG_MAX_SEGMENTS) {
3730 pr_warn("Clamping indirect_sg_entries to %u\n",
3732 indirect_sg_entries = SG_MAX_SEGMENTS;
3735 srp_remove_wq = create_workqueue("srp_remove");
3736 if (!srp_remove_wq) {
3742 ib_srp_transport_template =
3743 srp_attach_transport(&ib_srp_transport_functions);
3744 if (!ib_srp_transport_template)
3747 ret = class_register(&srp_class);
3749 pr_err("couldn't register class infiniband_srp\n");
3753 ib_sa_register_client(&srp_sa_client);
3755 ret = ib_register_client(&srp_client);
3757 pr_err("couldn't register IB client\n");
3765 ib_sa_unregister_client(&srp_sa_client);
3766 class_unregister(&srp_class);
3769 srp_release_transport(ib_srp_transport_template);
3772 destroy_workqueue(srp_remove_wq);
3776 static void __exit srp_cleanup_module(void)
3778 ib_unregister_client(&srp_client);
3779 ib_sa_unregister_client(&srp_sa_client);
3780 class_unregister(&srp_class);
3781 srp_release_transport(ib_srp_transport_template);
3782 destroy_workqueue(srp_remove_wq);
3785 module_init(srp_init_module);
3786 module_exit(srp_cleanup_module);