/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>
#include <scsi/scsi_transport_srp.h>

#include "ib_srp.h"

#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");

static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
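/*
 * Maximum initiator-to-target IU length; computed from srp_sg_tablesize
 * in srp_init_module().
 */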
static int srp_max_iu_len;

module_param(srp_sg_tablesize, int, 0444);
MODULE_PARM_DESC(srp_sg_tablesize,
		 "Max number of gather/scatter entries per I/O (default is 12, max 255)");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static int mellanox_workarounds = 1;

module_param(mellanox_workarounds, int, 0444);
MODULE_PARM_DESC(mellanox_workarounds,
		 "Enable workarounds for Mellanox SRP target bugs if != 0");

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct scsi_transport_template *ib_srp_transport_template;

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};

static struct ib_sa_client srp_sa_client;

static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}

static int srp_target_is_topspin(struct srp_target_port *target)
{
	static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };
	static const u8 cisco_oui[3]   = { 0x00, 0x1b, 0x0d };

	return topspin_workarounds &&
		(!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) ||
		 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui));
}

static int srp_target_is_mellanox(struct srp_target_port *target)
{
	static const u8 mellanox_oui[3] = { 0x00, 0x02, 0xc9 };

	return mellanox_workarounds &&
		!memcmp(&target->ioc_guid, mellanox_oui, sizeof mellanox_oui);
}

static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
				    direction);
	if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}

static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size,
			    iu->direction);
	kfree(iu->buf);
	kfree(iu);
}

static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}

static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_pkey(target->srp_host->srp_dev->dev,
			   target->srp_host->port,
			   be16_to_cpu(target->path.pkey),
			   &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}

static int srp_new_cm_id(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;

	new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id))
		return PTR_ERR(new_cm_id);

	if (target->cm_id)
		ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	return 0;
}

static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	/* The CQs are sized to match the rx/tx IU rings allocated in
	 * srp_alloc_iu_bufs(). */
	target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
	if (IS_ERR(target->recv_cq)) {
		ret = PTR_ERR(target->recv_cq);
		goto err;
	}

	target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
				       srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
	if (IS_ERR(target->send_cq)) {
		ret = PTR_ERR(target->send_cq);
		goto err_recv_cq;
	}

	ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = target->send_cq;
	init_attr->recv_cq             = target->recv_cq;

	target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		goto err_send_cq;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err_qp;

	kfree(init_attr);
	return 0;

err_qp:
	ib_destroy_qp(target->qp);

err_send_cq:
	ib_destroy_cq(target->send_cq);

err_recv_cq:
	ib_destroy_cq(target->recv_cq);

err:
	kfree(init_attr);
	return ret;
}

static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->send_cq);
	ib_destroy_cq(target->recv_cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}

static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}

static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
						   target->srp_host->srp_dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_SERVICE_ID	|
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Path record query failed\n");

	return target->status;
}

static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path		      = &target->path;
	req->param.alternate_path	      = NULL;
	req->param.service_id		      = target->service_id;
	req->param.qp_num		      = target->qp->qp_num;
	req->param.qp_type		      = target->qp->qp_type;
	req->param.private_data		      = &req->priv;
	req->param.private_data_len	      = sizeof req->priv;
	req->param.flow_control		      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn		     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count		      = 7;
	req->param.rnr_retry_count	      = 7;
	req->param.max_cm_retries	      = 15;

	req->priv.opcode	= SRP_LOGIN_REQ;
	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
	req->priv.req_buf_fmt	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	/*
	 * In the published SRP specification (draft rev. 16a), the
	 * port identifier format is 8 bytes of ID extension followed
	 * by 8 bytes of GUID.  Older drafts put the two halves in the
	 * opposite order, so that the GUID comes first.
	 *
	 * Targets conforming to these obsolete drafts can be
	 * recognized by the I/O Class they report.
	 */
	if (target->io_class == SRP_REV10_IB_IO_CLASS) {
		memcpy(req->priv.initiator_port_id,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->initiator_ext, 8);
		memcpy(req->priv.target_port_id,     &target->ioc_guid, 8);
		memcpy(req->priv.target_port_id + 8, &target->id_ext, 8);
	} else {
		memcpy(req->priv.initiator_port_id,
		       &target->initiator_ext, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->path.sgid.global.interface_id, 8);
		memcpy(req->priv.target_port_id,     &target->id_ext, 8);
		memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);
	}

	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID and set
	 * the second 8 bytes to the local node GUID.
	 */
	if (srp_target_is_topspin(target)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Topspin/Cisco initiator port ID workaround "
			     "activated for target GUID %016llx\n",
			     (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
		memcpy(req->priv.initiator_port_id + 8,
		       &target->srp_host->srp_dev->dev->node_guid, 8);
	}

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}

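/*
 * For reference, the port identifier layout produced above for rev. 16a
 * (SRP_REV16A_IB_IO_CLASS) targets is:
 *
 *	initiator_port_id[0..7]  = initiator ID extension
 *	initiator_port_id[8..15] = source GID interface ID (port GUID)
 *	target_port_id[0..7]     = target ID extension
 *	target_port_id[8..15]    = target IOC GUID
 *
 * Rev. 10 targets expect each pair in the opposite order.
 */
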
static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}

static bool srp_change_state(struct srp_target_port *target,
			     enum srp_target_state old,
			     enum srp_target_state new)
{
	bool changed = false;

	spin_lock_irq(&target->lock);
	if (target->state == old) {
		target->state = new;
		changed = true;
	}
	spin_unlock_irq(&target->lock);
	return changed;
}

static void srp_remove_work(struct work_struct *work)
{
	struct srp_target_port *target =
		container_of(work, struct srp_target_port, work);

	if (!srp_change_state(target, SRP_TARGET_DEAD, SRP_TARGET_REMOVED))
		return;

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	srp_remove_host(target->scsi_host);
	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
}

static int srp_connect_target(struct srp_target_port *target)
{
	int retries = 3;
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ back.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		case SRP_STALE_CONN:
			/* Our current CM id was stale, and is now in timewait.
			 * Try to reconnect with a new one.
			 */
			if (!retries-- || srp_new_cm_id(target)) {
				shost_printk(KERN_ERR, target->scsi_host, PFX
					     "giving up on stale connection\n");
				target->status = -ECONNRESET;
				return target->status;
			}

			shost_printk(KERN_ERR, target->scsi_host, PFX
				     "retrying stale connection\n");
			break;

		default:
			return target->status;
		}
	}
}

static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	if (!scsi_sglist(scmnd) ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (req->fmr) {
		ib_fmr_pool_unmap(req->fmr);
		req->fmr = NULL;
	}

	ib_dma_unmap_sg(target->srp_host->srp_dev->dev, scsi_sglist(scmnd),
			scsi_sg_count(scmnd), scmnd->sc_data_direction);
}

static void srp_remove_req(struct srp_target_port *target,
			   struct srp_request *req, s32 req_lim_delta)
{
	unsigned long flags;

	srp_unmap_data(req->scmnd, target, req);
	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_lim_delta;
	req->scmnd = NULL;
	list_add_tail(&req->list, &target->free_reqs);
	spin_unlock_irqrestore(&target->lock, flags);
}

static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
	req->scmnd->result = DID_RESET << 16;
	req->scmnd->scsi_done(req->scmnd);
	srp_remove_req(target, req, 0);
}

static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_qp_attr qp_attr;
	struct ib_wc wc;
	int i, ret;

	if (!srp_change_state(target, SRP_TARGET_LIVE, SRP_TARGET_CONNECTING))
		return -EAGAIN;

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	ret = srp_new_cm_id(target);
	if (ret)
		goto err;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
		; /* nothing */
	while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
		; /* nothing */

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd)
			srp_reset_req(target, req);
	}

	INIT_LIST_HEAD(&target->free_tx);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add(&target->tx_ring[i]->list, &target->free_tx);

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret)
		goto err;

	if (!srp_change_state(target, SRP_TARGET_CONNECTING, SRP_TARGET_LIVE))
		ret = -EAGAIN;

	return ret;

err:
	shost_printk(KERN_ERR, target->scsi_host,
		     PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we
	 * are in the context of the SCSI error handler now, which
	 * will deadlock if we call scsi_remove_host().
	 *
	 * Schedule our work inside the lock to avoid a race with
	 * the flush_scheduled_work() in srp_remove_one().
	 */
	spin_lock_irq(&target->lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work);
		schedule_work(&target->work);
	}
	spin_unlock_irq(&target->lock);

	return ret;
}

static int srp_map_fmr(struct srp_target_port *target, struct scatterlist *scat,
		       int sg_cnt, struct srp_request *req,
		       struct srp_direct_buf *buf)
{
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt;
	int i, j;
	int ret;
	struct srp_device *dev = target->srp_host->srp_dev;
	struct ib_device *ibdev = dev->dev;
	struct scatterlist *sg;

	if (!dev->fmr_pool)
		return -ENODEV;

	if (srp_target_is_mellanox(target) &&
	    (ib_sg_dma_address(ibdev, &scat[0]) & ~dev->fmr_page_mask))
		return -EINVAL;

	len = page_cnt = 0;
	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

		if (ib_sg_dma_address(ibdev, sg) & ~dev->fmr_page_mask) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((ib_sg_dma_address(ibdev, sg) + dma_len) &
		    ~dev->fmr_page_mask) {
			if (i < sg_cnt - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> dev->fmr_page_shift;
	if (page_cnt > SRP_FMR_SIZE)
		return -ENOMEM;

	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	scsi_for_each_sg(req->scmnd, sg, sg_cnt, i) {
		unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

		for (j = 0; j < dma_len; j += dev->fmr_page_size)
			dma_pages[page_cnt++] =
				(ib_sg_dma_address(ibdev, sg) &
				 dev->fmr_page_mask) + j;
	}

	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
					dma_pages, page_cnt, io_addr);
	if (IS_ERR(req->fmr)) {
		ret = PTR_ERR(req->fmr);
		req->fmr = NULL;
		goto out;
	}

	buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, &scat[0]) &
			       ~dev->fmr_page_mask);
	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
	buf->len = cpu_to_be32(len);

	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}

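/*
 * Worked example (assuming a 4 KB FMR page size): a contiguous 10 KB
 * DMA region starting on a page boundary spans three FMR pages, so
 * page_cnt ends up as 3 and dma_pages[] holds the three page-aligned
 * addresses backing the single rkey handed to the target.
 */
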
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	u8 fmt = SRP_DATA_DESC_DIRECT;
	struct srp_device *dev;
	struct ib_device *ibdev;

	if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled data direction %d\n",
			     scmnd->sc_data_direction);
		return -EINVAL;
	}

	nents = scsi_sg_count(scmnd);
	scat  = scsi_sglist(scmnd);

	dev = target->srp_host->srp_dev;
	ibdev = dev->dev;

	count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction);

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) +	sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(ib_sg_dma_address(ibdev, scat));
		buf->key = cpu_to_be32(target->rkey);
		buf->len = cpu_to_be32(ib_sg_dma_len(ibdev, scat));
	} else if (srp_map_fmr(target, scat, count, req,
			       (void *) cmd->add_data)) {
		/*
		 * FMR mapping failed, and the scatterlist has more
		 * than one entry.  Generate an indirect memory
		 * descriptor.
		 */
		struct srp_indirect_buf *buf = (void *) cmd->add_data;
		struct scatterlist *sg;
		u32 datalen = 0;
		int i;

		fmt = SRP_DATA_DESC_INDIRECT;
		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_indirect_buf) +
			count * sizeof (struct srp_direct_buf);

		scsi_for_each_sg(scmnd, sg, count, i) {
			unsigned int dma_len = ib_sg_dma_len(ibdev, sg);

			buf->desc_list[i].va  =
				cpu_to_be64(ib_sg_dma_address(ibdev, sg));
			buf->desc_list[i].key =
				cpu_to_be32(target->rkey);
			buf->desc_list[i].len = cpu_to_be32(dma_len);
			datalen += dma_len;
		}

		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
			cmd->data_out_desc_cnt = count;
		else
			cmd->data_in_desc_cnt = count;

		buf->table_desc.va  =
			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
		buf->table_desc.key =
			cpu_to_be32(target->rkey);
		buf->table_desc.len =
			cpu_to_be32(count * sizeof (struct srp_direct_buf));

		buf->len = cpu_to_be32(datalen);
	}

	/*
	 * The SRP buffer-format byte packs the data-out descriptor
	 * format in its high nibble and the data-in format in its low
	 * nibble.
	 */
	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}

/*
 * Return an IU and possible credit to the free pool
 */
static void srp_put_tx_iu(struct srp_target_port *target, struct srp_iu *iu,
			  enum srp_iu_type iu_type)
{
	unsigned long flags;

	spin_lock_irqsave(&target->lock, flags);
	list_add(&iu->list, &target->free_tx);
	if (iu_type != SRP_IU_RSP)
		++target->req_lim;
	spin_unlock_irqrestore(&target->lock, flags);
}

/*
 * Must be called with target->lock held to protect req_lim and free_tx.
 * If IU is not sent, it must be returned using srp_put_tx_iu().
 *
 * Note:
 * An upper limit for the number of allocated information units for each
 * request type is:
 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
 *   more than Scsi_Host.can_queue requests.
 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
 *   one unanswered SRP request to an initiator.
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
				      enum srp_iu_type iu_type)
{
	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;
	struct srp_iu *iu;

	srp_send_completion(target->send_cq, target);

	if (list_empty(&target->free_tx))
		return NULL;

	/* Initiator responses to target requests do not consume credits */
	if (iu_type != SRP_IU_RSP) {
		if (target->req_lim <= rsv) {
			++target->zero_req_lim;
			return NULL;
		}

		--target->req_lim;
	}

	iu = list_first_entry(&target->free_tx, struct srp_iu, list);
	list_del(&iu->list);
	return iu;
}

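/*
 * Typical call pattern (sketch): take target->lock, call
 * __srp_get_tx_iu(), drop the lock, fill in the IU and hand it to
 * srp_post_send(); if posting fails, return the IU and its credit
 * with srp_put_tx_iu().
 */
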
static int srp_post_send(struct srp_target_port *target,
			 struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->lkey;

	wr.next       = NULL;
	wr.wr_id      = (uintptr_t) iu;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	return ib_post_send(target->qp, &wr, &bad_wr);
}

static int srp_post_recv(struct srp_target_port *target, struct srp_iu *iu)
{
	struct ib_recv_wr wr, *bad_wr;
	struct ib_sge list;

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->lkey;

	wr.next     = NULL;
	wr.wr_id    = (uintptr_t) iu;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	return ib_post_recv(target->qp, &wr, &bad_wr);
}

static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		spin_lock_irqsave(&target->lock, flags);
		target->req_lim += be32_to_cpu(rsp->req_lim_delta);
		spin_unlock_irqrestore(&target->lock, flags);

		target->tsk_mgmt_status = -1;
		if (be32_to_cpu(rsp->resp_data_len) >= 4)
			target->tsk_mgmt_status = rsp->data[3];
		complete(&target->tsk_mgmt_done);
	} else {
		req = &target->req_ring[rsp->tag];
		scmnd = req->scmnd;
		if (!scmnd) {
			shost_printk(KERN_ERR, target->scsi_host,
				     "Null scmnd for RSP w/tag %016llx\n",
				     (unsigned long long) rsp->tag);

			/* Don't lose the credit carried by the response. */
			spin_lock_irqsave(&target->lock, flags);
			target->req_lim += be32_to_cpu(rsp->req_lim_delta);
			spin_unlock_irqrestore(&target->lock, flags);
			return;
		}
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt));
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));

		srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
		scmnd->host_scribble = NULL;
		scmnd->scsi_done(scmnd);
	}
}

static int srp_response_common(struct srp_target_port *target, s32 req_delta,
			       void *rsp, int len)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	unsigned long flags;
	struct srp_iu *iu;
	int err = 1;

	spin_lock_irqsave(&target->lock, flags);
	target->req_lim += req_delta;
	iu = __srp_get_tx_iu(target, SRP_IU_RSP);
	spin_unlock_irqrestore(&target->lock, flags);

	if (!iu) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "no IU available to send response\n");
		goto out;
	}

	ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE);
	memcpy(iu->buf, rsp, len);
	ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE);

	err = srp_post_send(target, iu, len);
	if (err) {
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "unable to post response: %d\n", err);
		srp_put_tx_iu(target, iu, SRP_IU_RSP);
	}

out:
	return err;
}

static void srp_process_cred_req(struct srp_target_port *target,
				 struct srp_cred_req *req)
{
	struct srp_cred_rsp rsp = {
		.opcode = SRP_CRED_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_CRED_REQ\n");
}

static void srp_process_aer_req(struct srp_target_port *target,
				struct srp_aer_req *req)
{
	struct srp_aer_rsp rsp = {
		.opcode = SRP_AER_RSP,
		.tag = req->tag,
	};
	s32 delta = be32_to_cpu(req->req_lim_delta);

	shost_printk(KERN_ERR, target->scsi_host, PFX
		     "ignoring AER for LUN %llu\n", be64_to_cpu(req->lun));

	if (srp_response_common(target, delta, &rsp, sizeof rsp))
		shost_printk(KERN_ERR, target->scsi_host, PFX
			     "problems processing SRP_AER_REQ\n");
}

static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu = (struct srp_iu *) wc->wr_id;
	int res;
	u8 opcode;

	ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
				   DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "recv completion, opcode 0x%02x\n", opcode);
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 8, 1,
			       iu->buf, wc->byte_len, true);
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_CRED_REQ:
		srp_process_cred_req(target, iu->buf);
		break;

	case SRP_AER_REQ:
		srp_process_aer_req(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Got target logout request\n");
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, target->max_ti_iu_len,
				      DMA_FROM_DEVICE);

	res = srp_post_recv(target, iu);
	if (res != 0)
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Recv failed with error code %d\n", res);
}

static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed receive status %d\n",
				     wc.status);
			target->qp_in_error = 1;
			break;
		}

		srp_handle_recv(target, &wc);
	}
}

static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	struct srp_iu *iu;

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "failed send status %d\n",
				     wc.status);
			target->qp_in_error = 1;
			break;
		}

		iu = (struct srp_iu *) wc.wr_id;
		list_add(&iu->list, &target->free_tx);
	}
}

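/*
 * Note that send completions only recycle the IU onto free_tx here;
 * the req_lim credit accounting for the IU was already done when it
 * was taken with __srp_get_tx_iu().
 */
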
static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(shost);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	struct ib_device *dev;
	unsigned long flags;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		scmnd->scsi_done(scmnd);
		return 0;
	}

	spin_lock_irqsave(&target->lock, flags);
	iu = __srp_get_tx_iu(target, SRP_IU_CMD);
	if (!iu)
		goto err_unlock;

	req = list_first_entry(&target->free_reqs, struct srp_request,
			       list);
	list_del(&req->list);
	spin_unlock_irqrestore(&target->lock, flags);

	dev = target->srp_host->srp_dev->dev;
	ib_dma_sync_single_for_cpu(dev, iu->dma, srp_max_iu_len,
				   DMA_TO_DEVICE);

	scmnd->result        = 0;
	scmnd->host_scribble = (void *) req;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd = scmnd;
	req->cmd   = iu;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Failed to map data\n");
		goto err_iu;
	}

	ib_dma_sync_single_for_device(dev, iu->dma, srp_max_iu_len,
				      DMA_TO_DEVICE);

	if (srp_post_send(target, iu, len)) {
		shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n");
		goto err_unmap;
	}

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err_iu:
	srp_put_tx_iu(target, iu, SRP_IU_CMD);

	spin_lock_irqsave(&target->lock, flags);
	list_add(&req->list, &target->free_reqs);

err_unlock:
	spin_unlock_irqrestore(&target->lock, flags);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}

static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  srp_max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;

		list_add(&target->tx_ring[i]->list, &target->free_tx);
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}

static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct Scsi_Host *shost = target->scsi_host;
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (srp_target_is_topspin(target)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			shost_printk(KERN_DEBUG, shost,
				     PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
				     (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		shost_printk(KERN_WARNING, shost,
			     "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				shost_printk(KERN_WARNING, shost,
					     PFX "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			shost_printk(KERN_WARNING, shost,
				     "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
				     " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_STALE_CONN:
		shost_printk(KERN_WARNING, shost, "  REJ reason: stale connection\n");
		target->status = SRP_STALE_CONN;
		break;

	default:
		shost_printk(KERN_WARNING, shost, "  REJ reason 0x%x\n",
			     event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}

static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;
	int i;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		shost_printk(KERN_DEBUG, target->scsi_host,
			     PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			/*
			 * Reserve credits for task management so we don't
			 * bounce requests back to the SCSI mid-layer.
			 */
			target->scsi_host->can_queue
				= min(target->req_lim - SRP_TSK_MGMT_SQ_SIZE,
				      target->scsi_host->can_queue);
		} else {
			shost_printk(KERN_WARNING, target->scsi_host,
				     PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		if (!target->rx_ring[0]) {
			target->status = srp_alloc_iu_bufs(target);
			if (target->status)
				break;
		}

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		for (i = 0; i < SRP_RQ_SIZE; i++) {
			struct srp_iu *iu = target->rx_ring[i];
			target->status = srp_post_recv(target, iu);
			if (target->status)
				break;
		}
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		break;

	case IB_CM_REJ_RECEIVED:
		shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			shost_printk(KERN_ERR, target->scsi_host,
				     PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		shost_printk(KERN_WARNING, target->scsi_host,
			     PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}

static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     u64 req_tag, unsigned int lun, u8 func)
{
	struct ib_device *dev = target->srp_host->srp_dev->dev;
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -1;

	init_completion(&target->tsk_mgmt_done);

	spin_lock_irq(&target->lock);
	iu = __srp_get_tx_iu(target, SRP_IU_TSK_MGMT);
	spin_unlock_irq(&target->lock);

	if (!iu)
		return -1;

	ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt,
				   DMA_TO_DEVICE);
	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) lun << 48);
	tsk_mgmt->tag		= req_tag | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req_tag;

	ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt,
				      DMA_TO_DEVICE);
	if (srp_post_send(target, iu, sizeof *tsk_mgmt)) {
		srp_put_tx_iu(target, iu, SRP_IU_TSK_MGMT);
		return -1;
	}

	if (!wait_for_completion_timeout(&target->tsk_mgmt_done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;
}

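/*
 * Routing note: setting SRP_TAG_TSK_MGMT in the tag above is what lets
 * srp_process_rsp() tell a task-management response apart from a normal
 * command response and complete tsk_mgmt_done instead of a SCSI command.
 */
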
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
	int ret = SUCCESS;

	shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");

	if (!req || target->qp_in_error)
		return FAILED;
	if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
			      SRP_TSK_ABORT_TASK))
		return FAILED;

	if (req->scmnd) {
		if (!target->tsk_mgmt_status) {
			srp_remove_req(target, req, 0);
			scmnd->result = DID_ABORT << 16;
		} else
			ret = FAILED;
	}

	return ret;
}

static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int i;

	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

	if (target->qp_in_error)
		return FAILED;
	if (srp_send_tsk_mgmt(target, SRP_TAG_NO_REQ, scmnd->device->lun,
			      SRP_TSK_LUN_RESET))
		return FAILED;
	if (target->tsk_mgmt_status)
		return FAILED;

	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		struct srp_request *req = &target->req_ring[i];
		if (req->scmnd && req->scmnd->device == scmnd->device)
			srp_reset_req(target, req);
	}

	return SUCCESS;
}

static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}

static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%pI6\n", target->path.dgid.raw);
}

static ssize_t show_orig_dgid(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%pI6\n", target->orig_dgid);
}

static ssize_t show_req_lim(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->req_lim);
}

static ssize_t show_zero_req_lim(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->zero_req_lim);
}

static ssize_t show_local_ib_port(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%d\n", target->srp_host->port);
}

static ssize_t show_local_ib_device(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(dev));

	return sprintf(buf, "%s\n", target->srp_host->srp_dev->dev->name);
}

static DEVICE_ATTR(id_ext,	    S_IRUGO, show_id_ext,	   NULL);
static DEVICE_ATTR(ioc_guid,	    S_IRUGO, show_ioc_guid,	   NULL);
static DEVICE_ATTR(service_id,	    S_IRUGO, show_service_id,	   NULL);
static DEVICE_ATTR(pkey,	    S_IRUGO, show_pkey,		   NULL);
static DEVICE_ATTR(dgid,	    S_IRUGO, show_dgid,		   NULL);
static DEVICE_ATTR(orig_dgid,	    S_IRUGO, show_orig_dgid,	   NULL);
static DEVICE_ATTR(req_lim,	    S_IRUGO, show_req_lim,	   NULL);
static DEVICE_ATTR(zero_req_lim,    S_IRUGO, show_zero_req_lim,	   NULL);
static DEVICE_ATTR(local_ib_port,   S_IRUGO, show_local_ib_port,   NULL);
static DEVICE_ATTR(local_ib_device, S_IRUGO, show_local_ib_device, NULL);

static struct device_attribute *srp_host_attrs[] = {
	&dev_attr_id_ext,
	&dev_attr_ioc_guid,
	&dev_attr_service_id,
	&dev_attr_pkey,
	&dev_attr_dgid,
	&dev_attr_orig_dgid,
	&dev_attr_req_lim,
	&dev_attr_zero_req_lim,
	&dev_attr_local_ib_port,
	&dev_attr_local_ib_device,
	NULL
};

static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= "InfiniBand SRP initiator",
	.proc_name			= DRV_NAME,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.can_queue			= SRP_CMD_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_CMD_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};

static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	struct srp_rport_identifiers ids;
	struct srp_rport *rport;

	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dma_device))
		return -ENODEV;

	memcpy(ids.port_id, &target->id_ext, 8);
	memcpy(ids.port_id + 8, &target->ioc_guid, 8);
	ids.roles = SRP_RPORT_ROLE_TARGET;
	rport = srp_rport_add(target->scsi_host, &ids);
	if (IS_ERR(rport)) {
		scsi_remove_host(target->scsi_host);
		return PTR_ERR(rport);
	}

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}

static void srp_release_dev(struct device *dev)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name        = "infiniband_srp",
	.dev_release = srp_release_dev
};

/*
 * Target ports are added by writing
 *
 *     id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 *     pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
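/*
 * Example (all values illustrative; the sysfs path depends on the HCA
 * name and port number):
 *
 *   echo "id_ext=200100e08b2a9d2c,ioc_guid=0002c90200402bd4,dgid=fe800000000000000002c90200402bd5,pkey=ffff,service_id=0002c90200402bd4" \
 *       > /sys/class/infiniband_srp/srp-mthca0-1/add_target
 */
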
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_IO_CLASS	= 1 << 7,
	SRP_OPT_INITIATOR_EXT	= 1 << 8,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};

static const match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s"		},
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s"		},
	{ SRP_OPT_DGID,			"dgid=%s"		},
	{ SRP_OPT_PKEY,			"pkey=%x"		},
	{ SRP_OPT_SERVICE_ID,		"service_id=%s"		},
	{ SRP_OPT_MAX_SECT,		"max_sect=%d"		},
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d"	},
	{ SRP_OPT_IO_CLASS,		"io_class=%x"		},
	{ SRP_OPT_INITIATOR_EXT,	"initiator_ext=%s"	},
	{ SRP_OPT_ERR,			NULL			}
};

static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			memcpy(target->orig_dgid, target->path.dgid.raw, 16);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			target->path.service_id = target->service_id;
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
			break;

		case SRP_OPT_IO_CLASS:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad IO class parameter '%s'\n", p);
				goto out;
			}
			if (token != SRP_REV10_IB_IO_CLASS &&
			    token != SRP_REV16A_IB_IO_CLASS) {
				printk(KERN_WARNING PFX "unknown IO class parameter value"
				       " %x specified (use %x or %x).\n",
				       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
				goto out;
			}
			target->io_class = token;
			break;

		case SRP_OPT_INITIATOR_EXT:
			p = match_strdup(args);
			if (!p) {
				ret = -ENOMEM;
				goto out;
			}
			target->initiator_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}

static ssize_t srp_create_target(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(dev, struct srp_host, dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->transportt  = ib_srp_transport_template;
	target_host->max_lun     = SRP_MAX_LUN;
	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;

	target = host_to_target(target_host);

	target->io_class   = SRP_REV16A_IB_IO_CLASS;
	target->scsi_host  = target_host;
	target->srp_host   = host;
	target->lkey	   = host->srp_dev->mr->lkey;
	target->rkey	   = host->srp_dev->mr->rkey;

	spin_lock_init(&target->lock);
	INIT_LIST_HEAD(&target->free_tx);
	INIT_LIST_HEAD(&target->free_reqs);
	for (i = 0; i < SRP_CMD_SQ_SIZE; ++i) {
		target->req_ring[i].index = i;
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
	}

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_query_gid(host->srp_dev->dev, host->port, 0, &target->path.sgid);

	shost_printk(KERN_DEBUG, target->scsi_host, PFX
		     "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
		     "service_id %016llx dgid %pI6\n",
		     (unsigned long long) be64_to_cpu(target->id_ext),
		     (unsigned long long) be64_to_cpu(target->ioc_guid),
		     be16_to_cpu(target->path.pkey),
		     (unsigned long long) be64_to_cpu(target->service_id),
		     target->path.dgid.raw);

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	ret = srp_new_cm_id(target);
	if (ret)
		goto err_free;

	target->qp_in_error = 0;
	ret = srp_connect_target(target);
	if (ret) {
		shost_printk(KERN_ERR, target->scsi_host,
			     PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}

static DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%s\n", host->srp_dev->dev->name);
}

static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct srp_host *host = container_of(dev, struct srp_host, dev);

	return sprintf(buf, "%d\n", host->port);
}

static DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->srp_dev = device;
	host->port = port;

	host->dev.class = &srp_class;
	host->dev.parent = device->dev->dma_device;
	dev_set_name(&host->dev, "srp-%s-%d", device->dev->name, port);

	if (device_register(&host->dev))
		goto free_host;
	if (device_create_file(&host->dev, &dev_attr_add_target))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_ibdev))
		goto err_class;
	if (device_create_file(&host->dev, &dev_attr_port))
		goto err_class;

	return host;

err_class:
	device_unregister(&host->dev);

free_host:
	kfree(host);

	return NULL;
}

static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 512 bytes (which is the smallest sector that a
	 * SCSI command will ever carry).
	 */
	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size	= 1 << srp_dev->fmr_page_shift;
	srp_dev->fmr_page_mask	= ~((u64) srp_dev->fmr_page_size - 1);
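	/*
	 * For example, an HCA whose smallest supported page is 4 KB gives
	 * fmr_page_shift = 12, fmr_page_size = 4096 and
	 * fmr_page_mask = ~0xfffULL.
	 */
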
	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	memset(&fmr_param, 0, sizeof fmr_param);
	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
	fmr_param.page_shift	    = srp_dev->fmr_page_shift;
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;
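	/*
	 * Creating the FMR pool is best-effort: with fmr_pool == NULL,
	 * srp_map_fmr() bails out and srp_map_data() falls back to
	 * building indirect descriptors instead.
	 */
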
	if (device->node_type == RDMA_NODE_IB_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}

static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		device_unregister(&host->dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(&target->lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(&target->lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			srp_remove_host(target->scsi_host);
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}

static struct srp_function_template ib_srp_transport_functions = {
};

static int __init srp_init_module(void)
{
	int ret;

	BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));

	if (srp_sg_tablesize > 255) {
		printk(KERN_WARNING PFX "Clamping srp_sg_tablesize to 255\n");
		srp_sg_tablesize = 255;
	}

	ib_srp_transport_template =
		srp_attach_transport(&ib_srp_transport_functions);
	if (!ib_srp_transport_template)
		return -ENOMEM;

	srp_template.sg_tablesize = srp_sg_tablesize;
	/* 16 bytes per entry: one struct srp_direct_buf descriptor */
	srp_max_iu_len = (sizeof (struct srp_cmd) +
			  sizeof (struct srp_indirect_buf) +
			  srp_sg_tablesize * 16);

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		srp_release_transport(ib_srp_transport_template);
		return ret;
	}

	ib_sa_register_client(&srp_sa_client);

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		srp_release_transport(ib_srp_transport_template);
		ib_sa_unregister_client(&srp_sa_client);
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	ib_sa_unregister_client(&srp_sa_client);
	class_unregister(&srp_class);
	srp_release_transport(ib_srp_transport_template);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);