/*
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: ib_srp.c 3932 2005-11-01 17:19:29Z roland $
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <linux/jiffies.h>

#include <asm/atomic.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_dbg.h>
#include <scsi/srp.h>

#include <rdma/ib_cache.h>

#include "ib_srp.h"
#define DRV_NAME	"ib_srp"
#define PFX		DRV_NAME ": "
#define DRV_VERSION	"0.2"
#define DRV_RELDATE	"November 1, 2005"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol initiator "
		   "v" DRV_VERSION " (" DRV_RELDATE ")");
MODULE_LICENSE("Dual BSD/GPL");
static int srp_sg_tablesize = SRP_DEF_SG_TABLESIZE;
static int srp_max_iu_len;

module_param(srp_sg_tablesize, int, 0444);
MODULE_PARM_DESC(srp_sg_tablesize,
		 "Max number of gather/scatter entries per I/O (default is 12)");

static int topspin_workarounds = 1;

module_param(topspin_workarounds, int, 0444);
MODULE_PARM_DESC(topspin_workarounds,
		 "Enable workarounds for Topspin/Cisco SRP target bugs if != 0");

static const u8 topspin_oui[3] = { 0x00, 0x05, 0xad };

static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
static void srp_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);

static struct ib_client srp_client = {
	.name   = "srp",
	.add    = srp_add_one,
	.remove = srp_remove_one
};
static inline struct srp_target_port *host_to_target(struct Scsi_Host *host)
{
	return (struct srp_target_port *) host->hostdata;
}

static const char *srp_target_info(struct Scsi_Host *host)
{
	return host_to_target(host)->target_name;
}
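/*
 * Allocate an SRP information unit (IU): a driver-private descriptor
 * wrapping a DMA-mapped buffer that can be posted to the send or
 * receive queue repeatedly without being remapped.
 */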
static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
				   gfp_t gfp_mask,
				   enum dma_data_direction direction)
{
	struct srp_iu *iu;

	iu = kmalloc(sizeof *iu, gfp_mask);
	if (!iu)
		goto out;

	iu->buf = kzalloc(size, gfp_mask);
	if (!iu->buf)
		goto out_free_iu;

	iu->dma = dma_map_single(host->dev->dev->dma_device,
				 iu->buf, size, direction);
	if (dma_mapping_error(iu->dma))
		goto out_free_buf;

	iu->size      = size;
	iu->direction = direction;

	return iu;

out_free_buf:
	kfree(iu->buf);
out_free_iu:
	kfree(iu);
out:
	return NULL;
}
static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
{
	if (!iu)
		return;

	dma_unmap_single(host->dev->dev->dma_device,
			 iu->dma, iu->size, iu->direction);
	kfree(iu->buf);
	kfree(iu);
}
static void srp_qp_event(struct ib_event *event, void *context)
{
	printk(KERN_ERR PFX "QP event %d\n", event->event);
}
static int srp_init_qp(struct srp_target_port *target,
		       struct ib_qp *qp)
{
	struct ib_qp_attr *attr;
	int ret;

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	ret = ib_find_cached_pkey(target->srp_host->dev->dev,
				  target->srp_host->port,
				  be16_to_cpu(target->path.pkey),
				  &attr->pkey_index);
	if (ret)
		goto out;

	attr->qp_state        = IB_QPS_INIT;
	attr->qp_access_flags = (IB_ACCESS_REMOTE_READ |
				 IB_ACCESS_REMOTE_WRITE);
	attr->port_num        = target->srp_host->port;

	ret = ib_modify_qp(qp, attr,
			   IB_QP_STATE		|
			   IB_QP_PKEY_INDEX	|
			   IB_QP_ACCESS_FLAGS	|
			   IB_QP_PORT);

out:
	kfree(attr);
	return ret;
}
static int srp_create_target_ib(struct srp_target_port *target)
{
	struct ib_qp_init_attr *init_attr;
	int ret;

	init_attr = kzalloc(sizeof *init_attr, GFP_KERNEL);
	if (!init_attr)
		return -ENOMEM;

	target->cq = ib_create_cq(target->srp_host->dev->dev, srp_completion,
				  NULL, target, SRP_CQ_SIZE);
	if (IS_ERR(target->cq)) {
		ret = PTR_ERR(target->cq);
		goto out;
	}

	ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);

	init_attr->event_handler       = srp_qp_event;
	init_attr->cap.max_send_wr     = SRP_SQ_SIZE;
	init_attr->cap.max_recv_wr     = SRP_RQ_SIZE;
	init_attr->cap.max_recv_sge    = 1;
	init_attr->cap.max_send_sge    = 1;
	init_attr->sq_sig_type         = IB_SIGNAL_ALL_WR;
	init_attr->qp_type             = IB_QPT_RC;
	init_attr->send_cq             = target->cq;
	init_attr->recv_cq             = target->cq;

	target->qp = ib_create_qp(target->srp_host->dev->pd, init_attr);
	if (IS_ERR(target->qp)) {
		ret = PTR_ERR(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

	ret = srp_init_qp(target, target->qp);
	if (ret) {
		ib_destroy_qp(target->qp);
		ib_destroy_cq(target->cq);
		goto out;
	}

out:
	kfree(init_attr);
	return ret;
}
static void srp_free_target_ib(struct srp_target_port *target)
{
	int i;

	ib_destroy_qp(target->qp);
	ib_destroy_cq(target->cq);

	for (i = 0; i < SRP_RQ_SIZE; ++i)
		srp_free_iu(target->srp_host, target->rx_ring[i]);
	for (i = 0; i < SRP_SQ_SIZE + 1; ++i)
		srp_free_iu(target->srp_host, target->tx_ring[i]);
}
static void srp_path_rec_completion(int status,
				    struct ib_sa_path_rec *pathrec,
				    void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	target->status = status;
	if (status)
		printk(KERN_ERR PFX "Got failed path rec status %d\n", status);
	else
		target->path = *pathrec;
	complete(&target->done);
}
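/*
 * Ask the subnet administrator for a path record to the target port.
 * The query completes asynchronously in srp_path_rec_completion()
 * above, so block on target->done until the answer arrives.
 */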
static int srp_lookup_path(struct srp_target_port *target)
{
	target->path.numb_path = 1;

	init_completion(&target->done);

	target->path_query_id = ib_sa_path_rec_get(target->srp_host->dev->dev,
						   target->srp_host->port,
						   &target->path,
						   IB_SA_PATH_REC_DGID		|
						   IB_SA_PATH_REC_SGID		|
						   IB_SA_PATH_REC_NUMB_PATH	|
						   IB_SA_PATH_REC_PKEY,
						   SRP_PATH_REC_TIMEOUT_MS,
						   GFP_KERNEL,
						   srp_path_rec_completion,
						   target, &target->path_query);
	if (target->path_query_id < 0)
		return target->path_query_id;

	wait_for_completion(&target->done);

	if (target->status < 0)
		printk(KERN_WARNING PFX "Path record query failed\n");

	return target->status;
}
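/*
 * Send the SRP login request.  SRP carries SRP_LOGIN_REQ as private
 * data in the IB CM REQ, so connection setup and protocol login happen
 * in a single exchange.
 */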
static int srp_send_req(struct srp_target_port *target)
{
	struct {
		struct ib_cm_req_param param;
		struct srp_login_req   priv;
	} *req = NULL;
	int status;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->param.primary_path 	      = &target->path;
	req->param.alternate_path 	      = NULL;
	req->param.service_id 		      = target->service_id;
	req->param.qp_num 		      = target->qp->qp_num;
	req->param.qp_type 		      = target->qp->qp_type;
	req->param.private_data 	      = &req->priv;
	req->param.private_data_len 	      = sizeof req->priv;
	req->param.flow_control 	      = 1;

	get_random_bytes(&req->param.starting_psn, 4);
	req->param.starting_psn 	     &= 0xffffff;

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req->param.responder_resources	      = 4;
	req->param.remote_cm_response_timeout = 20;
	req->param.local_cm_response_timeout  = 20;
	req->param.retry_count 		      = 7;
	req->param.rnr_retry_count 	      = 7;
	req->param.max_cm_retries 	      = 15;

	req->priv.opcode     	= SRP_LOGIN_REQ;
	req->priv.tag        	= 0;
	req->priv.req_it_iu_len = cpu_to_be32(srp_max_iu_len);
	req->priv.req_buf_fmt 	= cpu_to_be16(SRP_BUF_FORMAT_DIRECT |
					      SRP_BUF_FORMAT_INDIRECT);
	memcpy(req->priv.initiator_port_id, target->srp_host->initiator_port_id, 16);
	/*
	 * Topspin/Cisco SRP targets will reject our login unless we
	 * zero out the first 8 bytes of our initiator port ID.  The
	 * second 8 bytes must be our local node GUID, but we always
	 * use that anyway.
	 */
	if (topspin_workarounds && !memcmp(&target->ioc_guid, topspin_oui, 3)) {
		printk(KERN_DEBUG PFX "Topspin/Cisco initiator port ID workaround "
		       "activated for target GUID %016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
		memset(req->priv.initiator_port_id, 0, 8);
	}
	memcpy(req->priv.target_port_id,     &target->id_ext, 8);
	memcpy(req->priv.target_port_id + 8, &target->ioc_guid, 8);

	status = ib_send_cm_req(target->cm_id, &req->param);

	kfree(req);

	return status;
}
static void srp_disconnect_target(struct srp_target_port *target)
{
	/* XXX should send SRP_I_LOGOUT request */

	init_completion(&target->done);
	if (ib_send_cm_dreq(target->cm_id, NULL, 0)) {
		printk(KERN_DEBUG PFX "Sending CM DREQ failed\n");
		return;
	}
	wait_for_completion(&target->done);
}
static void srp_remove_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_DEAD) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return;
	}
	target->state = SRP_TARGET_REMOVED;
	spin_unlock_irq(target->scsi_host->host_lock);

	spin_lock(&target->srp_host->target_lock);
	list_del(&target->list);
	spin_unlock(&target->srp_host->target_lock);

	scsi_remove_host(target->scsi_host);
	ib_destroy_cm_id(target->cm_id);
	srp_free_target_ib(target);
	scsi_host_put(target->scsi_host);
}
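/*
 * Connect to the target: look up a path, then send login requests
 * until the target accepts or returns something other than a redirect
 * REJ.
 */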
static int srp_connect_target(struct srp_target_port *target)
{
	int ret;

	ret = srp_lookup_path(target);
	if (ret)
		return ret;

	while (1) {
		init_completion(&target->done);
		ret = srp_send_req(target);
		if (ret)
			return ret;
		wait_for_completion(&target->done);

		/*
		 * The CM event handling code will set status to
		 * SRP_PORT_REDIRECT if we get a port redirect REJ
		 * back, or SRP_DLID_REDIRECT if we get a lid/qp
		 * redirect REJ.
		 */
		switch (target->status) {
		case 0:
			return 0;

		case SRP_PORT_REDIRECT:
			ret = srp_lookup_path(target);
			if (ret)
				return ret;
			break;

		case SRP_DLID_REDIRECT:
			break;

		default:
			return target->status;
		}
	}
}
static void srp_unmap_data(struct scsi_cmnd *scmnd,
			   struct srp_target_port *target,
			   struct srp_request *req)
{
	struct scatterlist *scat;
	int nents;

	if (!scmnd->request_buffer ||
	    (scmnd->sc_data_direction != DMA_TO_DEVICE &&
	     scmnd->sc_data_direction != DMA_FROM_DEVICE))
		return;

	if (req->fmr) {
		ib_fmr_pool_unmap(req->fmr);
		req->fmr = NULL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat  = scmnd->request_buffer;
	} else {
		nents = 1;
		scat  = &req->fake_sg;
	}

	dma_unmap_sg(target->srp_host->dev->dev->dma_device, scat, nents,
		     scmnd->sc_data_direction);
}
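/*
 * Tear down the connection and rebuild it on the same QP with a fresh
 * CM ID, completing any in-flight commands with DID_RESET.
 */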
static int srp_reconnect_target(struct srp_target_port *target)
{
	struct ib_cm_id *new_cm_id;
	struct ib_qp_attr qp_attr;
	struct srp_request *req;
	struct ib_wc wc;
	int ret;
	int i;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state != SRP_TARGET_LIVE) {
		spin_unlock_irq(target->scsi_host->host_lock);
		return -EAGAIN;
	}
	target->state = SRP_TARGET_CONNECTING;
	spin_unlock_irq(target->scsi_host->host_lock);

	srp_disconnect_target(target);
	/*
	 * Now get a new local CM ID so that we avoid confusing the
	 * target in case things are really fouled up.
	 */
	new_cm_id = ib_create_cm_id(target->srp_host->dev->dev,
				    srp_cm_handler, target);
	if (IS_ERR(new_cm_id)) {
		ret = PTR_ERR(new_cm_id);
		goto err;
	}
	ib_destroy_cm_id(target->cm_id);
	target->cm_id = new_cm_id;

	qp_attr.qp_state = IB_QPS_RESET;
	ret = ib_modify_qp(target->qp, &qp_attr, IB_QP_STATE);
	if (ret)
		goto err;

	ret = srp_init_qp(target, target->qp);
	if (ret)
		goto err;

	while (ib_poll_cq(target->cq, 1, &wc) > 0)
		; /* nothing */

	list_for_each_entry(req, &target->req_queue, list) {
		req->scmnd->result = DID_RESET << 16;
		req->scmnd->scsi_done(req->scmnd);
		srp_unmap_data(req->scmnd, target, req);
	}

	target->rx_head = 0;
	target->tx_head = 0;
	target->tx_tail = 0;
	INIT_LIST_HEAD(&target->free_reqs);
	INIT_LIST_HEAD(&target->req_queue);
	for (i = 0; i < SRP_SQ_SIZE; ++i)
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);

	ret = srp_connect_target(target);
	if (ret)
		goto err;

	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		ret = 0;
		target->state = SRP_TARGET_LIVE;
	} else
		ret = -EAGAIN;
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;

err:
	printk(KERN_ERR PFX "reconnect failed (%d), removing target port.\n", ret);

	/*
	 * We couldn't reconnect, so kill our target port off.
	 * However, we have to defer the real removal because we might
	 * be in the context of the SCSI error handler now, which
	 * would deadlock if we call scsi_remove_host().
	 */
	spin_lock_irq(target->scsi_host->host_lock);
	if (target->state == SRP_TARGET_CONNECTING) {
		target->state = SRP_TARGET_DEAD;
		INIT_WORK(&target->work, srp_remove_work, target);
		schedule_work(&target->work);
	}
	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}
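/*
 * Try to fold a scatterlist into a single FMR mapping.  Every entry
 * except the first must start on an FMR page boundary and every entry
 * except the last must end on one, or the list cannot be presented to
 * the target as one virtually contiguous buffer.
 */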
static int srp_map_fmr(struct srp_device *dev, struct scatterlist *scat,
		       int sg_cnt, struct srp_request *req,
		       struct srp_direct_buf *buf)
{
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt;
	int i, j;
	int ret;

	if (!dev->fmr_pool)
		return -ENODEV;

	len = page_cnt = 0;
	for (i = 0; i < sg_cnt; ++i) {
		if (sg_dma_address(&scat[i]) & ~dev->fmr_page_mask) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((sg_dma_address(&scat[i]) + sg_dma_len(&scat[i])) &
		    ~dev->fmr_page_mask) {
			if (i < sg_cnt - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += sg_dma_len(&scat[i]);
	}

	page_cnt += len >> dev->fmr_page_shift;
	if (page_cnt > SRP_FMR_SIZE)
		return -ENOMEM;

	dma_pages = kmalloc(sizeof (u64) * page_cnt, GFP_ATOMIC);
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_cnt; ++i)
		for (j = 0; j < sg_dma_len(&scat[i]); j += dev->fmr_page_size)
			dma_pages[page_cnt++] =
				(sg_dma_address(&scat[i]) & dev->fmr_page_mask) + j;

	req->fmr = ib_fmr_pool_map_phys(dev->fmr_pool,
					dma_pages, page_cnt, &io_addr);
	if (IS_ERR(req->fmr)) {
		ret = PTR_ERR(req->fmr);
		goto out;
	}

	buf->va  = cpu_to_be64(sg_dma_address(&scat[0]) & ~dev->fmr_page_mask);
	buf->key = cpu_to_be32(req->fmr->fmr->rkey);
	buf->len = cpu_to_be32(len);

	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}
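/*
 * Build the data descriptor for a SCSI command: a direct descriptor
 * when the mapped scatterlist collapses to one entry or an FMR covers
 * it, an indirect descriptor table otherwise.  Returns the length of
 * the resulting SRP_CMD IU, or a negative error code.
 */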
static int srp_map_data(struct scsi_cmnd *scmnd, struct srp_target_port *target,
			struct srp_request *req)
{
	struct scatterlist *scat;
	struct srp_cmd *cmd = req->cmd->buf;
	int len, nents, count;
	u8 fmt = SRP_DATA_DESC_DIRECT;

	if (!scmnd->request_buffer || scmnd->sc_data_direction == DMA_NONE)
		return sizeof (struct srp_cmd);

	if (scmnd->sc_data_direction != DMA_FROM_DEVICE &&
	    scmnd->sc_data_direction != DMA_TO_DEVICE) {
		printk(KERN_WARNING PFX "Unhandled data direction %d\n",
		       scmnd->sc_data_direction);
		return -EINVAL;
	}

	/*
	 * This handling of non-SG commands can be killed when the
	 * SCSI midlayer no longer generates non-SG commands.
	 */
	if (likely(scmnd->use_sg)) {
		nents = scmnd->use_sg;
		scat  = scmnd->request_buffer;
	} else {
		nents = 1;
		scat  = &req->fake_sg;
		sg_init_one(scat, scmnd->request_buffer, scmnd->request_bufflen);
	}

	count = dma_map_sg(target->srp_host->dev->dev->dma_device,
			   scat, nents, scmnd->sc_data_direction);

	fmt = SRP_DATA_DESC_DIRECT;
	len = sizeof (struct srp_cmd) + sizeof (struct srp_direct_buf);

	if (count == 1) {
		/*
		 * The midlayer only generated a single gather/scatter
		 * entry, or DMA mapping coalesced everything to a
		 * single entry.  So a direct descriptor along with
		 * the DMA MR suffices.
		 */
		struct srp_direct_buf *buf = (void *) cmd->add_data;

		buf->va  = cpu_to_be64(sg_dma_address(scat));
		buf->key = cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->len = cpu_to_be32(sg_dma_len(scat));
	} else if (srp_map_fmr(target->srp_host->dev, scat, count, req,
			       (void *) cmd->add_data)) {
		/*
		 * FMR mapping failed, and the scatterlist has more
		 * than one entry.  Generate an indirect memory
		 * descriptor.
		 */
		struct srp_indirect_buf *buf = (void *) cmd->add_data;
		u32 datalen = 0;
		int i;

		fmt = SRP_DATA_DESC_INDIRECT;
		len = sizeof (struct srp_cmd) +
			sizeof (struct srp_indirect_buf) +
			count * sizeof (struct srp_direct_buf);

		for (i = 0; i < count; ++i) {
			buf->desc_list[i].va  =
				cpu_to_be64(sg_dma_address(&scat[i]));
			buf->desc_list[i].key =
				cpu_to_be32(target->srp_host->dev->mr->rkey);
			buf->desc_list[i].len =
				cpu_to_be32(sg_dma_len(&scat[i]));
			datalen += sg_dma_len(&scat[i]);
		}

		if (scmnd->sc_data_direction == DMA_TO_DEVICE)
			cmd->data_out_desc_cnt = count;
		else
			cmd->data_in_desc_cnt = count;

		buf->table_desc.va  =
			cpu_to_be64(req->cmd->dma + sizeof *cmd + sizeof *buf);
		buf->table_desc.key =
			cpu_to_be32(target->srp_host->dev->mr->rkey);
		buf->table_desc.len =
			cpu_to_be32(count * sizeof (struct srp_direct_buf));

		buf->len = cpu_to_be32(datalen);
	}

	if (scmnd->sc_data_direction == DMA_TO_DEVICE)
		cmd->buf_fmt = fmt << 4;
	else
		cmd->buf_fmt = fmt;

	return len;
}
static void srp_remove_req(struct srp_target_port *target, struct srp_request *req)
{
	srp_unmap_data(req->scmnd, target, req);
	list_move_tail(&req->list, &target->free_reqs);
}
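/*
 * Handle an SRP_RSP IU.  The req_lim_delta carried in every response
 * replenishes our send credits; the tag tells us whether the response
 * answers a task management request or an ordinary SCSI command.
 */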
static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
{
	struct srp_request *req;
	struct scsi_cmnd *scmnd;
	unsigned long flags;
	s32 delta;

	delta = (s32) be32_to_cpu(rsp->req_lim_delta);

	spin_lock_irqsave(target->scsi_host->host_lock, flags);

	target->req_lim += delta;

	req = &target->req_ring[rsp->tag & ~SRP_TAG_TSK_MGMT];

	if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) {
		if (be32_to_cpu(rsp->resp_data_len) < 4)
			req->tsk_status = -1;
		else
			req->tsk_status = rsp->data[3];
		complete(&req->done);
	} else {
		scmnd = req->scmnd;
		if (!scmnd)
			printk(KERN_ERR "Null scmnd for RSP w/tag %016llx\n",
			       (unsigned long long) rsp->tag);
		scmnd->result = rsp->status;

		if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
			memcpy(scmnd->sense_buffer, rsp->data +
			       be32_to_cpu(rsp->resp_data_len),
			       min_t(int, be32_to_cpu(rsp->sense_data_len),
				     SCSI_SENSE_BUFFERSIZE));
		}

		if (rsp->flags & (SRP_RSP_FLAG_DOOVER | SRP_RSP_FLAG_DOUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_out_res_cnt);
		else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
			scmnd->resid = be32_to_cpu(rsp->data_in_res_cnt);

		if (!req->tsk_mgmt) {
			scmnd->host_scribble = (void *) -1L;
			scmnd->scsi_done(scmnd);

			srp_remove_req(target, req);
		} else
			req->cmd_done = 1;
	}

	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
}
static void srp_reconnect_work(void *target_ptr)
{
	struct srp_target_port *target = target_ptr;

	srp_reconnect_target(target);
}
static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
{
	struct srp_iu *iu;
	u8 opcode;

	iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];

	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
				target->max_ti_iu_len, DMA_FROM_DEVICE);

	opcode = *(u8 *) iu->buf;

	if (0) {
		int i;

		printk(KERN_ERR PFX "recv completion, opcode 0x%02x\n", opcode);

		for (i = 0; i < wc->byte_len; ++i) {
			if (i % 8 == 0)
				printk(KERN_ERR "  [%02x] ", i);
			printk(" %02x", ((u8 *) iu->buf)[i]);
			if ((i + 1) % 8 == 0)
				printk("\n");
		}

		if (wc->byte_len % 8)
			printk("\n");
	}

	switch (opcode) {
	case SRP_RSP:
		srp_process_rsp(target, iu->buf);
		break;

	case SRP_T_LOGOUT:
		/* XXX Handle target logout */
		printk(KERN_WARNING PFX "Got target logout request\n");
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled SRP opcode 0x%02x\n", opcode);
		break;
	}

	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
				   target->max_ti_iu_len, DMA_FROM_DEVICE);
}
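/*
 * Completion queue callback: rearm notification, then drain the CQ.
 * A failed work request triggers a reconnect via target->work;
 * successful completions are demultiplexed on the SRP_OP_RECV bit of
 * the work request ID.
 */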
static void srp_completion(struct ib_cq *cq, void *target_ptr)
{
	struct srp_target_port *target = target_ptr;
	struct ib_wc wc;
	unsigned long flags;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status) {
			printk(KERN_ERR PFX "failed %s status %d\n",
			       wc.wr_id & SRP_OP_RECV ? "receive" : "send",
			       wc.status);
			spin_lock_irqsave(target->scsi_host->host_lock, flags);
			if (target->state == SRP_TARGET_LIVE)
				schedule_work(&target->work);
			spin_unlock_irqrestore(target->scsi_host->host_lock, flags);
			break;
		}

		if (wc.wr_id & SRP_OP_RECV)
			srp_handle_recv(target, &wc);
		else
			++target->tx_tail;
	}
}
static int __srp_post_recv(struct srp_target_port *target)
{
	struct srp_iu *iu;
	struct ib_sge list;
	struct ib_recv_wr wr, *bad_wr;
	unsigned int next;
	int ret;

	next	 = target->rx_head & (SRP_RQ_SIZE - 1);
	wr.wr_id = next | SRP_OP_RECV;
	iu	 = target->rx_ring[next];

	list.addr   = iu->dma;
	list.length = iu->size;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next     = NULL;
	wr.sg_list  = &list;
	wr.num_sge  = 1;

	ret = ib_post_recv(target->qp, &wr, &bad_wr);
	if (!ret)
		++target->rx_head;

	return ret;
}
static int srp_post_recv(struct srp_target_port *target)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(target->scsi_host->host_lock, flags);
	ret = __srp_post_recv(target);
	spin_unlock_irqrestore(target->scsi_host->host_lock, flags);

	return ret;
}
/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.  Lock cannot be dropped between call here and
 * call to __srp_post_send().
 */
static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target)
{
	if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
		return NULL;

	if (unlikely(target->req_lim < 1))
		++target->zero_req_lim;

	return target->tx_ring[target->tx_head & SRP_SQ_SIZE];
}
/*
 * Must be called with target->scsi_host->host_lock held to protect
 * req_lim and tx_head.
 */
static int __srp_post_send(struct srp_target_port *target,
			   struct srp_iu *iu, int len)
{
	struct ib_sge list;
	struct ib_send_wr wr, *bad_wr;
	int ret = 0;

	list.addr   = iu->dma;
	list.length = len;
	list.lkey   = target->srp_host->dev->mr->lkey;

	wr.next       = NULL;
	wr.wr_id      = target->tx_head & SRP_SQ_SIZE;
	wr.sg_list    = &list;
	wr.num_sge    = 1;
	wr.opcode     = IB_WR_SEND;
	wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(target->qp, &wr, &bad_wr);

	if (!ret) {
		++target->tx_head;
		--target->req_lim;
	}

	return ret;
}
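/*
 * queuecommand entry point, called by the SCSI midlayer with the host
 * lock held: grab a free request and TX IU, build the SRP_CMD, post a
 * receive for the eventual response, then post the send.
 */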
static int srp_queuecommand(struct scsi_cmnd *scmnd,
			    void (*done)(struct scsi_cmnd *))
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	struct srp_iu *iu;
	struct srp_cmd *cmd;
	int len;

	if (target->state == SRP_TARGET_CONNECTING)
		goto err;

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		scmnd->result = DID_BAD_TARGET << 16;
		done(scmnd);
		return 0;
	}

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto err;

	dma_sync_single_for_cpu(target->srp_host->dev->dev->dma_device, iu->dma,
				srp_max_iu_len, DMA_TO_DEVICE);

	req = list_entry(target->free_reqs.next, struct srp_request, list);

	scmnd->scsi_done     = done;
	scmnd->result        = 0;
	scmnd->host_scribble = (void *) (long) req->index;

	cmd = iu->buf;
	memset(cmd, 0, sizeof *cmd);

	cmd->opcode = SRP_CMD;
	cmd->lun    = cpu_to_be64((u64) scmnd->device->lun << 48);
	cmd->tag    = req->index;
	memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len);

	req->scmnd    = scmnd;
	req->cmd      = iu;
	req->cmd_done = 0;
	req->tsk_mgmt = NULL;

	len = srp_map_data(scmnd, target, req);
	if (len < 0) {
		printk(KERN_ERR PFX "Failed to map data\n");
		goto err;
	}

	if (__srp_post_recv(target)) {
		printk(KERN_ERR PFX "Recv failed\n");
		goto err_unmap;
	}

	dma_sync_single_for_device(target->srp_host->dev->dev->dma_device, iu->dma,
				   srp_max_iu_len, DMA_TO_DEVICE);

	if (__srp_post_send(target, iu, len)) {
		printk(KERN_ERR PFX "Send failed\n");
		goto err_unmap;
	}

	list_move_tail(&req->list, &target->req_queue);

	return 0;

err_unmap:
	srp_unmap_data(scmnd, target, req);

err:
	return SCSI_MLQUEUE_HOST_BUSY;
}
static int srp_alloc_iu_bufs(struct srp_target_port *target)
{
	int i;

	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		target->rx_ring[i] = srp_alloc_iu(target->srp_host,
						  target->max_ti_iu_len,
						  GFP_KERNEL, DMA_FROM_DEVICE);
		if (!target->rx_ring[i])
			goto err;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		target->tx_ring[i] = srp_alloc_iu(target->srp_host,
						  srp_max_iu_len,
						  GFP_KERNEL, DMA_TO_DEVICE);
		if (!target->tx_ring[i])
			goto err;
	}

	return 0;

err:
	for (i = 0; i < SRP_RQ_SIZE; ++i) {
		srp_free_iu(target->srp_host, target->rx_ring[i]);
		target->rx_ring[i] = NULL;
	}

	for (i = 0; i < SRP_SQ_SIZE + 1; ++i) {
		srp_free_iu(target->srp_host, target->tx_ring[i]);
		target->tx_ring[i] = NULL;
	}

	return -ENOMEM;
}
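/*
 * Decode a CM REJ.  Redirect rejects update the path and set
 * target->status so that srp_connect_target() retries the login;
 * everything else becomes -ECONNRESET.
 */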
static void srp_cm_rej_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event,
			       struct srp_target_port *target)
{
	struct ib_class_port_info *cpi;
	int opcode;

	switch (event->param.rej_rcvd.reason) {
	case IB_CM_REJ_PORT_CM_REDIRECT:
		cpi = event->param.rej_rcvd.ari;
		target->path.dlid = cpi->redirect_lid;
		target->path.pkey = cpi->redirect_pkey;
		cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff;
		memcpy(target->path.dgid.raw, cpi->redirect_gid, 16);

		target->status = target->path.dlid ?
			SRP_DLID_REDIRECT : SRP_PORT_REDIRECT;
		break;

	case IB_CM_REJ_PORT_REDIRECT:
		if (topspin_workarounds &&
		    !memcmp(&target->ioc_guid, topspin_oui, 3)) {
			/*
			 * Topspin/Cisco SRP gateways incorrectly send
			 * reject reason code 25 when they mean 24
			 * (port redirect).
			 */
			memcpy(target->path.dgid.raw,
			       event->param.rej_rcvd.ari, 16);

			printk(KERN_DEBUG PFX "Topspin/Cisco redirect to target port GID %016llx%016llx\n",
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.subnet_prefix),
			       (unsigned long long) be64_to_cpu(target->path.dgid.global.interface_id));

			target->status = SRP_PORT_REDIRECT;
		} else {
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_PORT_REDIRECT\n");
			target->status = -ECONNRESET;
		}
		break;

	case IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID:
		printk(KERN_WARNING "  REJ reason: IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID\n");
		target->status = -ECONNRESET;
		break;

	case IB_CM_REJ_CONSUMER_DEFINED:
		opcode = *(u8 *) event->private_data;
		if (opcode == SRP_LOGIN_REJ) {
			struct srp_login_rej *rej = event->private_data;
			u32 reason = be32_to_cpu(rej->reason);

			if (reason == SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE)
				printk(KERN_WARNING PFX
				       "SRP_LOGIN_REJ: requested max_it_iu_len too large\n");
			else
				printk(KERN_WARNING PFX
				       "SRP LOGIN REJECTED, reason 0x%08x\n", reason);
		} else
			printk(KERN_WARNING "  REJ reason: IB_CM_REJ_CONSUMER_DEFINED,"
			       " opcode 0x%02x\n", opcode);
		target->status = -ECONNRESET;
		break;

	default:
		printk(KERN_WARNING "  REJ reason 0x%x\n",
		       event->param.rej_rcvd.reason);
		target->status = -ECONNRESET;
	}
}
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct srp_target_port *target = cm_id->context;
	struct ib_qp_attr *qp_attr = NULL;
	int attr_mask = 0;
	int comp = 0;
	int opcode = 0;

	switch (event->event) {
	case IB_CM_REQ_ERROR:
		printk(KERN_DEBUG PFX "Sending CM REQ failed\n");
		comp = 1;
		target->status = -ECONNRESET;
		break;

	case IB_CM_REP_RECEIVED:
		comp = 1;
		opcode = *(u8 *) event->private_data;

		if (opcode == SRP_LOGIN_RSP) {
			struct srp_login_rsp *rsp = event->private_data;

			target->max_ti_iu_len = be32_to_cpu(rsp->max_ti_iu_len);
			target->req_lim       = be32_to_cpu(rsp->req_lim_delta);

			target->scsi_host->can_queue = min(target->req_lim,
							   target->scsi_host->can_queue);
		} else {
			printk(KERN_WARNING PFX "Unhandled RSP opcode %#x\n", opcode);
			target->status = -ECONNRESET;
			break;
		}

		target->status = srp_alloc_iu_bufs(target);
		if (target->status)
			break;

		qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
		if (!qp_attr) {
			target->status = -ENOMEM;
			break;
		}

		qp_attr->qp_state = IB_QPS_RTR;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = srp_post_recv(target);
		if (target->status)
			break;

		qp_attr->qp_state = IB_QPS_RTS;
		target->status = ib_cm_init_qp_attr(cm_id, qp_attr, &attr_mask);
		if (target->status)
			break;

		target->status = ib_modify_qp(target->qp, qp_attr, attr_mask);
		if (target->status)
			break;

		target->status = ib_send_cm_rtu(cm_id, NULL, 0);
		if (target->status)
			break;

		break;

	case IB_CM_REJ_RECEIVED:
		printk(KERN_DEBUG PFX "REJ received\n");
		comp = 1;

		srp_cm_rej_handler(cm_id, event, target);
		break;

	case IB_CM_DREQ_RECEIVED:
		printk(KERN_WARNING PFX "DREQ received - connection closed\n");
		if (ib_send_cm_drep(cm_id, NULL, 0))
			printk(KERN_ERR PFX "Sending CM DREP failed\n");
		break;

	case IB_CM_TIMEWAIT_EXIT:
		printk(KERN_ERR PFX "connection closed\n");

		comp = 1;
		target->status = 0;
		break;

	case IB_CM_MRA_RECEIVED:
	case IB_CM_DREQ_ERROR:
	case IB_CM_DREP_RECEIVED:
		break;

	default:
		printk(KERN_WARNING PFX "Unhandled CM event %d\n", event->event);
		break;
	}

	if (comp)
		complete(&target->done);

	kfree(qp_attr);

	return 0;
}
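/*
 * Send an SRP_TSK_MGMT IU (task abort or LUN reset) for the given
 * request and wait up to SRP_ABORT_TIMEOUT_MS for the target to
 * respond.
 */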
static int srp_send_tsk_mgmt(struct srp_target_port *target,
			     struct srp_request *req, u8 func)
{
	struct srp_iu *iu;
	struct srp_tsk_mgmt *tsk_mgmt;

	spin_lock_irq(target->scsi_host->host_lock);

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED) {
		req->scmnd->result = DID_BAD_TARGET << 16;
		goto out;
	}

	init_completion(&req->done);

	iu = __srp_get_tx_iu(target);
	if (!iu)
		goto out;

	tsk_mgmt = iu->buf;
	memset(tsk_mgmt, 0, sizeof *tsk_mgmt);

	tsk_mgmt->opcode	= SRP_TSK_MGMT;
	tsk_mgmt->lun		= cpu_to_be64((u64) req->scmnd->device->lun << 48);
	tsk_mgmt->tag		= req->index | SRP_TAG_TSK_MGMT;
	tsk_mgmt->tsk_mgmt_func = func;
	tsk_mgmt->task_tag	= req->index;

	if (__srp_post_send(target, iu, sizeof *tsk_mgmt))
		goto out;

	req->tsk_mgmt = iu;

	spin_unlock_irq(target->scsi_host->host_lock);

	if (!wait_for_completion_timeout(&req->done,
					 msecs_to_jiffies(SRP_ABORT_TIMEOUT_MS)))
		return -1;

	return 0;

out:
	spin_unlock_irq(target->scsi_host->host_lock);
	return -1;
}
static int srp_find_req(struct srp_target_port *target,
			struct scsi_cmnd *scmnd,
			struct srp_request **req)
{
	if (scmnd->host_scribble == (void *) -1L)
		return -1;

	*req = &target->req_ring[(long) scmnd->host_scribble];

	return 0;
}
static int srp_abort(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req;
	int ret = SUCCESS;

	printk(KERN_ERR "SRP abort called\n");

	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_ABORT_TASK))
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	if (req->cmd_done) {
		srp_remove_req(target, req);
		scmnd->scsi_done(scmnd);
	} else if (!req->tsk_status) {
		srp_remove_req(target, req);
		scmnd->result = DID_ABORT << 16;
	} else
		ret = FAILED;

	spin_unlock_irq(target->scsi_host->host_lock);

	return ret;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	struct srp_request *req, *tmp;

	printk(KERN_ERR "SRP reset_device called\n");

	if (srp_find_req(target, scmnd, &req))
		return FAILED;
	if (srp_send_tsk_mgmt(target, req, SRP_TSK_LUN_RESET))
		return FAILED;
	if (req->tsk_status)
		return FAILED;

	spin_lock_irq(target->scsi_host->host_lock);

	list_for_each_entry_safe(req, tmp, &target->req_queue, list)
		if (req->scmnd->device == scmnd->device) {
			req->scmnd->result = DID_RESET << 16;
			req->scmnd->scsi_done(req->scmnd);
			srp_remove_req(target, req);
		}

	spin_unlock_irq(target->scsi_host->host_lock);

	return SUCCESS;
}
static int srp_reset_host(struct scsi_cmnd *scmnd)
{
	struct srp_target_port *target = host_to_target(scmnd->device->host);
	int ret = FAILED;

	printk(KERN_ERR PFX "SRP reset_host called\n");

	if (!srp_reconnect_target(target))
		ret = SUCCESS;

	return ret;
}
static ssize_t show_id_ext(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->id_ext));
}

static ssize_t show_ioc_guid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->ioc_guid));
}

static ssize_t show_service_id(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long) be64_to_cpu(target->service_id));
}

static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
}

static ssize_t show_dgid(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[0]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[1]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[2]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[3]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[4]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[5]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[6]),
		       be16_to_cpu(((__be16 *) target->path.dgid.raw)[7]));
}

static ssize_t show_zero_req_lim(struct class_device *cdev, char *buf)
{
	struct srp_target_port *target = host_to_target(class_to_shost(cdev));

	if (target->state == SRP_TARGET_DEAD ||
	    target->state == SRP_TARGET_REMOVED)
		return -ENODEV;

	return sprintf(buf, "%d\n", target->zero_req_lim);
}
static CLASS_DEVICE_ATTR(id_ext,	S_IRUGO, show_id_ext,		NULL);
static CLASS_DEVICE_ATTR(ioc_guid,	S_IRUGO, show_ioc_guid,		NULL);
static CLASS_DEVICE_ATTR(service_id,	S_IRUGO, show_service_id,	NULL);
static CLASS_DEVICE_ATTR(pkey,		S_IRUGO, show_pkey,		NULL);
static CLASS_DEVICE_ATTR(dgid,		S_IRUGO, show_dgid,		NULL);
static CLASS_DEVICE_ATTR(zero_req_lim,	S_IRUGO, show_zero_req_lim,	NULL);

static struct class_device_attribute *srp_host_attrs[] = {
	&class_device_attr_id_ext,
	&class_device_attr_ioc_guid,
	&class_device_attr_service_id,
	&class_device_attr_pkey,
	&class_device_attr_dgid,
	&class_device_attr_zero_req_lim,
	NULL
};
static struct scsi_host_template srp_template = {
	.module				= THIS_MODULE,
	.name				= DRV_NAME,
	.info				= srp_target_info,
	.queuecommand			= srp_queuecommand,
	.eh_abort_handler		= srp_abort,
	.eh_device_reset_handler	= srp_reset_device,
	.eh_host_reset_handler		= srp_reset_host,
	.can_queue			= SRP_SQ_SIZE,
	.this_id			= -1,
	.cmd_per_lun			= SRP_SQ_SIZE,
	.use_clustering			= ENABLE_CLUSTERING,
	.shost_attrs			= srp_host_attrs
};
static int srp_add_target(struct srp_host *host, struct srp_target_port *target)
{
	sprintf(target->target_name, "SRP.T10:%016llX",
		(unsigned long long) be64_to_cpu(target->id_ext));

	if (scsi_add_host(target->scsi_host, host->dev->dev->dma_device))
		return -ENODEV;

	spin_lock(&host->target_lock);
	list_add_tail(&target->list, &host->target_list);
	spin_unlock(&host->target_lock);

	target->state = SRP_TARGET_LIVE;

	scsi_scan_target(&target->scsi_host->shost_gendev,
			 0, target->scsi_id, SCAN_WILD_CARD, 0);

	return 0;
}
static void srp_release_class_dev(struct class_device *class_dev)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	complete(&host->released);
}

static struct class srp_class = {
	.name    = "infiniband_srp",
	.release = srp_release_class_dev
};
/*
 * Target ports are added by writing
 *
 * id_ext=<SRP ID ext>,ioc_guid=<SRP IOC GUID>,dgid=<dest GID>,
 * pkey=<P_Key>,service_id=<service ID>
 *
 * to the add_target sysfs attribute.
 */
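/*
 * For example (the HCA name and every ID below are illustrative only):
 *
 * echo -n id_ext=200100a0b80b25d0,ioc_guid=00a0b80b25d0000c,\
 * dgid=fe800000000000000005ad00000013e7,pkey=ffff,\
 * service_id=200100a0b80b25d0 \
 *	> /sys/class/infiniband_srp/srp-mthca0-1/add_target
 */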
enum {
	SRP_OPT_ERR		= 0,
	SRP_OPT_ID_EXT		= 1 << 0,
	SRP_OPT_IOC_GUID	= 1 << 1,
	SRP_OPT_DGID		= 1 << 2,
	SRP_OPT_PKEY		= 1 << 3,
	SRP_OPT_SERVICE_ID	= 1 << 4,
	SRP_OPT_MAX_SECT	= 1 << 5,
	SRP_OPT_MAX_CMD_PER_LUN	= 1 << 6,
	SRP_OPT_ALL		= (SRP_OPT_ID_EXT	|
				   SRP_OPT_IOC_GUID	|
				   SRP_OPT_DGID		|
				   SRP_OPT_PKEY		|
				   SRP_OPT_SERVICE_ID),
};
static match_table_t srp_opt_tokens = {
	{ SRP_OPT_ID_EXT,		"id_ext=%s" },
	{ SRP_OPT_IOC_GUID,		"ioc_guid=%s" },
	{ SRP_OPT_DGID,			"dgid=%s" },
	{ SRP_OPT_PKEY,			"pkey=%x" },
	{ SRP_OPT_SERVICE_ID,		"service_id=%s" },
	{ SRP_OPT_MAX_SECT,		"max_sect=%d" },
	{ SRP_OPT_MAX_CMD_PER_LUN,	"max_cmd_per_lun=%d" },
	{ SRP_OPT_ERR,			NULL }
};
static int srp_parse_options(const char *buf, struct srp_target_port *target)
{
	char *options, *sep_opt;
	char *p;
	char dgid[3];
	substring_t args[MAX_OPT_ARGS];
	int opt_mask = 0;
	int token;
	int ret = -EINVAL;
	int i;

	options = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	sep_opt = options;
	while ((p = strsep(&sep_opt, ",")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, srp_opt_tokens, args);
		opt_mask |= token;

		switch (token) {
		case SRP_OPT_ID_EXT:
			p = match_strdup(args);
			target->id_ext = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_IOC_GUID:
			p = match_strdup(args);
			target->ioc_guid = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_DGID:
			p = match_strdup(args);
			if (strlen(p) != 32) {
				printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
				kfree(p);
				goto out;
			}

			for (i = 0; i < 16; ++i) {
				strlcpy(dgid, p + i * 2, 3);
				target->path.dgid.raw[i] = simple_strtoul(dgid, NULL, 16);
			}
			kfree(p);
			break;

		case SRP_OPT_PKEY:
			if (match_hex(args, &token)) {
				printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
				goto out;
			}
			target->path.pkey = cpu_to_be16(token);
			break;

		case SRP_OPT_SERVICE_ID:
			p = match_strdup(args);
			target->service_id = cpu_to_be64(simple_strtoull(p, NULL, 16));
			kfree(p);
			break;

		case SRP_OPT_MAX_SECT:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->max_sectors = token;
			break;

		case SRP_OPT_MAX_CMD_PER_LUN:
			if (match_int(args, &token)) {
				printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
				goto out;
			}
			target->scsi_host->cmd_per_lun = min(token, SRP_SQ_SIZE);
			break;

		default:
			printk(KERN_WARNING PFX "unknown parameter or missing value "
			       "'%s' in target creation request\n", p);
			goto out;
		}
	}

	if ((opt_mask & SRP_OPT_ALL) == SRP_OPT_ALL)
		ret = 0;
	else
		for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
			if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
			    !(srp_opt_tokens[i].token & opt_mask))
				printk(KERN_WARNING PFX "target creation request is "
				       "missing parameter '%s'\n",
				       srp_opt_tokens[i].pattern);

out:
	kfree(options);
	return ret;
}
static ssize_t srp_create_target(struct class_device *class_dev,
				 const char *buf, size_t count)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);
	struct Scsi_Host *target_host;
	struct srp_target_port *target;
	int ret;
	int i;

	target_host = scsi_host_alloc(&srp_template,
				      sizeof (struct srp_target_port));
	if (!target_host)
		return -ENOMEM;

	target_host->max_lun = SRP_MAX_LUN;

	target = host_to_target(target_host);
	memset(target, 0, sizeof *target);

	target->scsi_host = target_host;
	target->srp_host  = host;

	INIT_WORK(&target->work, srp_reconnect_work, target);

	INIT_LIST_HEAD(&target->free_reqs);
	INIT_LIST_HEAD(&target->req_queue);
	for (i = 0; i < SRP_SQ_SIZE; ++i) {
		target->req_ring[i].index = i;
		list_add_tail(&target->req_ring[i].list, &target->free_reqs);
	}

	ret = srp_parse_options(buf, target);
	if (ret)
		goto err;

	ib_get_cached_gid(host->dev->dev, host->port, 0, &target->path.sgid);

	printk(KERN_DEBUG PFX "new target: id_ext %016llx ioc_guid %016llx pkey %04x "
	       "service_id %016llx dgid %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
	       (unsigned long long) be64_to_cpu(target->id_ext),
	       (unsigned long long) be64_to_cpu(target->ioc_guid),
	       be16_to_cpu(target->path.pkey),
	       (unsigned long long) be64_to_cpu(target->service_id),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[0]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[2]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[4]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[6]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[8]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[10]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[12]),
	       (int) be16_to_cpu(*(__be16 *) &target->path.dgid.raw[14]));

	ret = srp_create_target_ib(target);
	if (ret)
		goto err;

	target->cm_id = ib_create_cm_id(host->dev->dev, srp_cm_handler, target);
	if (IS_ERR(target->cm_id)) {
		ret = PTR_ERR(target->cm_id);
		goto err_free;
	}

	ret = srp_connect_target(target);
	if (ret) {
		printk(KERN_ERR PFX "Connection failed\n");
		goto err_cm_id;
	}

	ret = srp_add_target(host, target);
	if (ret)
		goto err_disconnect;

	return count;

err_disconnect:
	srp_disconnect_target(target);

err_cm_id:
	ib_destroy_cm_id(target->cm_id);

err_free:
	srp_free_target_ib(target);

err:
	scsi_host_put(target_host);

	return ret;
}
static CLASS_DEVICE_ATTR(add_target, S_IWUSR, NULL, srp_create_target);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%s\n", host->dev->dev->name);
}

static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
	struct srp_host *host =
		container_of(class_dev, struct srp_host, class_dev);

	return sprintf(buf, "%d\n", host->port);
}

static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
static struct srp_host *srp_add_port(struct srp_device *device, u8 port)
{
	struct srp_host *host;

	host = kzalloc(sizeof *host, GFP_KERNEL);
	if (!host)
		return NULL;

	INIT_LIST_HEAD(&host->target_list);
	spin_lock_init(&host->target_lock);
	init_completion(&host->released);
	host->dev  = device;
	host->port = port;

	host->initiator_port_id[7] = port;
	memcpy(host->initiator_port_id + 8, &device->dev->node_guid, 8);

	host->class_dev.class = &srp_class;
	host->class_dev.dev   = device->dev->dma_device;
	snprintf(host->class_dev.class_id, BUS_ID_SIZE, "srp-%s-%d",
		 device->dev->name, port);

	if (class_device_register(&host->class_dev))
		goto free_host;
	if (class_device_create_file(&host->class_dev, &class_device_attr_add_target))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_ibdev))
		goto err_class;
	if (class_device_create_file(&host->class_dev, &class_device_attr_port))
		goto err_class;

	return host;

err_class:
	class_device_unregister(&host->class_dev);

free_host:
	kfree(host);

	return NULL;
}
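/*
 * ib_client add callback: allocate per-HCA state (PD, DMA MR, and an
 * FMR pool when the HCA supports one), then register an srp_host for
 * each physical port.
 */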
static void srp_add_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct ib_device_attr *dev_attr;
	struct ib_fmr_pool_param fmr_param;
	struct srp_host *host;
	int s, e, p;

	dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
	if (!dev_attr)
		return;

	if (ib_query_device(device, dev_attr)) {
		printk(KERN_WARNING PFX "Query device failed for %s\n",
		       device->name);
		goto free_attr;
	}

	srp_dev = kmalloc(sizeof *srp_dev, GFP_KERNEL);
	if (!srp_dev)
		goto free_attr;

	/*
	 * Use the smallest page size supported by the HCA, down to a
	 * minimum of 512 bytes (which is the smallest sector that a
	 * SCSI command will ever carry).
	 */
	srp_dev->fmr_page_shift = max(9, ffs(dev_attr->page_size_cap) - 1);
	srp_dev->fmr_page_size  = 1 << srp_dev->fmr_page_shift;
	srp_dev->fmr_page_mask  = ~((unsigned long) srp_dev->fmr_page_size - 1);

	INIT_LIST_HEAD(&srp_dev->dev_list);

	srp_dev->dev = device;
	srp_dev->pd  = ib_alloc_pd(device);
	if (IS_ERR(srp_dev->pd))
		goto free_dev;

	srp_dev->mr = ib_get_dma_mr(srp_dev->pd,
				    IB_ACCESS_LOCAL_WRITE |
				    IB_ACCESS_REMOTE_READ |
				    IB_ACCESS_REMOTE_WRITE);
	if (IS_ERR(srp_dev->mr))
		goto err_pd;

	memset(&fmr_param, 0, sizeof fmr_param);
	fmr_param.pool_size	    = SRP_FMR_POOL_SIZE;
	fmr_param.dirty_watermark   = SRP_FMR_DIRTY_SIZE;
	fmr_param.cache		    = 1;
	fmr_param.max_pages_per_fmr = SRP_FMR_SIZE;
	fmr_param.page_shift	    = srp_dev->fmr_page_shift;
	fmr_param.access	    = (IB_ACCESS_LOCAL_WRITE |
				       IB_ACCESS_REMOTE_WRITE |
				       IB_ACCESS_REMOTE_READ);

	srp_dev->fmr_pool = ib_create_fmr_pool(srp_dev->pd, &fmr_param);
	if (IS_ERR(srp_dev->fmr_pool))
		srp_dev->fmr_pool = NULL;

	if (device->node_type == IB_NODE_SWITCH) {
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		host = srp_add_port(srp_dev, p);
		if (host)
			list_add_tail(&host->list, &srp_dev->dev_list);
	}

	ib_set_client_data(device, &srp_client, srp_dev);

	goto free_attr;

err_pd:
	ib_dealloc_pd(srp_dev->pd);

free_dev:
	kfree(srp_dev);

free_attr:
	kfree(dev_attr);
}
static void srp_remove_one(struct ib_device *device)
{
	struct srp_device *srp_dev;
	struct srp_host *host, *tmp_host;
	LIST_HEAD(target_list);
	struct srp_target_port *target, *tmp_target;

	srp_dev = ib_get_client_data(device, &srp_client);

	list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) {
		class_device_unregister(&host->class_dev);
		/*
		 * Wait for the sysfs entry to go away, so that no new
		 * target ports can be created.
		 */
		wait_for_completion(&host->released);

		/*
		 * Mark all target ports as removed, so we stop queueing
		 * commands and don't try to reconnect.
		 */
		spin_lock(&host->target_lock);
		list_for_each_entry(target, &host->target_list, list) {
			spin_lock_irq(target->scsi_host->host_lock);
			target->state = SRP_TARGET_REMOVED;
			spin_unlock_irq(target->scsi_host->host_lock);
		}
		spin_unlock(&host->target_lock);

		/*
		 * Wait for any reconnection tasks that may have
		 * started before we marked our target ports as
		 * removed, and any target port removal tasks.
		 */
		flush_scheduled_work();

		list_for_each_entry_safe(target, tmp_target,
					 &host->target_list, list) {
			scsi_remove_host(target->scsi_host);
			srp_disconnect_target(target);
			ib_destroy_cm_id(target->cm_id);
			srp_free_target_ib(target);
			scsi_host_put(target->scsi_host);
		}

		kfree(host);
	}

	if (srp_dev->fmr_pool)
		ib_destroy_fmr_pool(srp_dev->fmr_pool);
	ib_dereg_mr(srp_dev->mr);
	ib_dealloc_pd(srp_dev->pd);

	kfree(srp_dev);
}
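/*
 * Size the TX IUs so that an SRP_CMD carrying an indirect descriptor
 * table with srp_sg_tablesize entries (16 bytes each) always fits,
 * then register the sysfs class and the IB client.
 */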
static int __init srp_init_module(void)
{
	int ret;

	srp_template.sg_tablesize = srp_sg_tablesize;
	srp_max_iu_len = (sizeof (struct srp_cmd) +
			  sizeof (struct srp_indirect_buf) +
			  srp_sg_tablesize * 16);

	ret = class_register(&srp_class);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
		return ret;
	}

	ret = ib_register_client(&srp_client);
	if (ret) {
		printk(KERN_ERR PFX "couldn't register IB client\n");
		class_unregister(&srp_class);
		return ret;
	}

	return 0;
}

static void __exit srp_cleanup_module(void)
{
	ib_unregister_client(&srp_client);
	class_unregister(&srp_class);
}

module_init(srp_init_module);
module_exit(srp_cleanup_module);