/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
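
/*
 * fcloop: NVMe-FC loopback test driver. It registers with both the
 * NVMe-FC host transport and the nvmet-FC target transport and wires
 * the two together in software, so the FC-NVME transport paths can be
 * exercised without real Fibre Channel hardware.
 */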
	NVMF_OPT_WWNN = 1 << 0,
	NVMF_OPT_WWPN = 1 << 1,
	NVMF_OPT_ROLES = 1 << 2,
	NVMF_OPT_FCADDR = 1 << 3,
	NVMF_OPT_LPWWNN = 1 << 4,
	NVMF_OPT_LPWWPN = 1 << 5,

struct fcloop_ctrl_options {
static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN, "wwnn=%s" },
	{ NVMF_OPT_WWPN, "wwpn=%s" },
	{ NVMF_OPT_ROLES, "roles=%d" },
	{ NVMF_OPT_FCADDR, "fcaddr=%x" },
	{ NVMF_OPT_LPWWNN, "lpwwnn=%s" },
	{ NVMF_OPT_LPWWPN, "lpwwpn=%s" },
	{ NVMF_OPT_ERR, NULL }
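
/*
 * An option string is a comma-separated list of "name=value" pairs,
 * e.g. (illustrative values only):
 *
 *	wwnn=0x20000090fd001234,wwpn=0x10000090fd001234
 *
 * Remote ports additionally name the local port to attach to via
 * lpwwnn=/lpwwpn=.
 */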
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;

	options = o = kstrdup(buf, GFP_KERNEL);

	while ((p = strsep(&o, ",\n")) != NULL) {

		token = match_token(p, opt_tokens, args);
			if (match_u64(args, &token64)) {
				goto out_free_options;
			if (match_u64(args, &token64)) {
				goto out_free_options;
			if (match_int(args, &token)) {
				goto out_free_options;
			if (match_hex(args, &token)) {
				goto out_free_options;
			opts->fcaddr = token;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				goto out_free_options;
			opts->lpwwnn = token64;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				goto out_free_options;
			opts->lpwwpn = token64;
			pr_warn("unknown parameter or missing value '%s'\n", p);
			goto out_free_options;
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;

	options = o = kstrdup(buf, GFP_KERNEL);

	while ((p = strsep(&o, ",\n")) != NULL) {

		token = match_token(p, opt_tokens, args);
			if (match_u64(args, &token64)) {
				goto out_free_options;
			if (match_u64(args, &token64)) {
				goto out_free_options;
			pr_warn("unknown parameter or missing value '%s'\n", p);
			goto out_free_options;
#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define ALL_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN | NVMF_OPT_ROLES | \
			 NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);
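
/*
 * Port bookkeeping: an fcloop_lport wraps an NVMe-FC local port, an
 * fcloop_rport wraps the initiator-side remote port, and an
 * fcloop_tport wraps the nvmet-FC target port. An fcloop_nport pairs
 * the rport and tport that share the same node/port name and points
 * back at the local port they are attached to.
 */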
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct completion rport_unreg_done;
	struct completion tport_unreg_done;

struct fcloop_lsreq {
	struct fcloop_tport *tport;
	struct nvmefc_ls_req *lsreq;
	struct work_struct work;
	struct nvmefc_tgt_ls_req tgt_ls_req;

struct fcloop_fcpreq {
	struct fcloop_tport *tport;
	struct nvmefc_fcp_req *fcpreq;
	struct work_struct work;
	struct nvmefc_tgt_fcp_req tgt_fcp_req;

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req *fcpreq;
	struct fcloop_fcpreq *tfcp_req;
	struct work_struct iniwork;
static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);

fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,

fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
/*
 * Transmit of LS RSP done (e.g. buffers all set). Call back up the
 * initiator "done" flows.
 */
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
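
/*
 * LS request issued by the initiator: hand it to the target side of
 * the loopback via nvmet_fc_rcv_ls_req(). If no target port is
 * attached, fail the request back to the initiator from a work item.
 */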
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		schedule_work(&tls_req->work);

	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
		 lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);
/*
 * FCP IO operation done by initiator abort.
 * Call back up the initiator "done" flows.
 */
fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
	struct fcloop_ini_fcpreq *inireq =
		container_of(work, struct fcloop_ini_fcpreq, iniwork);

	inireq->fcpreq->done(inireq->fcpreq);

/*
 * FCP IO operation done by target completion.
 * Call back up the initiator "done" flows.
 */
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, work);
	struct fcloop_tport *tport = tfcp_req->tport;
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	spin_unlock(&tfcp_req->reqlock);

	if (tport->remoteport && fcpreq) {
		fcpreq->status = tfcp_req->status;
		fcpreq->done(fcpreq);
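
/*
 * FCP command issued by the initiator: allocate a per-I/O loopback
 * context and hand the command to the nvmet-FC target via
 * nvmet_fc_rcv_fcp_req().
 */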
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);

	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
				 fcpreq->cmdaddr, fcpreq->cmdlen);
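
/*
 * Copy data between the target-side scatterlist (data_sg) and the
 * initiator-side scatterlist (io_sg), first skipping "offset" bytes
 * into the initiator buffer. WRITEDATA copies initiator -> target;
 * read ops copy target -> initiator.
 */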
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

		tlen = min_t(u32, offset, io_len);
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;

		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
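
/*
 * Target-side data transfer / response op. Move data with
 * fcloop_fcp_copy_data(), mirror the transferred length and response
 * back into the initiator's nvmefc_fcp_req, and complete the target
 * op. The active/aborted flags guard against ops issued while an
 * earlier op is still outstanding or after the target has aborted
 * the I/O.
 */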
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from the
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
			fcpreq->transferred_length += xfrlen;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
			fcpreq->transferred_length += xfrlen;

		if (op == NVMET_FCOP_READDATA)

		/* Fall-Thru to RSP handling */

			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
			fcpreq->rcv_rsplen = rsplen;

		tfcp_req->status = 0;

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * Mark aborted only in case there were 2 threads in the transport
	 * (one doing io, the other doing the abort): it only kills ops
	 * posted after the abort request.
	 */
	spin_lock(&tfcp_req->reqlock);
	active = tfcp_req->active;
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;

	/*
	 * Nothing more to do. If the io wasn't active, the transport should
	 * immediately call req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */

fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->work);
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)

fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;

		/* abort has already been called */

	if (rport->targetport)
		nvmet_fc_rcv_fcp_abort(rport->targetport,
					&tfcp_req->tgt_fcp_req);

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	inireq->tfcp_req = NULL;
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	/* post the aborted io completion */
	fcpreq->status = -ECANCELED;
	schedule_work(&inireq->iniwork);
fcloop_localport_delete(struct nvme_fc_local_port *localport)
	struct fcloop_lport *lport = localport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);

fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
	struct fcloop_rport *rport = remoteport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&rport->nport->rport_unreg_done);

fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
	struct fcloop_tport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&tport->nport->tport_unreg_done);

#define FCLOOP_HW_QUEUES	4
#define FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF
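
/*
 * fcloop registers as both an NVMe-FC host LLDD (fctemplate) and an
 * nvmet-FC target LLDD (tgttemplate); the templates below supply the
 * loopback implementations of the transport entry points.
 */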
static struct nvme_fc_port_template fctemplate = {
	.localport_delete = fcloop_localport_delete,
	.remoteport_delete = fcloop_remoteport_delete,
	.create_queue = fcloop_create_queue,
	.delete_queue = fcloop_delete_queue,
	.ls_req = fcloop_ls_req,
	.fcp_io = fcloop_fcp_req,
	.ls_abort = fcloop_ls_abort,
	.fcp_abort = fcloop_fcp_abort,
	.max_hw_queues = FCLOOP_HW_QUEUES,
	.max_sgl_segments = FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
	.dma_boundary = FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz = sizeof(struct fcloop_lport),
	.remote_priv_sz = sizeof(struct fcloop_rport),
	.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete = fcloop_targetport_delete,
	.xmt_ls_rsp = fcloop_xmt_ls_rsp,
	.fcp_op = fcloop_fcp_op,
	.fcp_abort = fcloop_tgt_fcp_abort,
	.fcp_req_release = fcloop_fcp_req_release,
	.max_hw_queues = FCLOOP_HW_QUEUES,
	.max_sgl_segments = FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
	.dma_boundary = FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
			NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED |
			NVMET_FCTGTFEAT_OPDONE_IN_ISR,
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct fcloop_tport),
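
/*
 * The sysfs "store" handlers below are the user interface: they parse
 * an option string and create or delete the corresponding local,
 * remote and target ports.
 */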
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);

	ret = fcloop_parse_options(opts, buf);

	/* everything there? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {

	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);

	lport = localport->private;
	lport->localport = localport;
	INIT_LIST_HEAD(&lport->lport_list);

	spin_lock_irqsave(&fcloop_lock, flags);
	list_add_tail(&lport->lport_list, &fcloop_lports);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	/* mark all of the input buffer consumed */
	return ret ? ret : count;
__unlink_local_port(struct fcloop_lport *lport)
	list_del(&lport->lport_list);

__wait_localport_unreg(struct fcloop_lport *lport)

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			__unlink_local_port(lport);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
fcloop_nport_free(struct kref *ref)
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

fcloop_nport_put(struct fcloop_nport *nport)
	kref_put(&nport->ref, fcloop_nport_free);

fcloop_nport_get(struct fcloop_nport *nport)
	return kref_get_unless_zero(&nport->ref);
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);

	ret = fcloop_parse_options(opts, buf);

	/* everything there? */
	if ((opts->mask & opts_mask) != opts_mask) {

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)

			goto out_invalid_opts;
		newnport->lport = lport;

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				goto out_invalid_opts;

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	spin_unlock_irqrestore(&fcloop_lock, flags);
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;

	nport = fcloop_alloc_nport(buf, count, true);

	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);

	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;

__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)

	init_completion(&nport->rport_unreg_done);

	ret = nvme_fc_unregister_remoteport(rport->remoteport);

	wait_for_completion(&nport->rport_unreg_done);

	fcloop_nport_put(nport);
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport;
	u64 nodename, portname;
	unsigned long flags;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			rport = __unlink_remote_port(nport);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	ret = __wait_remoteport_unreg(nport, rport);

	return ret ? ret : count;
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;

	nport = fcloop_alloc_nport(buf, count, false);

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
		fcloop_nport_put(nport);

	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;
static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)

	init_completion(&nport->tport_unreg_done);

	ret = nvmet_fc_unregister_targetport(tport->targetport);

	wait_for_completion(&nport->tport_unreg_done);

	fcloop_nport_put(nport);
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport;
	u64 nodename, portname;
	unsigned long flags;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			tport = __unlink_target_port(nport);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	ret = __wait_targetport_unreg(nport, tport);

	return ret ? ret : count;
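
/*
 * Ports are managed through these write-only attributes, which show up
 * under /sys/class/fcloop/ctl/ once the "ctl" device is created in
 * fcloop_init(). Illustrative usage (the WWNs are made-up example
 * values):
 *
 *   echo "wwnn=0x20000090fd001234,wwpn=0x10000090fd001234" > /sys/class/fcloop/ctl/add_local_port
 *   echo "wwnn=0x20000090fd005678,wwpn=0x10000090fd005678" > /sys/class/fcloop/ctl/add_target_port
 *   echo "wwnn=0x20000090fd005678,wwpn=0x10000090fd005678,lpwwnn=0x20000090fd001234,lpwwpn=0x10000090fd001234" > /sys/class/fcloop/ctl/add_remote_port
 */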
static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
static struct attribute_group fcloop_dev_attrs_group = {
	.attrs = fcloop_dev_attrs,

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
static struct class *fcloop_class;
static struct device *fcloop_device;

static int __init fcloop_init(void)

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;

	get_device(fcloop_device);

	class_destroy(fcloop_class);
static void __exit fcloop_exit(void)
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);

		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_targetport_unreg(nport, tport);
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __wait_remoteport_unreg(nport, rport);
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);

		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");