/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		4

/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE	2048

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;
struct nvmet_fc_ls_iod {
	struct nvmefc_tgt_ls_req	*lsreq;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_list;	/* tgtport->ls_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;

	u8				*rqstbuf;
	u8				*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));
#define NVMET_FC_MAX_KB_PER_XFR		256

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
};
struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*data_sg;
	struct scatterlist		*next_sg;
	int				data_sg_cnt;
	u32				next_sg_offset;
	u32				total_length;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		work;
	struct work_struct		done_work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};
struct nvmet_fc_tgtport {

	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list; /* nvmet_fc_target_list */
	struct device			*dev;	/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct ida			assoc_cnt;
	struct nvmet_port		*port;
	struct kref			ref;
};
struct nvmet_fc_tgt_queue {
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	u16				sqhd;
	int				cpu;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_port		*port;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
	struct list_head		fod_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	struct nvmet_fc_tgtport		*tgtport;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES];
	struct kref			ref;
};

static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}

/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 *   in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
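
/*
 * Worked example (illustrative only; the values below are made up):
 * if an association was assigned the random value 0x123456789abc0000,
 * the connection id for queue 3 is formed and decomposed as:
 *
 *	u64 assoc_id = 0x123456789abc0000ULL;
 *	u64 conn_id  = assoc_id | 3;	// == nvmet_fc_makeconnid(assoc, 3)
 *
 *	nvmet_fc_getassociationid(conn_id)	== 0x123456789abc0000ULL
 *	nvmet_fc_getqueueid(conn_id)		== 3
 *
 * and, per the note above, queue 0's connection id equals the
 * association id itself.
 */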

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
			fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}

/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);

static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);

/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrapper all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}

/* *********************** FC-NVME Port Management ************************ */

static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_list, &tgtport->ls_list);

		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
					GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						NVME_FC_MAX_LS_BUFFER_SIZE,
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
			NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}

	kfree(tgtport->iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_list,
					struct nvmet_fc_ls_iod, ls_list);
	if (iod)
		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}


static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_list, &tgtport->ls_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
		INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;
	unsigned long flags;

	spin_lock_irqsave(&queue->qlock, flags);
	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		fod->abort = false;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
	return fod;
}

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->qlock, flags);
	list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
	fod->active = false;
	spin_unlock_irqrestore(&queue->qlock, flags);

	/*
	 * release the reference taken at queue lookup and fod allocation
	 */
	nvmet_fc_tgt_q_put(queue);
}

static int
nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
{
	int cpu, idx, cnt;

	if (!(tgtport->ops->target_features &
			NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
	    tgtport->ops->max_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	/* Simple cpu selection based on qid modulo active cpu count */
	idx = !qid ? 0 : (qid - 1) % num_active_cpus();

	/* find the n'th active cpu */
	for (cpu = 0, cnt = 0; ; ) {
		if (cpu_active(cpu)) {
			if (cnt == idx)
				break;
			cnt++;
		}
		cpu = (cpu + 1) % num_possible_cpus();
	}

	return cpu;
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid >= NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc((sizeof(*queue) +
			(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
			GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	queue->port = assoc->tgtport->port;
	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
	INIT_LIST_HEAD(&queue->fod_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}

static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}

static void
nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
				struct nvmefc_tgt_fcp_req *fcpreq)
{
	int ret;

	fcpreq->op = NVMET_FCOP_ABORT;
	fcpreq->offset = 0;
	fcpreq->timeout = 0;
	fcpreq->transfer_length = 0;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->sg_cnt = 0;

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
	if (ret)
		/* should never reach here !! */
		WARN_ON(1);
}

static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	unsigned long flags;
	int i;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			spin_unlock(&fod->flock);
		}
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	if (disconnect)
		nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida_put;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);

	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_ida_put:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			nvmet_fc_tgt_a_get(assoc);
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}


/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	newrec->fc_target_port.private = &newrec[1];
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
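
/*
 * Illustrative sketch only (not taken from an in-tree LLDD): a driver
 * would typically register a targetport while bringing up a local FC
 * port. The lport_* and lldd_* names below are hypothetical.
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name	= lport_wwnn,
 *		.port_name	= lport_wwpn,
 *		.port_id	= lport_d_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int err;
 *
 *	err = nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
 *					   &pdev->dev, &targetport);
 *	if (err)
 *		return err;
 *	// save targetport; it is passed on every nvmet_fc_rcv_*() call
 *	// and eventually to nvmet_fc_unregister_targetport().
 */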


static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		spin_unlock_irqrestore(&tgtport->lock, flags);
		nvmet_fc_delete_target_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
		spin_lock_irqsave(&tgtport->lock, flags);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			nvmet_fc_delete_target_assoc(assoc);
			nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);


/* *********************** FC-NVME LS Handling **************************** */


static void
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd)
{
	struct fcnvme_ls_acc_hdr *acc = buf;

	acc->w0.ls_cmd = ls_cmd;
	acc->desc_list_len = desc_len;
	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	acc->rqst.desc_len =
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
}

static int
nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
			u8 reason, u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
			ls_cmd);
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;

	return sizeof(struct fcnvme_ls_rjt);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_CR_ASSOC_LEN	= 1,
	VERR_CR_ASSOC_RQST_LEN	= 2,
	VERR_CR_ASSOC_CMD	= 3,
	VERR_CR_ASSOC_CMD_LEN	= 4,
	VERR_ERSP_RATIO		= 5,
	VERR_ASSOC_ALLOC_FAIL	= 6,
	VERR_QUEUE_ALLOC_FAIL	= 7,
	VERR_CR_CONN_LEN	= 8,
	VERR_CR_CONN_RQST_LEN	= 9,
	VERR_ASSOC_ID		= 10,
	VERR_ASSOC_ID_LEN	= 11,
	VERR_NO_ASSOC		= 12,
	VERR_CONN_ID		= 13,
	VERR_CONN_ID_LEN	= 14,
	VERR_NO_CONN		= 15,
	VERR_CR_CONN_CMD	= 16,
	VERR_CR_CONN_CMD_LEN	= 17,
	VERR_DISCONN_LEN	= 18,
	VERR_DISCONN_RQST_LEN	= 19,
	VERR_DISCONN_CMD	= 20,
	VERR_DISCONN_CMD_LEN	= 21,
	VERR_DISCONN_SCOPE	= 22,
	VERR_RS_LEN		= 23,
	VERR_RS_RQST_LEN	= 24,
	VERR_RS_CMD		= 25,
	VERR_RS_CMD_LEN		= 26,
	VERR_RS_RCTL		= 27,
	VERR_RS_RO		= 28,
};

static char *validation_errors[] = {
	"OK",
	"Bad CR_ASSOC Length",
	"Bad CR_ASSOC Rqst Length",
	"Not CR_ASSOC Cmd",
	"Bad CR_ASSOC Cmd Length",
	"Bad Ersp Ratio",
	"Association Allocation Failed",
	"Queue Allocation Failed",
	"Bad CR_CONN Length",
	"Bad CR_CONN Rqst Length",
	"Not Association ID",
	"Bad Association ID Length",
	"No Association",
	"Not Connection ID",
	"Bad Connection ID Length",
	"No Connection",
	"Not CR_CONN Cmd",
	"Bad CR_CONN Cmd Length",
	"Bad DISCONN Length",
	"Bad DISCONN Rqst Length",
	"Not DISCONN Cmd",
	"Bad DISCONN Cmd Length",
	"Bad Disconnect Scope",
	"Bad RS Length",
	"Bad RS Rqst Length",
	"Not RS Cmd",
	"Bad RS Cmd Length",
	"Not RS RCTL",
	"Bad RS Relative Offset",
};

static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst =
			(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_assoc_acc *acc =
			(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
		ret = VERR_CR_ASSOC_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_rqst)))
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (rqst->assoc_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}

static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst =
			(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_conn_acc *acc =
			(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;

	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}

static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_rqst *rqst =
			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
	struct fcnvme_ls_disconnect_acc *acc =
			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue = NULL;
	struct nvmet_fc_tgt_assoc *assoc;
	int ret = 0;
	bool del_assoc = false;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
		ret = VERR_DISCONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_rqst)))
		ret = VERR_DISCONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->discon_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
		ret = VERR_DISCONN_CMD;
	else if (rqst->discon_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
		ret = VERR_DISCONN_CMD_LEN;
	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
			(rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
		ret = VERR_DISCONN_SCOPE;
	else {
		/* match an active association */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (assoc) {
			if (rqst->discon_cmd.scope ==
					FCNVME_DISCONN_CONNECTION) {
				queue = nvmet_fc_find_target_queue(tgtport,
						be64_to_cpu(
							rqst->discon_cmd.id));
				if (!queue) {
					nvmet_fc_tgt_a_put(assoc);
					ret = VERR_NO_CONN;
				}
			}
		} else
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					FCNVME_RJT_RC_INV_ASSOC :
					(ret == VERR_NO_CONN) ?
						FCNVME_RJT_RC_INV_CONN :
						FCNVME_RJT_RC_LOGIC,
				FCNVME_RJT_EXP_NONE, 0);
		return;
	}

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_acc)),
			FCNVME_LS_DISCONNECT);


	/* are we to delete a Connection ID (queue) */
	if (queue) {
		int qid = queue->qid;

		nvmet_fc_delete_target_queue(queue);

		/* release the get taken by find_target_queue */
		nvmet_fc_tgt_q_put(queue);

		/* tear association down if admin queue was terminated */
		if (!qid)
			del_assoc = true;
	}

	/* release get taken in nvmet_fc_find_target_assoc */
	nvmet_fc_tgt_a_put(iod->assoc);

	if (del_assoc)
		nvmet_fc_delete_target_assoc(iod->assoc);
}


/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
{
	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
				NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 =
			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;

	iod->lsreq->nvmet_fc_private = iod;
	iod->lsreq->rspbuf = iod->rspbuf;
	iod->lsreq->rspdma = iod->rspdma;
	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
	/* Be preventative. handlers will later set to valid length */
	iod->lsreq->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * handlers:
	 *   parse request input, execute the request, and format the
	 *   LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* Creates Association and initial Admin Queue/Connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* Creates an IO Queue/Connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT:
		/* Terminate a Queue/Connection or the Association */
		nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
				FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
	}

	nvmet_fc_xmt_ls_rsp(tgtport, iod);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
		container_of(work, struct nvmet_fc_ls_iod, work);
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	nvmet_fc_handle_ls_rqst(tgtport, iod);
}

/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the LS was
 *              received on.
 * @lsreq:      pointer to a lsreq request structure to be used to reference
 *              the exchange corresponding to the LS.
 * @lsreqbuf:   pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_ls_req *lsreq,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_ls_iod *iod;

	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
		return -E2BIG;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	iod = nvmet_fc_alloc_ls_iod(tgtport);
	if (!iod) {
		nvmet_fc_tgtport_put(tgtport);
		return -ENOENT;
	}

	iod->lsreq = lsreq;
	iod->fcpreq = NULL;
	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
	iod->rqstdatalen = lsreqbuf_len;

	schedule_work(&iod->work);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
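
/*
 * Illustrative sketch of the LLDD side (hypothetical names): on receipt
 * of an FC-NVME LS exchange the driver hands the payload to nvmet-fc and,
 * because the payload is copied above, may reuse its receive buffer as
 * soon as the call returns.
 *
 *	struct nvmefc_tgt_ls_req *lsreq = &lldd_exch->tgt_ls_req;
 *
 *	if (nvmet_fc_rcv_ls_req(targetport, lsreq, rx_buf, rx_len))
 *		lldd_abort_exchange(lldd_exch);		// hypothetical helper
 */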


/*
 * **********************
 * Start of FCP handling
 * **********************
 */

static int
nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	struct page *page;
	unsigned int nent;
	u32 page_len, length;
	int i = 0;

	length = fod->total_length;
	nent = DIV_ROUND_UP(length, PAGE_SIZE);
	sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
	if (!sg)
		goto out;

	sg_init_table(sg, nent);

	while (length) {
		page_len = min_t(u32, length, PAGE_SIZE);

		page = alloc_page(GFP_KERNEL);
		if (!page)
			goto out_free_pages;

		sg_set_page(&sg[i], page, page_len, 0);
		length -= page_len;
		i++;
	}

	fod->data_sg = sg;
	fod->data_sg_cnt = nent;
	fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
				/* note: write from initiator perspective */

	return 0;

out_free_pages:
	while (i > 0) {
		i--;
		__free_page(sg_page(&sg[i]));
	}
	kfree(sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
out:
	return NVME_SC_INTERNAL;
}

static void
nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
{
	struct scatterlist *sg;
	int count;

	if (!fod->data_sg || !fod->data_sg_cnt)
		return;

	fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
				((fod->io_dir == NVMET_FCP_WRITE) ?
					DMA_FROM_DEVICE : DMA_TO_DEVICE));
	for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
		__free_page(sg_page(sg));
	kfree(fod->data_sg);
	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
}

static bool
queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
{
	u32 sqtail, used;

	/* egad, this is ugly. And sqtail is just a best guess */
	sqtail = atomic_read(&q->sqtail) % q->sqsize;

	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
}
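
/*
 * Worked example (illustrative only): with sqsize = 32, sqhd = 4 and a
 * running sqtail counter of 33, sqtail = 33 % 32 = 1, so
 * used = 1 + 32 - 4 = 29 and 29 * 10 = 290 >= (32 - 1) * 9 = 279;
 * the queue is treated as 90% (or more) full and an explicit ersp will
 * be sent rather than a zero-filled response.
 */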

/*
 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
 */
static void
nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &ersp->cqe;
	u32 *cqewd = (u32 *)cqe;
	bool send_ersp = false;
	u32 rsn, rspcnt, xfr_length;

	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
		xfr_length = fod->total_length;
	else
		xfr_length = fod->offset;

	/*
	 * check to see if we can send a 0's rsp.
	 *   Note: to send a 0's response, the NVME-FC host transport will
	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
	 *   seen in an ersp), and command_id. Thus it will create a
	 *   zero-filled CQE with those known fields filled in. Transport
	 *   must send an ersp for any condition where the cqe won't match
	 *   this.
	 *
	 * Here are the FC-NVME mandated cases where we must send an ersp:
	 *  every N responses, where N=ersp_ratio
	 *  force fabric commands to send ersp's (not in FC-NVME but good
	 *    practice)
	 *  normal cmds: any time status is non-zero, or status is zero
	 *     but words 0 or 1 are non-zero.
	 *  the SQ is 90% or more full
	 *  the cmd is a fused command
	 *  transferred data length not equal to cmd iu length
	 */
	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
	if (!(rspcnt % fod->queue->ersp_ratio) ||
	    sqe->opcode == nvme_fabrics_command ||
	    xfr_length != fod->total_length ||
	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
	    queue_90percent_full(fod->queue, cqe->sq_head))
		send_ersp = true;

	/* re-set the fields */
	fod->fcpreq->rspaddr = ersp;
	fod->fcpreq->rspdma = fod->rspdma;

	if (!send_ersp) {
		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
	} else {
		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
		rsn = atomic_inc_return(&fod->queue->rsn);
		ersp->rsn = cpu_to_be32(rsn);
		ersp->xfrd_len = cpu_to_be32(xfr_length);
		fod->fcpreq->rsplen = sizeof(*ersp);
	}

	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
}

static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);

static void
nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	int ret;

	fod->fcpreq->op = NVMET_FCOP_RSP;
	fod->fcpreq->timeout = 0;

	nvmet_fc_prep_fcp_rsp(tgtport, fod);

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret)
		nvmet_fc_abort_op(tgtport, fod->fcpreq);
}

static void
nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod, u8 op)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct scatterlist *sg, *datasg;
	u32 tlen, sg_off;
	int ret;

	fcpreq->op = op;
	fcpreq->offset = fod->offset;
	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
	tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
			(fod->total_length - fod->offset));
	tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
	tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
					* PAGE_SIZE);
	fcpreq->transfer_length = tlen;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->rsplen = 0;

	fcpreq->sg_cnt = 0;

	datasg = fod->next_sg;
	sg_off = fod->next_sg_offset;

	for (sg = fcpreq->sg ; tlen; sg++) {
		*sg = *datasg;
		if (sg_off) {
			sg->offset += sg_off;
			sg->length -= sg_off;
			sg->dma_address += sg_off;
			sg_off = 0;
		}
		if (tlen < sg->length) {
			sg->length = tlen;
			fod->next_sg = datasg;
			fod->next_sg_offset += tlen;
		} else if (tlen == sg->length) {
			fod->next_sg_offset = 0;
			fod->next_sg = sg_next(datasg);
		} else {
			fod->next_sg_offset = 0;
			datasg = sg_next(datasg);
		}
		tlen -= sg->length;
		fcpreq->sg_cnt++;
	}

	/*
	 * If the last READDATA request: check if LLDD supports
	 * combined xfr with response.
	 */
	if ((op == NVMET_FCOP_READDATA) &&
	    ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
		fcpreq->op = NVMET_FCOP_READDATA_RSP;
		nvmet_fc_prep_fcp_rsp(tgtport, fod);
	}

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret) {
		/*
		 * should be ok to set w/o lock as its in the thread of
		 * execution (not an async timer routine) and doesn't
		 * contend with any clearing action
		 */
		fod->abort = true;

		if (op == NVMET_FCOP_WRITEDATA)
			nvmet_req_complete(&fod->req,
					NVME_SC_FC_TRANSPORT_ERROR);
		else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
			fcpreq->fcp_error = ret;
			fcpreq->transferred_length = 0;
			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
		}
	}
}

/*
 * actual done handler for FCP operations when completed by the lldd
 */
static void
nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	spin_unlock_irqrestore(&fod->flock, flags);

	/* if in the middle of an io and we need to tear down */
	if (abort && fcpreq->op != NVMET_FCOP_ABORT) {
		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_req_complete(&fod->req, fcpreq->fcp_error);
		return;
	}

	switch (fcpreq->op) {

	case NVMET_FCOP_WRITEDATA:
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			nvmet_req_complete(&fod->req,
					NVME_SC_FC_TRANSPORT_ERROR);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->total_length) {
			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_WRITEDATA);
			return;
		}

		/* data transfer complete, resume with nvmet layer */

		fod->req.execute(&fod->req);

		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		if (fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			/* data no longer needed */
			nvmet_fc_free_tgt_pgs(fod);

			nvmet_fc_abort_op(tgtport, fod->fcpreq);
			return;
		}

		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
			/* data no longer needed */
			nvmet_fc_free_tgt_pgs(fod);
			fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
			nvmet_fc_free_fcp_iod(fod->queue, fod);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->total_length) {
			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* data transfer complete, send response */

		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_fc_xmt_fcp_rsp(tgtport, fod);

		break;

	case NVMET_FCOP_RSP:
	case NVMET_FCOP_ABORT:
		fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		nvmet_fc_free_fcp_iod(fod->queue, fod);
		break;

	default:
		nvmet_fc_free_tgt_pgs(fod);
		nvmet_fc_abort_op(tgtport, fod->fcpreq);
		break;
	}
}

static void
nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, done_work);

	nvmet_fc_fod_op_done(fod);
}

static void
nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgt_queue *queue = fod->queue;

	if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
		/* context switch so completion is not in ISR context */
		queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
	else
		nvmet_fc_fod_op_done(fod);
}

/*
 * actual completion handler after execution by the nvmet layer
 */
static void
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod, int status)
{
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	spin_unlock_irqrestore(&fod->flock, flags);

	/* if we have a CQE, snoop the last sq_head value */
	if (!status)
		fod->queue->sqhd = cqe->sq_head;

	if (abort) {
		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_fc_abort_op(tgtport, fod->fcpreq);
		return;
	}

	/* if an error handling the cmd post initial parsing */
	if (status) {
		/* fudge up a failed CQE status for our transport error */
		memset(cqe, 0, sizeof(*cqe));
		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
		cqe->sq_id = cpu_to_le16(fod->queue->qid);
		cqe->command_id = sqe->command_id;
		cqe->status = cpu_to_le16(status);
	} else {

		/*
		 * try to push the data even if the SQE status is non-zero.
		 * There may be a status where data still was intended to
		 * be moved
		 */
		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
			/* push the data over before sending rsp */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* writes & no data - fall thru */
	}

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
}

static void
nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
{
	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
}

/*
 * Actual processing routine for received FC-NVME FCP CMD IUs from the LLD
 */
static void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
	int ret;

	/*
	 * Fused commands are currently not supported in the linux
	 * implementation.
	 *
	 * As such, the implementation of the FC transport does not
	 * look at the fused commands and order delivery to the upper
	 * layer until we have both based on csn.
	 */

	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;

	fod->total_length = be32_to_cpu(cmdiu->data_len);
	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
		fod->io_dir = NVMET_FCP_WRITE;
		if (!nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
		fod->io_dir = NVMET_FCP_READ;
		if (nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else {
		fod->io_dir = NVMET_FCP_NODATA;
		if (fod->total_length)
			goto transport_error;
	}

	fod->req.cmd = &fod->cmdiubuf.sqe;
	fod->req.rsp = &fod->rspiubuf.cqe;
	fod->req.port = fod->queue->port;

	/* ensure nvmet handlers will set cmd handler callback */
	fod->req.execute = NULL;

	/* clear any response payload */
	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));

	ret = nvmet_req_init(&fod->req,
				&fod->queue->nvme_cq,
				&fod->queue->nvme_sq,
				&nvmet_fc_tgt_fcp_ops);
	if (!ret) {	/* bad SQE content */
		nvmet_fc_abort_op(tgtport, fod->fcpreq);
		return;
	}

	/* keep a running counter of tail position */
	atomic_inc(&fod->queue->sqtail);

	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
	if (fod->total_length) {
		ret = nvmet_fc_alloc_tgt_pgs(fod);
		if (ret) {
			nvmet_req_complete(&fod->req, ret);
			return;
		}
	}
	fod->req.sg = fod->data_sg;
	fod->req.sg_cnt = fod->data_sg_cnt;
	fod->offset = 0;
	fod->next_sg = fod->data_sg;
	fod->next_sg_offset = 0;

	if (fod->io_dir == NVMET_FCP_WRITE) {
		/* pull the data over before invoking nvmet layer */
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
		return;
	}

	/*
	 * Reads or no data:
	 *
	 * can invoke the nvmet_layer now. If read data, cmd completion will
	 * push the data
	 */

	fod->req.execute(&fod->req);

	return;

transport_error:
	nvmet_fc_abort_op(tgtport, fod->fcpreq);
}

/*
 * Actual processing routine for received FC-NVME FCP CMD IUs from the LLD
 */
static void
nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, work);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}

/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet-fc layer will copy cmd payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the CMD IU buffer passed in the call.
 *
 * If this routine returns error, the lldd should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *              was received on.
 * @fcpreq:     pointer to a fcpreq request structure to be used to reference
 *              the exchange corresponding to the FCP Exchange.
 * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
			(cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
			(cmdiu->fc_id != NVME_CMD_FC_ID) ||
			(be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;


	queue = nvmet_fc_find_target_queue(tgtport,
				be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
	 */

	fod = nvmet_fc_alloc_fcp_iod(queue);
	if (!fod) {
		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);
		return -ENOENT;
	}

	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
	memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);

	if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
		queue_work_on(queue->cpu, queue->work_q, &fod->work);
	else
		nvmet_fc_handle_fcp_rqst(tgtport, fod);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
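
/*
 * Illustrative sketch of the LLDD side (hypothetical names): the driver
 * points nvmet-fc at the received CMD IU and at the per-exchange fcpreq
 * whose sg[] array and fcp_op() callbacks it will service. The CMD IU
 * buffer may be reused as soon as the call returns.
 *
 *	struct nvmefc_tgt_fcp_req *fcpreq = &lldd_exch->tgt_fcp_req;
 *
 *	fcpreq->sg = lldd_exch->sgl;	// LLDD-owned list filled per xfr op
 *	if (nvmet_fc_rcv_fcp_req(targetport, fcpreq, cmd_iu_buf,
 *				 cmd_iu_len))
 *		lldd_abort_exchange(lldd_exch);		// hypothetical helper
 */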

enum {
	FCT_TRADDR_ERR		= 0,
	FCT_TRADDR_WWNN		= 1 << 0,
	FCT_TRADDR_WWPN		= 1 << 1,
};

struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static const match_table_t traddr_opt_tokens = {
	{ FCT_TRADDR_WWNN,	"nn-%s" },
	{ FCT_TRADDR_WWPN,	"pn-%s" },
	{ FCT_TRADDR_ERR,	NULL }
};
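
/*
 * Example (illustrative; the WWN values are made up): the traddr string
 * written for an FC nvmet port is expected to carry both tokens parsed
 * below, e.g. something like:
 *
 *	nn-0x10000090fa942779,pn-0x20000090fa942779
 *
 * nvmet_fc_parse_traddr() maps them into traddr.nn/traddr.pn, which
 * nvmet_fc_add_port() then matches against a registered targetport's
 * node_name/port_name.
 */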

static int
nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, traddr_opt_tokens, args);
		switch (token) {
		case FCT_TRADDR_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out;
			}
			traddr->nn = token64;
			break;
		case FCT_TRADDR_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out;
			}
			traddr->pn = token64;
			break;
		default:
			pr_warn("unknown traddr token or missing value '%s'\n",
					p);
			ret = -EINVAL;
			goto out;
		}
	}

out:
	kfree(options);
	return ret;
}

static int
nvmet_fc_add_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	/* validate the address info */
	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
		return -EINVAL;

	/* map the traddr address info to a target port */

	ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
	if (ret)
		return ret;

	ret = -ENXIO;
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
		    (tgtport->fc_target_port.port_name == traddr.pn)) {
			/* a FC port can only be 1 nvmet port id */
			if (!tgtport->port) {
				tgtport->port = port;
				port->priv = tgtport;
				nvmet_fc_tgtport_get(tgtport);
				ret = 0;
			} else
				ret = -EALREADY;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
	return ret;
}

static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport = port->priv;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	if (tgtport->port == port) {
		nvmet_fc_tgtport_put(tgtport);
		tgtport->port = NULL;
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_FC,
	.msdbd			= 1,
	.add_port		= nvmet_fc_add_port,
	.remove_port		= nvmet_fc_remove_port,
	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
	.delete_ctrl		= nvmet_fc_delete_ctrl,
};

static int __init nvmet_fc_init_module(void)
{
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
}

static void __exit nvmet_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	ida_destroy(&nvmet_fc_tgtport_cnt);
}

module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);

MODULE_LICENSE("GPL v2");