2 * Copyright (c) 2016 Avago Technologies. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful.
9 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13 * See the GNU General Public License for more details, a copy of which
14 * can be found in the file COPYING included with this package
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/blk-mq.h>
21 #include <linux/parser.h>
22 #include <linux/random.h>
23 #include <uapi/scsi/fc/fc_fs.h>
24 #include <uapi/scsi/fc/fc_els.h>
27 #include <linux/nvme-fc-driver.h>
28 #include <linux/nvme-fc.h>
31 /* *************************** Data Structures/Defines ****************** */
34 #define NVMET_LS_CTX_COUNT 4
36 /* for this implementation, assume small single frame rqst/rsp */
37 #define NVME_FC_MAX_LS_BUFFER_SIZE 2048
39 struct nvmet_fc_tgtport;
40 struct nvmet_fc_tgt_assoc;
42 struct nvmet_fc_ls_iod {
43 struct nvmefc_tgt_ls_req *lsreq;
44 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
46 struct list_head ls_list; /* tgtport->ls_list */
48 struct nvmet_fc_tgtport *tgtport;
49 struct nvmet_fc_tgt_assoc *assoc;
56 struct scatterlist sg[2];
58 struct work_struct work;
59 } __aligned(sizeof(unsigned long long));
61 #define NVMET_FC_MAX_KB_PER_XFR 256
63 enum nvmet_fcp_datadir {
70 struct nvmet_fc_fcp_iod {
71 struct nvmefc_tgt_fcp_req *fcpreq;
73 struct nvme_fc_cmd_iu cmdiubuf;
74 struct nvme_fc_ersp_iu rspiubuf;
76 struct scatterlist *data_sg;
77 struct scatterlist *next_sg;
82 enum nvmet_fcp_datadir io_dir;
90 struct work_struct work;
91 struct work_struct done_work;
93 struct nvmet_fc_tgtport *tgtport;
94 struct nvmet_fc_tgt_queue *queue;
96 struct list_head fcp_list; /* tgtport->fcp_list */
99 struct nvmet_fc_tgtport {
101 struct nvmet_fc_target_port fc_target_port;
103 struct list_head tgt_list; /* nvmet_fc_target_list */
104 struct device *dev; /* dev for dma mapping */
105 struct nvmet_fc_target_template *ops;
107 struct nvmet_fc_ls_iod *iod;
109 struct list_head ls_list;
110 struct list_head ls_busylist;
111 struct list_head assoc_list;
112 struct ida assoc_cnt;
113 struct nvmet_port *port;
117 struct nvmet_fc_tgt_queue {
129 struct nvmet_port *port;
130 struct nvmet_cq nvme_cq;
131 struct nvmet_sq nvme_sq;
132 struct nvmet_fc_tgt_assoc *assoc;
133 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
134 struct list_head fod_list;
135 struct workqueue_struct *work_q;
137 } __aligned(sizeof(unsigned long long));
139 struct nvmet_fc_tgt_assoc {
142 struct nvmet_fc_tgtport *tgtport;
143 struct list_head a_list;
144 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES];
150 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
152 return (iodptr - iodptr->tgtport->iod);
156 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
158 return (fodptr - fodptr->queue->fod);
163 * Association and Connection IDs:
165 * Association ID will have random number in upper 6 bytes and zero
168 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
170 * note: Association ID = Connection ID for queue 0
172 #define BYTES_FOR_QID sizeof(u16)
173 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
174 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
177 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
179 return (assoc->association_id | qid);
183 nvmet_fc_getassociationid(u64 connectionid)
185 return connectionid & ~NVMET_FC_QUEUEID_MASK;
189 nvmet_fc_getqueueid(u64 connectionid)
191 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
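/*
 * Illustrative example of the ID scheme above (values are hypothetical,
 * not taken from this file): with an association_id of 0x1122334455660000,
 * queue 0's connection id equals the association id itself, while queue 3
 * yields 0x1122334455660003.  Decomposition simply reverses the OR:
 *
 *	u64 connid = nvmet_fc_makeconnid(assoc, 3);
 *	u64 associd = nvmet_fc_getassociationid(connid); // 0x1122334455660000
 *	u16 qid = nvmet_fc_getqueueid(connid);           // 3
 */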
194 static inline struct nvmet_fc_tgtport *
195 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
197 return container_of(targetport, struct nvmet_fc_tgtport,
201 static inline struct nvmet_fc_fcp_iod *
202 nvmet_req_to_fod(struct nvmet_req *nvme_req)
204 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
208 /* *************************** Globals **************************** */
211 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
213 static LIST_HEAD(nvmet_fc_target_list);
214 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
217 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
218 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
219 static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
220 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
221 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
222 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
223 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
224 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
225 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
228 /* *********************** FC-NVME DMA Handling **************************** */
231 * The fcloop device passes in a NULL device pointer. Real LLD's will
232 * pass in a valid device pointer. If NULL is passed to the dma mapping
233 * routines, depending on the platform, it may or may not succeed, and
237 * Wrap all the dma routines and check the dev pointer.
239 * For simple mappings (those that return just a dma address), we'll
240 * noop them, returning a dma address of 0.
242 * On more complex mappings (dma_map_sg), a pseudo routine fills
243 * in the scatter list, setting all dma addresses to 0.
246 static inline dma_addr_t
247 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
248 enum dma_data_direction dir)
250 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
254 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
256 return dev ? dma_mapping_error(dev, dma_addr) : 0;
260 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
261 enum dma_data_direction dir)
264 dma_unmap_single(dev, addr, size, dir);
268 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
269 enum dma_data_direction dir)
272 dma_sync_single_for_cpu(dev, addr, size, dir);
276 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
277 enum dma_data_direction dir)
280 dma_sync_single_for_device(dev, addr, size, dir);
283 /* pseudo dma_map_sg call */
285 fc_map_sg(struct scatterlist *sg, int nents)
287 struct scatterlist *s;
290 WARN_ON(nents == 0 || sg[0].length == 0);
292 for_each_sg(sg, s, nents, i) {
294 #ifdef CONFIG_NEED_SG_DMA_LENGTH
295 s->dma_length = s->length;
302 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
303 enum dma_data_direction dir)
305 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
309 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
310 enum dma_data_direction dir)
313 dma_unmap_sg(dev, sg, nents, dir);
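/*
 * Illustrative use of the wrappers above (a sketch, not code from this
 * file): callers pass tgtport->dev, which may be NULL for fcloop.
 *
 *	dma_addr_t d = fc_dma_map_single(tgtport->dev, buf, len,
 *					 DMA_TO_DEVICE);
 *	if (fc_dma_mapping_error(tgtport->dev, d))  // never fails if dev is NULL
 *		return -EFAULT;
 *	...
 *	fc_dma_unmap_single(tgtport->dev, d, len, DMA_TO_DEVICE); // noop if NULL
 */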
317 /* *********************** FC-NVME Port Management ************************ */
321 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
323 struct nvmet_fc_ls_iod *iod;
326 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
333 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
334 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
335 iod->tgtport = tgtport;
336 list_add_tail(&iod->ls_list, &tgtport->ls_list);
338 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
343 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
345 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
346 NVME_FC_MAX_LS_BUFFER_SIZE,
348 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
356 list_del(&iod->ls_list);
357 for (iod--, i--; i >= 0; iod--, i--) {
358 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
359 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
361 list_del(&iod->ls_list);
370 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
372 struct nvmet_fc_ls_iod *iod = tgtport->iod;
375 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
376 fc_dma_unmap_single(tgtport->dev,
377 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
380 list_del(&iod->ls_list);
385 static struct nvmet_fc_ls_iod *
386 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
388 struct nvmet_fc_ls_iod *iod;
391 spin_lock_irqsave(&tgtport->lock, flags);
392 iod = list_first_entry_or_null(&tgtport->ls_list,
393 struct nvmet_fc_ls_iod, ls_list);
395 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
396 spin_unlock_irqrestore(&tgtport->lock, flags);
402 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
403 struct nvmet_fc_ls_iod *iod)
407 spin_lock_irqsave(&tgtport->lock, flags);
408 list_move(&iod->ls_list, &tgtport->ls_list);
409 spin_unlock_irqrestore(&tgtport->lock, flags);
413 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
414 struct nvmet_fc_tgt_queue *queue)
416 struct nvmet_fc_fcp_iod *fod = queue->fod;
419 for (i = 0; i < queue->sqsize; fod++, i++) {
420 INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
421 INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
422 fod->tgtport = tgtport;
426 fod->aborted = false;
428 list_add_tail(&fod->fcp_list, &queue->fod_list);
429 spin_lock_init(&fod->flock);
431 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
432 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
433 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
434 list_del(&fod->fcp_list);
435 for (fod--, i--; i >= 0; fod--, i--) {
436 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
437 sizeof(fod->rspiubuf),
440 list_del(&fod->fcp_list);
449 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
450 struct nvmet_fc_tgt_queue *queue)
452 struct nvmet_fc_fcp_iod *fod = queue->fod;
455 for (i = 0; i < queue->sqsize; fod++, i++) {
457 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
458 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
462 static struct nvmet_fc_fcp_iod *
463 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
465 struct nvmet_fc_fcp_iod *fod;
468 spin_lock_irqsave(&queue->qlock, flags);
469 fod = list_first_entry_or_null(&queue->fod_list,
470 struct nvmet_fc_fcp_iod, fcp_list);
472 list_del(&fod->fcp_list);
475 * no queue reference is taken, as it was taken by the
476 * queue lookup just prior to the allocation. The iod
477 * will "inherit" that reference.
480 spin_unlock_irqrestore(&queue->qlock, flags);
486 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
487 struct nvmet_fc_fcp_iod *fod)
489 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
490 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
493 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
494 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
496 fcpreq->nvmet_fc_private = NULL;
498 spin_lock_irqsave(&queue->qlock, flags);
499 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
502 fod->aborted = false;
503 fod->writedataactive = false;
505 spin_unlock_irqrestore(&queue->qlock, flags);
508 * release the reference taken at queue lookup and fod allocation
510 nvmet_fc_tgt_q_put(queue);
512 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
516 nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
520 if (!(tgtport->ops->target_features &
521 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
522 tgtport->ops->max_hw_queues == 1)
523 return WORK_CPU_UNBOUND;
525 /* Simple cpu selection based on qid modulo active cpu count */
526 idx = !qid ? 0 : (qid - 1) % num_active_cpus();
528 /* find the n'th active cpu */
529 for (cpu = 0, cnt = 0; ; ) {
530 if (cpu_active(cpu)) {
535 cpu = (cpu + 1) % num_possible_cpus();
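/*
 * Worked example of the selection above (cpu counts are assumed for
 * illustration): with 4 active cpus, qid 0 (admin) picks index 0,
 * qid 1 -> index 0, qid 2 -> index 1, qid 5 -> index 0 (wraps modulo 4);
 * the loop then walks the possible cpus to return that n'th active one.
 */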
541 static struct nvmet_fc_tgt_queue *
542 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
545 struct nvmet_fc_tgt_queue *queue;
549 if (qid >= NVMET_NR_QUEUES)
552 queue = kzalloc((sizeof(*queue) +
553 (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
558 if (!nvmet_fc_tgt_a_get(assoc))
561 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
562 assoc->tgtport->fc_target_port.port_num,
567 queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
569 queue->sqsize = sqsize;
570 queue->assoc = assoc;
571 queue->port = assoc->tgtport->port;
572 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
573 INIT_LIST_HEAD(&queue->fod_list);
574 atomic_set(&queue->connected, 0);
575 atomic_set(&queue->sqtail, 0);
576 atomic_set(&queue->rsn, 1);
577 atomic_set(&queue->zrspcnt, 0);
578 spin_lock_init(&queue->qlock);
579 kref_init(&queue->ref);
581 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
583 ret = nvmet_sq_init(&queue->nvme_sq);
585 goto out_fail_iodlist;
587 WARN_ON(assoc->queues[qid]);
588 spin_lock_irqsave(&assoc->tgtport->lock, flags);
589 assoc->queues[qid] = queue;
590 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
595 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
596 destroy_workqueue(queue->work_q);
598 nvmet_fc_tgt_a_put(assoc);
606 nvmet_fc_tgt_queue_free(struct kref *ref)
608 struct nvmet_fc_tgt_queue *queue =
609 container_of(ref, struct nvmet_fc_tgt_queue, ref);
612 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
613 queue->assoc->queues[queue->qid] = NULL;
614 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
616 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
618 nvmet_fc_tgt_a_put(queue->assoc);
620 destroy_workqueue(queue->work_q);
626 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
628 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
632 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
634 return kref_get_unless_zero(&queue->ref);
639 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
641 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
642 struct nvmet_fc_fcp_iod *fod = queue->fod;
644 int i, writedataactive;
647 disconnect = atomic_xchg(&queue->connected, 0);
649 spin_lock_irqsave(&queue->qlock, flags);
650 /* abort outstanding io's */
651 for (i = 0; i < queue->sqsize; fod++, i++) {
653 spin_lock(&fod->flock);
655 writedataactive = fod->writedataactive;
656 spin_unlock(&fod->flock);
658 * only call lldd abort routine if waiting for
659 * writedata. other outstanding ops should finish
662 if (writedataactive) {
663 spin_lock(&fod->flock);
665 spin_unlock(&fod->flock);
666 tgtport->ops->fcp_abort(
667 &tgtport->fc_target_port, fod->fcpreq);
671 spin_unlock_irqrestore(&queue->qlock, flags);
673 flush_workqueue(queue->work_q);
676 nvmet_sq_destroy(&queue->nvme_sq);
678 nvmet_fc_tgt_q_put(queue);
681 static struct nvmet_fc_tgt_queue *
682 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
685 struct nvmet_fc_tgt_assoc *assoc;
686 struct nvmet_fc_tgt_queue *queue;
687 u64 association_id = nvmet_fc_getassociationid(connection_id);
688 u16 qid = nvmet_fc_getqueueid(connection_id);
691 spin_lock_irqsave(&tgtport->lock, flags);
692 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
693 if (association_id == assoc->association_id) {
694 queue = assoc->queues[qid];
696 (!atomic_read(&queue->connected) ||
697 !nvmet_fc_tgt_q_get(queue)))
699 spin_unlock_irqrestore(&tgtport->lock, flags);
703 spin_unlock_irqrestore(&tgtport->lock, flags);
707 static struct nvmet_fc_tgt_assoc *
708 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
710 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
714 bool needrandom = true;
716 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
720 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
724 if (!nvmet_fc_tgtport_get(tgtport))
727 assoc->tgtport = tgtport;
729 INIT_LIST_HEAD(&assoc->a_list);
730 kref_init(&assoc->ref);
733 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
734 ran = ran << BYTES_FOR_QID_SHIFT;
736 spin_lock_irqsave(&tgtport->lock, flags);
738 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
739 if (ran == tmpassoc->association_id) {
744 assoc->association_id = ran;
745 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
747 spin_unlock_irqrestore(&tgtport->lock, flags);
753 ida_simple_remove(&tgtport->assoc_cnt, idx);
760 nvmet_fc_target_assoc_free(struct kref *ref)
762 struct nvmet_fc_tgt_assoc *assoc =
763 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
764 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
767 spin_lock_irqsave(&tgtport->lock, flags);
768 list_del(&assoc->a_list);
769 spin_unlock_irqrestore(&tgtport->lock, flags);
770 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
772 nvmet_fc_tgtport_put(tgtport);
776 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
778 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
782 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
784 return kref_get_unless_zero(&assoc->ref);
788 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
790 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
791 struct nvmet_fc_tgt_queue *queue;
795 spin_lock_irqsave(&tgtport->lock, flags);
796 for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
797 queue = assoc->queues[i];
799 if (!nvmet_fc_tgt_q_get(queue))
801 spin_unlock_irqrestore(&tgtport->lock, flags);
802 nvmet_fc_delete_target_queue(queue);
803 nvmet_fc_tgt_q_put(queue);
804 spin_lock_irqsave(&tgtport->lock, flags);
807 spin_unlock_irqrestore(&tgtport->lock, flags);
809 nvmet_fc_tgt_a_put(assoc);
812 static struct nvmet_fc_tgt_assoc *
813 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
816 struct nvmet_fc_tgt_assoc *assoc;
817 struct nvmet_fc_tgt_assoc *ret = NULL;
820 spin_lock_irqsave(&tgtport->lock, flags);
821 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
822 if (association_id == assoc->association_id) {
824 nvmet_fc_tgt_a_get(assoc);
828 spin_unlock_irqrestore(&tgtport->lock, flags);
835 * nvmet_fc_register_targetport - transport entry point called by an
836 * LLDD to register the existence of a local
837 * NVME subsystem FC port.
838 * @pinfo: pointer to information about the port to be registered
839 * @template: LLDD entrypoints and operational parameters for the port
840 * @dev: physical hardware device node port corresponds to. Will be
841 * used for DMA mappings
842 * @portptr: pointer to a target port pointer. Upon success, the routine
843 * will allocate a nvmet_fc_target_port structure and place its
844 * address in the target port pointer. Upon failure, the target port
845 * pointer will be set to NULL.
848 * a completion status. Must be 0 upon success; a negative errno
849 * (ex: -ENXIO) upon failure.
852 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
853 struct nvmet_fc_target_template *template,
855 struct nvmet_fc_target_port **portptr)
857 struct nvmet_fc_tgtport *newrec;
861 if (!template->xmt_ls_rsp || !template->fcp_op ||
862 !template->fcp_abort ||
863 !template->fcp_req_release || !template->targetport_delete ||
864 !template->max_hw_queues || !template->max_sgl_segments ||
865 !template->max_dif_sgl_segments || !template->dma_boundary) {
867 goto out_regtgt_failed;
870 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
874 goto out_regtgt_failed;
877 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
883 if (!get_device(dev) && dev) {
888 newrec->fc_target_port.node_name = pinfo->node_name;
889 newrec->fc_target_port.port_name = pinfo->port_name;
890 newrec->fc_target_port.private = &newrec[1];
891 newrec->fc_target_port.port_id = pinfo->port_id;
892 newrec->fc_target_port.port_num = idx;
893 INIT_LIST_HEAD(&newrec->tgt_list);
895 newrec->ops = template;
896 spin_lock_init(&newrec->lock);
897 INIT_LIST_HEAD(&newrec->ls_list);
898 INIT_LIST_HEAD(&newrec->ls_busylist);
899 INIT_LIST_HEAD(&newrec->assoc_list);
900 kref_init(&newrec->ref);
901 ida_init(&newrec->assoc_cnt);
903 ret = nvmet_fc_alloc_ls_iodlist(newrec);
906 goto out_free_newrec;
909 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
910 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
911 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
913 *portptr = &newrec->fc_target_port;
919 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
926 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
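/*
 * Sketch of how an LLDD might register a target port (hypothetical
 * lldd_*, pdev, wwnn/wwpn/did names; only the nvmet_fc_*/nvmefc_* symbols
 * are part of this interface):
 *
 *	static struct nvmet_fc_target_template lldd_tgt_template = {
 *		.targetport_delete	= lldd_targetport_delete,
 *		.xmt_ls_rsp		= lldd_xmt_ls_rsp,
 *		.fcp_op			= lldd_fcp_op,
 *		.fcp_abort		= lldd_fcp_abort,
 *		.fcp_req_release	= lldd_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.target_priv_sz		= sizeof(struct lldd_tgt_priv),
 *	};
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name = wwnn, .port_name = wwpn, .port_id = did,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int ret = nvmet_fc_register_targetport(&pinfo, &lldd_tgt_template,
 *					       &pdev->dev, &targetport);
 */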
930 nvmet_fc_free_tgtport(struct kref *ref)
932 struct nvmet_fc_tgtport *tgtport =
933 container_of(ref, struct nvmet_fc_tgtport, ref);
934 struct device *dev = tgtport->dev;
937 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
938 list_del(&tgtport->tgt_list);
939 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
941 nvmet_fc_free_ls_iodlist(tgtport);
943 /* let the LLDD know we've finished tearing it down */
944 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
946 ida_simple_remove(&nvmet_fc_tgtport_cnt,
947 tgtport->fc_target_port.port_num);
949 ida_destroy(&tgtport->assoc_cnt);
957 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
959 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
963 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
965 return kref_get_unless_zero(&tgtport->ref);
969 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
971 struct nvmet_fc_tgt_assoc *assoc, *next;
974 spin_lock_irqsave(&tgtport->lock, flags);
975 list_for_each_entry_safe(assoc, next,
976 &tgtport->assoc_list, a_list) {
977 if (!nvmet_fc_tgt_a_get(assoc))
979 spin_unlock_irqrestore(&tgtport->lock, flags);
980 nvmet_fc_delete_target_assoc(assoc);
981 nvmet_fc_tgt_a_put(assoc);
982 spin_lock_irqsave(&tgtport->lock, flags);
984 spin_unlock_irqrestore(&tgtport->lock, flags);
988 * nvmet layer has called to terminate an association
991 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
993 struct nvmet_fc_tgtport *tgtport, *next;
994 struct nvmet_fc_tgt_assoc *assoc;
995 struct nvmet_fc_tgt_queue *queue;
997 bool found_ctrl = false;
999 /* this is a bit ugly, but don't want to make locks layered */
1000 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1001 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1003 if (!nvmet_fc_tgtport_get(tgtport))
1005 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1007 spin_lock_irqsave(&tgtport->lock, flags);
1008 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1009 queue = assoc->queues[0];
1010 if (queue && queue->nvme_sq.ctrl == ctrl) {
1011 if (nvmet_fc_tgt_a_get(assoc))
1016 spin_unlock_irqrestore(&tgtport->lock, flags);
1018 nvmet_fc_tgtport_put(tgtport);
1021 nvmet_fc_delete_target_assoc(assoc);
1022 nvmet_fc_tgt_a_put(assoc);
1026 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1028 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1032 * nvmet_fc_unregister_targetport - transport entry point called by an
1033 * LLDD to deregister/remove a previously
1034 * registered local NVME subsystem FC port.
1035 * @target_port: pointer to the (registered) target port that is to be
1039 * a completion status. Must be 0 upon success; a negative errno
1040 * (ex: -ENXIO) upon failure.
1043 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1045 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1047 /* terminate any outstanding associations */
1048 __nvmet_fc_free_assocs(tgtport);
1050 nvmet_fc_tgtport_put(tgtport);
1054 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1057 /* *********************** FC-NVME LS Handling **************************** */
1061 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, __be32 desc_len, u8 rqst_ls_cmd)
1063 struct fcnvme_ls_acc_hdr *acc = buf;
1065 acc->w0.ls_cmd = ls_cmd;
1066 acc->desc_list_len = desc_len;
1067 acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1068 acc->rqst.desc_len =
1069 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1070 acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1074 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1075 u8 reason, u8 explanation, u8 vendor)
1077 struct fcnvme_ls_rjt *rjt = buf;
1079 nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1080 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1082 rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1083 rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1084 rjt->rjt.reason_code = reason;
1085 rjt->rjt.reason_explanation = explanation;
1086 rjt->rjt.vendor = vendor;
1088 return sizeof(struct fcnvme_ls_rjt);
1091 /* Validation Error indexes into the string table below */
1094 VERR_CR_ASSOC_LEN = 1,
1095 VERR_CR_ASSOC_RQST_LEN = 2,
1096 VERR_CR_ASSOC_CMD = 3,
1097 VERR_CR_ASSOC_CMD_LEN = 4,
1098 VERR_ERSP_RATIO = 5,
1099 VERR_ASSOC_ALLOC_FAIL = 6,
1100 VERR_QUEUE_ALLOC_FAIL = 7,
1101 VERR_CR_CONN_LEN = 8,
1102 VERR_CR_CONN_RQST_LEN = 9,
1104 VERR_ASSOC_ID_LEN = 11,
1107 VERR_CONN_ID_LEN = 14,
1109 VERR_CR_CONN_CMD = 16,
1110 VERR_CR_CONN_CMD_LEN = 17,
1111 VERR_DISCONN_LEN = 18,
1112 VERR_DISCONN_RQST_LEN = 19,
1113 VERR_DISCONN_CMD = 20,
1114 VERR_DISCONN_CMD_LEN = 21,
1115 VERR_DISCONN_SCOPE = 22,
1117 VERR_RS_RQST_LEN = 24,
1119 VERR_RS_CMD_LEN = 26,
1124 static char *validation_errors[] = {
1126 "Bad CR_ASSOC Length",
1127 "Bad CR_ASSOC Rqst Length",
1129 "Bad CR_ASSOC Cmd Length",
1131 "Association Allocation Failed",
1132 "Queue Allocation Failed",
1133 "Bad CR_CONN Length",
1134 "Bad CR_CONN Rqst Length",
1135 "Not Association ID",
1136 "Bad Association ID Length",
1138 "Not Connection ID",
1139 "Bad Connection ID Length",
1142 "Bad CR_CONN Cmd Length",
1143 "Bad DISCONN Length",
1144 "Bad DISCONN Rqst Length",
1146 "Bad DISCONN Cmd Length",
1147 "Bad Disconnect Scope",
1149 "Bad RS Rqst Length",
1151 "Bad RS Cmd Length",
1153 "Bad RS Relative Offset",
1157 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1158 struct nvmet_fc_ls_iod *iod)
1160 struct fcnvme_ls_cr_assoc_rqst *rqst =
1161 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1162 struct fcnvme_ls_cr_assoc_acc *acc =
1163 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1164 struct nvmet_fc_tgt_queue *queue;
1167 memset(acc, 0, sizeof(*acc));
1169 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
1170 ret = VERR_CR_ASSOC_LEN;
1171 else if (rqst->desc_list_len !=
1173 sizeof(struct fcnvme_ls_cr_assoc_rqst)))
1174 ret = VERR_CR_ASSOC_RQST_LEN;
1175 else if (rqst->assoc_cmd.desc_tag !=
1176 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1177 ret = VERR_CR_ASSOC_CMD;
1178 else if (rqst->assoc_cmd.desc_len !=
1180 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
1181 ret = VERR_CR_ASSOC_CMD_LEN;
1182 else if (!rqst->assoc_cmd.ersp_ratio ||
1183 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1184 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1185 ret = VERR_ERSP_RATIO;
1188 /* new association w/ admin queue */
1189 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1191 ret = VERR_ASSOC_ALLOC_FAIL;
1193 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1194 be16_to_cpu(rqst->assoc_cmd.sqsize));
1196 ret = VERR_QUEUE_ALLOC_FAIL;
1201 dev_err(tgtport->dev,
1202 "Create Association LS failed: %s\n",
1203 validation_errors[ret]);
1204 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1205 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1206 FCNVME_RJT_RC_LOGIC,
1207 FCNVME_RJT_EXP_NONE, 0);
1211 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1212 atomic_set(&queue->connected, 1);
1213 queue->sqhd = 0; /* best place to init value */
1215 /* format a response */
1217 iod->lsreq->rsplen = sizeof(*acc);
1219 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1221 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1222 FCNVME_LS_CREATE_ASSOCIATION);
1223 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1224 acc->associd.desc_len =
1226 sizeof(struct fcnvme_lsdesc_assoc_id));
1227 acc->associd.association_id =
1228 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1229 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1230 acc->connectid.desc_len =
1232 sizeof(struct fcnvme_lsdesc_conn_id));
1233 acc->connectid.connection_id = acc->associd.association_id;
1237 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1238 struct nvmet_fc_ls_iod *iod)
1240 struct fcnvme_ls_cr_conn_rqst *rqst =
1241 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1242 struct fcnvme_ls_cr_conn_acc *acc =
1243 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1244 struct nvmet_fc_tgt_queue *queue;
1247 memset(acc, 0, sizeof(*acc));
1249 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1250 ret = VERR_CR_CONN_LEN;
1251 else if (rqst->desc_list_len !=
1253 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1254 ret = VERR_CR_CONN_RQST_LEN;
1255 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1256 ret = VERR_ASSOC_ID;
1257 else if (rqst->associd.desc_len !=
1259 sizeof(struct fcnvme_lsdesc_assoc_id)))
1260 ret = VERR_ASSOC_ID_LEN;
1261 else if (rqst->connect_cmd.desc_tag !=
1262 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1263 ret = VERR_CR_CONN_CMD;
1264 else if (rqst->connect_cmd.desc_len !=
1266 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1267 ret = VERR_CR_CONN_CMD_LEN;
1268 else if (!rqst->connect_cmd.ersp_ratio ||
1269 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1270 be16_to_cpu(rqst->connect_cmd.sqsize)))
1271 ret = VERR_ERSP_RATIO;
1275 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1276 be64_to_cpu(rqst->associd.association_id));
1278 ret = VERR_NO_ASSOC;
1280 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1281 be16_to_cpu(rqst->connect_cmd.qid),
1282 be16_to_cpu(rqst->connect_cmd.sqsize));
1284 ret = VERR_QUEUE_ALLOC_FAIL;
1286 /* release get taken in nvmet_fc_find_target_assoc */
1287 nvmet_fc_tgt_a_put(iod->assoc);
1292 dev_err(tgtport->dev,
1293 "Create Connection LS failed: %s\n",
1294 validation_errors[ret]);
1295 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1296 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1297 (ret == VERR_NO_ASSOC) ?
1298 FCNVME_RJT_RC_INV_ASSOC :
1299 FCNVME_RJT_RC_LOGIC,
1300 FCNVME_RJT_EXP_NONE, 0);
1304 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1305 atomic_set(&queue->connected, 1);
1306 queue->sqhd = 0; /* best place to init value */
1308 /* format a response */
1310 iod->lsreq->rsplen = sizeof(*acc);
1312 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1313 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1314 FCNVME_LS_CREATE_CONNECTION);
1315 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1316 acc->connectid.desc_len =
1318 sizeof(struct fcnvme_lsdesc_conn_id));
1319 acc->connectid.connection_id =
1320 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1321 be16_to_cpu(rqst->connect_cmd.qid)));
1325 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1326 struct nvmet_fc_ls_iod *iod)
1328 struct fcnvme_ls_disconnect_rqst *rqst =
1329 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1330 struct fcnvme_ls_disconnect_acc *acc =
1331 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1332 struct nvmet_fc_tgt_queue *queue = NULL;
1333 struct nvmet_fc_tgt_assoc *assoc;
1335 bool del_assoc = false;
1337 memset(acc, 0, sizeof(*acc));
1339 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1340 ret = VERR_DISCONN_LEN;
1341 else if (rqst->desc_list_len !=
1343 sizeof(struct fcnvme_ls_disconnect_rqst)))
1344 ret = VERR_DISCONN_RQST_LEN;
1345 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1346 ret = VERR_ASSOC_ID;
1347 else if (rqst->associd.desc_len !=
1349 sizeof(struct fcnvme_lsdesc_assoc_id)))
1350 ret = VERR_ASSOC_ID_LEN;
1351 else if (rqst->discon_cmd.desc_tag !=
1352 cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1353 ret = VERR_DISCONN_CMD;
1354 else if (rqst->discon_cmd.desc_len !=
1356 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1357 ret = VERR_DISCONN_CMD_LEN;
1358 else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1359 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1360 ret = VERR_DISCONN_SCOPE;
1362 /* match an active association */
1363 assoc = nvmet_fc_find_target_assoc(tgtport,
1364 be64_to_cpu(rqst->associd.association_id));
1367 if (rqst->discon_cmd.scope ==
1368 FCNVME_DISCONN_CONNECTION) {
1369 queue = nvmet_fc_find_target_queue(tgtport,
1371 rqst->discon_cmd.id));
1373 nvmet_fc_tgt_a_put(assoc);
1378 ret = VERR_NO_ASSOC;
1382 dev_err(tgtport->dev,
1383 "Disconnect LS failed: %s\n",
1384 validation_errors[ret]);
1385 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1386 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1387 (ret == VERR_NO_ASSOC) ?
1388 FCNVME_RJT_RC_INV_ASSOC :
1389 (ret == VERR_NO_CONN) ?
1390 FCNVME_RJT_RC_INV_CONN :
1391 FCNVME_RJT_RC_LOGIC,
1392 FCNVME_RJT_EXP_NONE, 0);
1396 /* format a response */
1398 iod->lsreq->rsplen = sizeof(*acc);
1400 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1402 sizeof(struct fcnvme_ls_disconnect_acc)),
1403 FCNVME_LS_DISCONNECT);
1406 /* are we to delete a Connection ID (queue) */
1408 int qid = queue->qid;
1410 nvmet_fc_delete_target_queue(queue);
1412 /* release the get taken by find_target_queue */
1413 nvmet_fc_tgt_q_put(queue);
1415 /* tear association down if io queue terminated */
1420 /* release get taken in nvmet_fc_find_target_assoc */
1421 nvmet_fc_tgt_a_put(iod->assoc);
1424 nvmet_fc_delete_target_assoc(iod->assoc);
1428 /* *********************** NVME Ctrl Routines **************************** */
1431 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1433 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1436 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1438 struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1439 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1441 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1442 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1443 nvmet_fc_free_ls_iod(tgtport, iod);
1444 nvmet_fc_tgtport_put(tgtport);
1448 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1449 struct nvmet_fc_ls_iod *iod)
1453 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1454 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1456 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1458 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1462 * Actual processing routine for received FC-NVME LS Requests from the LLD
1465 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1466 struct nvmet_fc_ls_iod *iod)
1468 struct fcnvme_ls_rqst_w0 *w0 =
1469 (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1471 iod->lsreq->nvmet_fc_private = iod;
1472 iod->lsreq->rspbuf = iod->rspbuf;
1473 iod->lsreq->rspdma = iod->rspdma;
1474 iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1475 /* Be preventative. handlers will later set to valid length */
1476 iod->lsreq->rsplen = 0;
1482 * parse request input, execute the request, and format the
1485 switch (w0->ls_cmd) {
1486 case FCNVME_LS_CREATE_ASSOCIATION:
1487 /* Creates Association and initial Admin Queue/Connection */
1488 nvmet_fc_ls_create_association(tgtport, iod);
1490 case FCNVME_LS_CREATE_CONNECTION:
1491 /* Creates an IO Queue/Connection */
1492 nvmet_fc_ls_create_connection(tgtport, iod);
1494 case FCNVME_LS_DISCONNECT:
1495 /* Terminate a Queue/Connection or the Association */
1496 nvmet_fc_ls_disconnect(tgtport, iod);
1499 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1500 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1501 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1504 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1508 * Actual processing routine for received FC-NVME LS Requests from the LLD
1511 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1513 struct nvmet_fc_ls_iod *iod =
1514 container_of(work, struct nvmet_fc_ls_iod, work);
1515 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1517 nvmet_fc_handle_ls_rqst(tgtport, iod);
1522 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1523 * upon the reception of an NVME LS request.
1525 * The nvmet-fc layer will copy payload to an internal structure for
1526 * processing. As such, upon completion of the routine, the LLDD may
1527 * immediately free/reuse the LS request buffer passed in the call.
1529 * If this routine returns error, the LLDD should abort the exchange.
1531 * @tgtport: pointer to the (registered) target port the LS was
1533 * @lsreq: pointer to a lsreq request structure to be used to reference
1534 * the exchange corresponding to the LS.
1535 * @lsreqbuf: pointer to the buffer containing the LS Request
1536 * @lsreqbuf_len: length, in bytes, of the received LS request
1539 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1540 struct nvmefc_tgt_ls_req *lsreq,
1541 void *lsreqbuf, u32 lsreqbuf_len)
1543 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1544 struct nvmet_fc_ls_iod *iod;
1546 if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1549 if (!nvmet_fc_tgtport_get(tgtport))
1552 iod = nvmet_fc_alloc_ls_iod(tgtport);
1554 nvmet_fc_tgtport_put(tgtport);
1560 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1561 iod->rqstdatalen = lsreqbuf_len;
1563 schedule_work(&iod->work);
1567 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
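/*
 * Sketch of an LLDD LS-receive path calling the routine above (lldd_*
 * names are hypothetical; only nvmet_fc_rcv_ls_req() is from this file):
 *
 *	struct nvmefc_tgt_ls_req *lsreq = &lldd_exch->tgt_ls_req;
 *
 *	if (nvmet_fc_rcv_ls_req(lldd->targetport, lsreq, rx_buf, rx_len))
 *		lldd_abort_exchange(lldd_exch);
 *	// rx_buf may be reused right away: the payload was copied internally
 */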
1571 * **********************
1572 * Start of FCP handling
1573 * **********************
1577 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1579 struct scatterlist *sg;
1582 u32 page_len, length;
1585 length = fod->total_length;
1586 nent = DIV_ROUND_UP(length, PAGE_SIZE);
1587 sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1591 sg_init_table(sg, nent);
1594 page_len = min_t(u32, length, PAGE_SIZE);
1596 page = alloc_page(GFP_KERNEL);
1598 goto out_free_pages;
1600 sg_set_page(&sg[i], page, page_len, 0);
1606 fod->data_sg_cnt = nent;
1607 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1608 ((fod->io_dir == NVMET_FCP_WRITE) ?
1609 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1610 /* note: write from initiator perspective */
1617 __free_page(sg_page(&sg[i]));
1620 fod->data_sg = NULL;
1621 fod->data_sg_cnt = 0;
1623 return NVME_SC_INTERNAL;
1627 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1629 struct scatterlist *sg;
1632 if (!fod->data_sg || !fod->data_sg_cnt)
1635 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1636 ((fod->io_dir == NVMET_FCP_WRITE) ?
1637 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1638 for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1639 __free_page(sg_page(sg));
1640 kfree(fod->data_sg);
1641 fod->data_sg = NULL;
1642 fod->data_sg_cnt = 0;
1647 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1651 /* egad, this is ugly. And sqtail is just a best guess */
1652 sqtail = atomic_read(&q->sqtail) % q->sqsize;
1654 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1655 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
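/*
 * Worked example of the check above (illustrative numbers): with
 * sqsize = 32, sqhd = 2 and a guessed sqtail of 31, used = 29 and
 * 29 * 10 >= 31 * 9, so the queue is treated as 90%+ full and a full
 * ersp will be forced.
 */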
1660 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1663 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1664 struct nvmet_fc_fcp_iod *fod)
1666 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1667 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1668 struct nvme_completion *cqe = &ersp->cqe;
1669 u32 *cqewd = (u32 *)cqe;
1670 bool send_ersp = false;
1671 u32 rsn, rspcnt, xfr_length;
1673 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1674 xfr_length = fod->total_length;
1676 xfr_length = fod->offset;
1679 * check to see if we can send a 0's rsp.
1680 * Note: to send a 0's response, the NVME-FC host transport will
1681 * recreate the CQE. The host transport knows: sq id, SQHD (last
1682 * seen in an ersp), and command_id. Thus it will create a
1683 * zero-filled CQE with those known fields filled in. Transport
1684 * must send an ersp for any condition where the cqe won't match
1687 * Here are the FC-NVME mandated cases where we must send an ersp:
1688 * every N responses, where N=ersp_ratio
1689 * force fabric commands to send ersp's (not in FC-NVME but good
1691 * normal cmds: any time status is non-zero, or status is zero
1692 * but words 0 or 1 are non-zero.
1693 * the SQ is 90% or more full
1694 * the cmd is a fused command
1695 * transferred data length not equal to cmd iu length
1697 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1698 if (!(rspcnt % fod->queue->ersp_ratio) ||
1699 sqe->opcode == nvme_fabrics_command ||
1700 xfr_length != fod->total_length ||
1701 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1702 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1703 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1706 /* re-set the fields */
1707 fod->fcpreq->rspaddr = ersp;
1708 fod->fcpreq->rspdma = fod->rspdma;
1711 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1712 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1714 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1715 rsn = atomic_inc_return(&fod->queue->rsn);
1716 ersp->rsn = cpu_to_be32(rsn);
1717 ersp->xfrd_len = cpu_to_be32(xfr_length);
1718 fod->fcpreq->rsplen = sizeof(*ersp);
1721 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1722 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
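/*
 * Example of the ersp_ratio decision above (assumed value): with an
 * ersp_ratio of 8, every 8th response on the queue is sent as a full
 * ersp even when all the other conditions would have allowed the
 * abbreviated zero-filled response.
 */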
1725 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1728 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1729 struct nvmet_fc_fcp_iod *fod)
1731 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1733 /* data no longer needed */
1734 nvmet_fc_free_tgt_pgs(fod);
1737 * if an ABTS was received or we issued the fcp_abort early
1738 * don't call abort routine again.
1740 /* no need to take lock - lock was taken earlier to get here */
1742 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1744 nvmet_fc_free_fcp_iod(fod->queue, fod);
1748 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1749 struct nvmet_fc_fcp_iod *fod)
1753 fod->fcpreq->op = NVMET_FCOP_RSP;
1754 fod->fcpreq->timeout = 0;
1756 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1758 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1760 nvmet_fc_abort_op(tgtport, fod);
1764 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1765 struct nvmet_fc_fcp_iod *fod, u8 op)
1767 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1768 struct scatterlist *sg, *datasg;
1769 unsigned long flags;
1774 fcpreq->offset = fod->offset;
1775 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1776 tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
1777 (fod->total_length - fod->offset));
1778 tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
1779 tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
1781 fcpreq->transfer_length = tlen;
1782 fcpreq->transferred_length = 0;
1783 fcpreq->fcp_error = 0;
1788 datasg = fod->next_sg;
1789 sg_off = fod->next_sg_offset;
1791 for (sg = fcpreq->sg ; tlen; sg++) {
1794 sg->offset += sg_off;
1795 sg->length -= sg_off;
1796 sg->dma_address += sg_off;
1799 if (tlen < sg->length) {
1801 fod->next_sg = datasg;
1802 fod->next_sg_offset += tlen;
1803 } else if (tlen == sg->length) {
1804 fod->next_sg_offset = 0;
1805 fod->next_sg = sg_next(datasg);
1807 fod->next_sg_offset = 0;
1808 datasg = sg_next(datasg);
1815 * If the last READDATA request: check if LLDD supports
1816 * combined xfr with response.
1818 if ((op == NVMET_FCOP_READDATA) &&
1819 ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1820 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1821 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1822 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1825 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1828 * should be ok to set w/o lock as it's in the thread of
1829 * execution (not an async timer routine) and doesn't
1830 * contend with any clearing action
1834 if (op == NVMET_FCOP_WRITEDATA) {
1835 spin_lock_irqsave(&fod->flock, flags);
1836 fod->writedataactive = false;
1837 spin_unlock_irqrestore(&fod->flock, flags);
1838 nvmet_req_complete(&fod->req,
1839 NVME_SC_FC_TRANSPORT_ERROR);
1840 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1841 fcpreq->fcp_error = ret;
1842 fcpreq->transferred_length = 0;
1843 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1849 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1851 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1852 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1854 /* if in the middle of an io and we need to tear down */
1856 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1857 nvmet_req_complete(&fod->req,
1858 NVME_SC_FC_TRANSPORT_ERROR);
1862 nvmet_fc_abort_op(tgtport, fod);
1870 * actual done handler for FCP operations when completed by the lldd
1873 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1875 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1876 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1877 unsigned long flags;
1880 spin_lock_irqsave(&fod->flock, flags);
1882 fod->writedataactive = false;
1883 spin_unlock_irqrestore(&fod->flock, flags);
1885 switch (fcpreq->op) {
1887 case NVMET_FCOP_WRITEDATA:
1888 if (__nvmet_fc_fod_op_abort(fod, abort))
1890 if (fcpreq->fcp_error ||
1891 fcpreq->transferred_length != fcpreq->transfer_length) {
1892 spin_lock(&fod->flock);
1894 spin_unlock(&fod->flock);
1896 nvmet_req_complete(&fod->req,
1897 NVME_SC_FC_TRANSPORT_ERROR);
1901 fod->offset += fcpreq->transferred_length;
1902 if (fod->offset != fod->total_length) {
1903 spin_lock_irqsave(&fod->flock, flags);
1904 fod->writedataactive = true;
1905 spin_unlock_irqrestore(&fod->flock, flags);
1907 /* transfer the next chunk */
1908 nvmet_fc_transfer_fcp_data(tgtport, fod,
1909 NVMET_FCOP_WRITEDATA);
1913 /* data transfer complete, resume with nvmet layer */
1915 fod->req.execute(&fod->req);
1919 case NVMET_FCOP_READDATA:
1920 case NVMET_FCOP_READDATA_RSP:
1921 if (__nvmet_fc_fod_op_abort(fod, abort))
1923 if (fcpreq->fcp_error ||
1924 fcpreq->transferred_length != fcpreq->transfer_length) {
1925 nvmet_fc_abort_op(tgtport, fod);
1931 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
1932 /* data no longer needed */
1933 nvmet_fc_free_tgt_pgs(fod);
1934 nvmet_fc_free_fcp_iod(fod->queue, fod);
1938 fod->offset += fcpreq->transferred_length;
1939 if (fod->offset != fod->total_length) {
1940 /* transfer the next chunk */
1941 nvmet_fc_transfer_fcp_data(tgtport, fod,
1942 NVMET_FCOP_READDATA);
1946 /* data transfer complete, send response */
1948 /* data no longer needed */
1949 nvmet_fc_free_tgt_pgs(fod);
1951 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1955 case NVMET_FCOP_RSP:
1956 if (__nvmet_fc_fod_op_abort(fod, abort))
1958 nvmet_fc_free_fcp_iod(fod->queue, fod);
1967 nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
1969 struct nvmet_fc_fcp_iod *fod =
1970 container_of(work, struct nvmet_fc_fcp_iod, done_work);
1972 nvmet_fc_fod_op_done(fod);
1976 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
1978 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
1979 struct nvmet_fc_tgt_queue *queue = fod->queue;
1981 if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
1982 /* context switch so completion is not in ISR context */
1983 queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
1985 nvmet_fc_fod_op_done(fod);
1989 * actual completion handler after execution by the nvmet layer
1992 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
1993 struct nvmet_fc_fcp_iod *fod, int status)
1995 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1996 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
1997 unsigned long flags;
2000 spin_lock_irqsave(&fod->flock, flags);
2002 spin_unlock_irqrestore(&fod->flock, flags);
2004 /* if we have a CQE, snoop the last sq_head value */
2006 fod->queue->sqhd = cqe->sq_head;
2009 nvmet_fc_abort_op(tgtport, fod);
2013 /* if an error handling the cmd post initial parsing */
2015 /* fudge up a failed CQE status for our transport error */
2016 memset(cqe, 0, sizeof(*cqe));
2017 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2018 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2019 cqe->command_id = sqe->command_id;
2020 cqe->status = cpu_to_le16(status);
2024 * try to push the data even if the SQE status is non-zero.
2025 * There may be a status where data still was intended to
2028 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2029 /* push the data over before sending rsp */
2030 nvmet_fc_transfer_fcp_data(tgtport, fod,
2031 NVMET_FCOP_READDATA);
2035 /* writes & no data - fall thru */
2038 /* data no longer needed */
2039 nvmet_fc_free_tgt_pgs(fod);
2041 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2046 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2048 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2049 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2051 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2056 * Actual processing routine for received FC-NVME FCP Requests from the LLD
2059 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2060 struct nvmet_fc_fcp_iod *fod)
2062 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2066 * Fused commands are currently not supported in the linux
2069 * As such, the implementation of the FC transport does not
2070 * look at the fused commands and order delivery to the upper
2071 * layer until we have both based on csn.
2074 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2076 fod->total_length = be32_to_cpu(cmdiu->data_len);
2077 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2078 fod->io_dir = NVMET_FCP_WRITE;
2079 if (!nvme_is_write(&cmdiu->sqe))
2080 goto transport_error;
2081 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2082 fod->io_dir = NVMET_FCP_READ;
2083 if (nvme_is_write(&cmdiu->sqe))
2084 goto transport_error;
2086 fod->io_dir = NVMET_FCP_NODATA;
2087 if (fod->total_length)
2088 goto transport_error;
2091 fod->req.cmd = &fod->cmdiubuf.sqe;
2092 fod->req.rsp = &fod->rspiubuf.cqe;
2093 fod->req.port = fod->queue->port;
2095 /* ensure nvmet handlers will set cmd handler callback */
2096 fod->req.execute = NULL;
2098 /* clear any response payload */
2099 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2101 ret = nvmet_req_init(&fod->req,
2102 &fod->queue->nvme_cq,
2103 &fod->queue->nvme_sq,
2104 &nvmet_fc_tgt_fcp_ops);
2105 if (!ret) { /* bad SQE content or invalid ctrl state */
2106 nvmet_fc_abort_op(tgtport, fod);
2110 /* keep a running counter of tail position */
2111 atomic_inc(&fod->queue->sqtail);
2113 fod->data_sg = NULL;
2114 fod->data_sg_cnt = 0;
2115 if (fod->total_length) {
2116 ret = nvmet_fc_alloc_tgt_pgs(fod);
2118 nvmet_req_complete(&fod->req, ret);
2122 fod->req.sg = fod->data_sg;
2123 fod->req.sg_cnt = fod->data_sg_cnt;
2125 fod->next_sg = fod->data_sg;
2126 fod->next_sg_offset = 0;
2128 if (fod->io_dir == NVMET_FCP_WRITE) {
2129 /* pull the data over before invoking nvmet layer */
2130 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2137 * can invoke the nvmet layer now. If read data, cmd completion will
2141 fod->req.execute(&fod->req);
2146 nvmet_fc_abort_op(tgtport, fod);
2150 * Actual processing routine for received FC-NVME FCP Requests from the LLD
2153 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2155 struct nvmet_fc_fcp_iod *fod =
2156 container_of(work, struct nvmet_fc_fcp_iod, work);
2157 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2159 nvmet_fc_handle_fcp_rqst(tgtport, fod);
2163 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2164 * upon the reception of an NVME FCP CMD IU.
2166 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2167 * layer for processing.
2169 * The nvmet-fc layer will copy cmd payload to an internal structure for
2170 * processing. As such, upon completion of the routine, the LLDD may
2171 * immediately free/reuse the CMD IU buffer passed in the call.
2173 * If this routine returns error, the lldd should abort the exchange.
2175 * @target_port: pointer to the (registered) target port the FCP CMD IU
2177 * @fcpreq: pointer to a fcpreq request structure to be used to reference
2178 * the exchange corresponding to the FCP Exchange.
2179 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
2180 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2183 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2184 struct nvmefc_tgt_fcp_req *fcpreq,
2185 void *cmdiubuf, u32 cmdiubuf_len)
2187 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2188 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2189 struct nvmet_fc_tgt_queue *queue;
2190 struct nvmet_fc_fcp_iod *fod;
2192 /* validate iu, so the connection id can be used to find the queue */
2193 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2194 (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2195 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2196 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2199 queue = nvmet_fc_find_target_queue(tgtport,
2200 be64_to_cpu(cmdiu->connection_id));
2205 * note: reference taken by find_target_queue
2206 * After successful fod allocation, the fod will inherit the
2207 * ownership of that reference and will remove the reference
2208 * when the fod is freed.
2211 fod = nvmet_fc_alloc_fcp_iod(queue);
2213 /* release the queue lookup reference */
2214 nvmet_fc_tgt_q_put(queue);
2218 fcpreq->nvmet_fc_private = fod;
2219 fod->fcpreq = fcpreq;
2221 * put all admin cmds on hw queue id 0. All io commands go to
2222 * the respective hw queue based on a modulo basis
2224 fcpreq->hwqid = queue->qid ?
2225 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
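/*
 * Worked example (illustrative value): with max_hw_queues = 4, qid 1
 * and qid 5 both map to hwqid 0, qid 6 maps to hwqid 1, and the admin
 * queue (qid 0) always uses hwqid 0.
 */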
2226 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2228 if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
2229 queue_work_on(queue->cpu, queue->work_q, &fod->work);
2231 nvmet_fc_handle_fcp_rqst(tgtport, fod);
2235 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
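/*
 * Sketch of an LLDD FCP command-receive path (lldd_* names are
 * hypothetical; only nvmet_fc_rcv_fcp_req() is from this file):
 *
 *	struct nvmefc_tgt_fcp_req *fcpreq = &lldd_exch->tgt_fcp_req;
 *
 *	if (nvmet_fc_rcv_fcp_req(lldd->targetport, fcpreq,
 *				 cmd_iu_buf, cmd_iu_len))
 *		lldd_abort_exchange(lldd_exch);
 *	// cmd_iu_buf may be reused right away: the CMD IU was copied
 */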
2238 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2239 * upon the reception of an ABTS for a FCP command
2241 * Notify the transport that an ABTS has been received for a FCP command
2242 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2243 * LLDD believes the command is still being worked on
2244 * (template_ops->fcp_req_release() has not been called).
2246 * The transport will wait for any outstanding work (an op to the LLDD,
2247 * which the lldd should complete with error due to the ABTS; or the
2248 * completion from the nvmet layer of the nvme command), then will
2249 * stop processing and call the fcp_req_release() callback to
2250 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
2251 * to the ABTS either after return from this function (assuming any
2252 * outstanding op work has been terminated) or upon the callback being
2255 * @target_port: pointer to the (registered) target port the FCP CMD IU
2257 * @fcpreq: pointer to the fcpreq request structure that corresponds
2258 * to the exchange that received the ABTS.
2261 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2262 struct nvmefc_tgt_fcp_req *fcpreq)
2264 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2265 struct nvmet_fc_tgt_queue *queue;
2266 unsigned long flags;
2268 if (!fod || fod->fcpreq != fcpreq)
2269 /* job appears to have already completed, ignore abort */
2274 spin_lock_irqsave(&queue->qlock, flags);
2277 * mark as abort. The abort handler, invoked upon completion
2278 * of any work, will detect the aborted status and do the
2281 spin_lock(&fod->flock);
2283 fod->aborted = true;
2284 spin_unlock(&fod->flock);
2286 spin_unlock_irqrestore(&queue->qlock, flags);
2288 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
2292 FCT_TRADDR_WWNN = 1 << 0,
2293 FCT_TRADDR_WWPN = 1 << 1,
2296 struct nvmet_fc_traddr {
2301 static const match_table_t traddr_opt_tokens = {
2302 { FCT_TRADDR_WWNN, "nn-%s" },
2303 { FCT_TRADDR_WWPN, "pn-%s" },
2304 { FCT_TRADDR_ERR, NULL }
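/*
 * Example traddr string for the parser below (values are illustrative):
 *
 *	"nn-0x20000090fa942250:pn-0x10000090fa942250"
 *
 * the nn-/pn- tokens carry the WWNN and WWPN as 64-bit hex values.
 */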
2308 nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
2310 substring_t args[MAX_OPT_ARGS];
2311 char *options, *o, *p;
2315 options = o = kstrdup(buf, GFP_KERNEL);
2319 while ((p = strsep(&o, ":\n")) != NULL) {
2323 token = match_token(p, traddr_opt_tokens, args);
2325 case FCT_TRADDR_WWNN:
2326 if (match_u64(args, &token64)) {
2330 traddr->nn = token64;
2332 case FCT_TRADDR_WWPN:
2333 if (match_u64(args, &token64)) {
2337 traddr->pn = token64;
2340 pr_warn("unknown traddr token or missing value '%s'\n",
2353 nvmet_fc_add_port(struct nvmet_port *port)
2355 struct nvmet_fc_tgtport *tgtport;
2356 struct nvmet_fc_traddr traddr = { 0L, 0L };
2357 unsigned long flags;
2360 /* validate the address info */
2361 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2362 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2365 /* map the traddr address info to a target port */
2367 ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
2372 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2373 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2374 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2375 (tgtport->fc_target_port.port_name == traddr.pn)) {
2376 /* an FC port can only be one nvmet port id */
2377 if (!tgtport->port) {
2378 tgtport->port = port;
2379 port->priv = tgtport;
2380 nvmet_fc_tgtport_get(tgtport);
2387 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2392 nvmet_fc_remove_port(struct nvmet_port *port)
2394 struct nvmet_fc_tgtport *tgtport = port->priv;
2395 unsigned long flags;
2397 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2398 if (tgtport->port == port) {
2399 nvmet_fc_tgtport_put(tgtport);
2400 tgtport->port = NULL;
2402 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2405 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2406 .owner = THIS_MODULE,
2407 .type = NVMF_TRTYPE_FC,
2409 .add_port = nvmet_fc_add_port,
2410 .remove_port = nvmet_fc_remove_port,
2411 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2412 .delete_ctrl = nvmet_fc_delete_ctrl,
2415 static int __init nvmet_fc_init_module(void)
2417 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2420 static void __exit nvmet_fc_exit_module(void)
2422 /* sanity check - all targetports should be removed */
2423 if (!list_empty(&nvmet_fc_target_list))
2424 pr_warn("%s: targetport list not empty\n", __func__);
2426 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2428 ida_destroy(&nvmet_fc_tgtport_cnt);
2431 module_init(nvmet_fc_init_module);
2432 module_exit(nvmet_fc_exit_module);
2434 MODULE_LICENSE("GPL v2");