2 * Copyright (c) 2016 Avago Technologies. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful.
9 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13 * See the GNU General Public License for more details, a copy of which
14 * can be found in the file COPYING included with this package
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/blk-mq.h>
21 #include <linux/parser.h>
22 #include <linux/random.h>
23 #include <uapi/scsi/fc/fc_fs.h>
24 #include <uapi/scsi/fc/fc_els.h>
27 #include <linux/nvme-fc-driver.h>
28 #include <linux/nvme-fc.h>
31 /* *************************** Data Structures/Defines ****************** */
34 #define NVMET_LS_CTX_COUNT 4
36 /* for this implementation, assume small single frame rqst/rsp */
37 #define NVME_FC_MAX_LS_BUFFER_SIZE 2048
39 struct nvmet_fc_tgtport;
40 struct nvmet_fc_tgt_assoc;
42 struct nvmet_fc_ls_iod {
43 struct nvmefc_tgt_ls_req *lsreq;
44 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
46 struct list_head ls_list; /* tgtport->ls_list */
48 struct nvmet_fc_tgtport *tgtport;
49 struct nvmet_fc_tgt_assoc *assoc;
56 struct scatterlist sg[2];
58 struct work_struct work;
59 } __aligned(sizeof(unsigned long long));
61 #define NVMET_FC_MAX_KB_PER_XFR 256
63 enum nvmet_fcp_datadir {
70 struct nvmet_fc_fcp_iod {
71 struct nvmefc_tgt_fcp_req *fcpreq;
73 struct nvme_fc_cmd_iu cmdiubuf;
74 struct nvme_fc_ersp_iu rspiubuf;
76 struct scatterlist *data_sg;
77 struct scatterlist *next_sg;
82 enum nvmet_fcp_datadir io_dir;
88 struct work_struct work;
90 struct nvmet_fc_tgtport *tgtport;
91 struct nvmet_fc_tgt_queue *queue;
93 struct list_head fcp_list; /* tgtport->fcp_list */
96 struct nvmet_fc_tgtport {
98 struct nvmet_fc_target_port fc_target_port;
100 struct list_head tgt_list; /* nvmet_fc_target_list */
101 struct device *dev; /* dev for dma mapping */
102 struct nvmet_fc_target_template *ops;
104 struct nvmet_fc_ls_iod *iod;
106 struct list_head ls_list;
107 struct list_head ls_busylist;
108 struct list_head assoc_list;
109 struct ida assoc_cnt;
110 struct nvmet_port *port;
114 struct nvmet_fc_tgt_queue {
126 struct nvmet_port *port;
127 struct nvmet_cq nvme_cq;
128 struct nvmet_sq nvme_sq;
129 struct nvmet_fc_tgt_assoc *assoc;
130 struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
131 struct list_head fod_list;
132 struct workqueue_struct *work_q;
134 } __aligned(sizeof(unsigned long long));
136 struct nvmet_fc_tgt_assoc {
139 struct nvmet_fc_tgtport *tgtport;
140 struct list_head a_list;
141 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES];
147 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
149 return (iodptr - iodptr->tgtport->iod);
153 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
155 return (fodptr - fodptr->queue->fod);
160 * Association and Connection IDs:
162 * Association ID will have random number in upper 6 bytes and zero
165 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
167 * note: Association ID = Connection ID for queue 0
169 #define BYTES_FOR_QID sizeof(u16)
170 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
171 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
174 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
176 return (assoc->association_id | qid);
180 nvmet_fc_getassociationid(u64 connectionid)
182 return connectionid & ~NVMET_FC_QUEUEID_MASK;
186 nvmet_fc_getqueueid(u64 connectionid)
188 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
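/*
 * Illustrative sketch (not built, not part of the driver): how the
 * helpers above compose and split a connection id. The qid value is an
 * arbitrary example.
 */
#if 0
static void nvmet_fc_connid_example(struct nvmet_fc_tgt_assoc *assoc)
{
	u64 connid = nvmet_fc_makeconnid(assoc, 3);	/* I/O queue 3 */
	u64 associd = nvmet_fc_getassociationid(connid);/* upper 6 bytes */
	u16 qid = nvmet_fc_getqueueid(connid);		/* == 3 */
}
#endif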
191 static inline struct nvmet_fc_tgtport *
192 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
194 return container_of(targetport, struct nvmet_fc_tgtport,
198 static inline struct nvmet_fc_fcp_iod *
199 nvmet_req_to_fod(struct nvmet_req *nvme_req)
201 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
205 /* *************************** Globals **************************** */
208 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
210 static LIST_HEAD(nvmet_fc_target_list);
211 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
214 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
215 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
216 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
217 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
218 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
219 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
220 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
221 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
224 /* *********************** FC-NVME DMA Handling **************************** */
227 * The fcloop device passes in a NULL device pointer. Real LLDDs will
228 * pass in a valid device pointer. If NULL is passed to the dma mapping
229 * routines, depending on the platform, it may or may not succeed, and
233 * Wrap all the dma routines and check the dev pointer.
235 * For simple mappings (those returning just a dma address), we'll noop
236 * them, returning a dma address of 0.
238 * On more complex mappings (dma_map_sg), a pseudo routine fills
239 * in the scatter list, setting all dma addresses to 0.
242 static inline dma_addr_t
243 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
244 enum dma_data_direction dir)
246 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
250 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
252 return dev ? dma_mapping_error(dev, dma_addr) : 0;
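/*
 * Note: with a NULL dev (the fcloop case above), fc_dma_map_single()
 * returns a dma address of 0 and fc_dma_mapping_error() reports success,
 * so callers need no special handling for the pseudo mappings.
 */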
256 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
257 enum dma_data_direction dir)
260 dma_unmap_single(dev, addr, size, dir);
264 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
265 enum dma_data_direction dir)
268 dma_sync_single_for_cpu(dev, addr, size, dir);
272 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
273 enum dma_data_direction dir)
276 dma_sync_single_for_device(dev, addr, size, dir);
279 /* pseudo dma_map_sg call */
281 fc_map_sg(struct scatterlist *sg, int nents)
283 struct scatterlist *s;
286 WARN_ON(nents == 0 || sg[0].length == 0);
288 for_each_sg(sg, s, nents, i) {
290 #ifdef CONFIG_NEED_SG_DMA_LENGTH
291 s->dma_length = s->length;
298 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
299 enum dma_data_direction dir)
301 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
305 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
306 enum dma_data_direction dir)
309 dma_unmap_sg(dev, sg, nents, dir);
313 /* *********************** FC-NVME Port Management ************************ */
317 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
319 struct nvmet_fc_ls_iod *iod;
322 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
329 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
330 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
331 iod->tgtport = tgtport;
332 list_add_tail(&iod->ls_list, &tgtport->ls_list);
334 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
339 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
341 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
342 NVME_FC_MAX_LS_BUFFER_SIZE,
344 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
352 list_del(&iod->ls_list);
353 for (iod--, i--; i >= 0; iod--, i--) {
354 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
355 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
357 list_del(&iod->ls_list);
366 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
368 struct nvmet_fc_ls_iod *iod = tgtport->iod;
371 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
372 fc_dma_unmap_single(tgtport->dev,
373 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
376 list_del(&iod->ls_list);
381 static struct nvmet_fc_ls_iod *
382 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
384 struct nvmet_fc_ls_iod *iod;
387 spin_lock_irqsave(&tgtport->lock, flags);
388 iod = list_first_entry_or_null(&tgtport->ls_list,
389 struct nvmet_fc_ls_iod, ls_list);
391 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
392 spin_unlock_irqrestore(&tgtport->lock, flags);
398 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
399 struct nvmet_fc_ls_iod *iod)
403 spin_lock_irqsave(&tgtport->lock, flags);
404 list_move(&iod->ls_list, &tgtport->ls_list);
405 spin_unlock_irqrestore(&tgtport->lock, flags);
409 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
410 struct nvmet_fc_tgt_queue *queue)
412 struct nvmet_fc_fcp_iod *fod = queue->fod;
415 for (i = 0; i < queue->sqsize; fod++, i++) {
416 INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
417 fod->tgtport = tgtport;
420 list_add_tail(&fod->fcp_list, &queue->fod_list);
421 spin_lock_init(&fod->flock);
423 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
424 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
425 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
426 list_del(&fod->fcp_list);
427 for (fod--, i--; i >= 0; fod--, i--) {
428 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
429 sizeof(fod->rspiubuf),
432 list_del(&fod->fcp_list);
441 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
442 struct nvmet_fc_tgt_queue *queue)
444 struct nvmet_fc_fcp_iod *fod = queue->fod;
447 for (i = 0; i < queue->sqsize; fod++, i++) {
449 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
450 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
454 static struct nvmet_fc_fcp_iod *
455 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
457 struct nvmet_fc_fcp_iod *fod;
460 spin_lock_irqsave(&queue->qlock, flags);
461 fod = list_first_entry_or_null(&queue->fod_list,
462 struct nvmet_fc_fcp_iod, fcp_list);
464 list_del(&fod->fcp_list);
468 * no queue reference is taken, as it was taken by the
469 * queue lookup just prior to the allocation. The fod
470 * will "inherit" that reference.
473 spin_unlock_irqrestore(&queue->qlock, flags);
479 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
480 struct nvmet_fc_fcp_iod *fod)
484 spin_lock_irqsave(&queue->qlock, flags);
485 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
487 spin_unlock_irqrestore(&queue->qlock, flags);
490 * release the reference taken at queue lookup and fod allocation
492 nvmet_fc_tgt_q_put(queue);
496 nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
500 if (!(tgtport->ops->target_features &
501 NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
502 tgtport->ops->max_hw_queues == 1)
503 return WORK_CPU_UNBOUND;
505 /* Simple cpu selection based on qid modulo active cpu count */
506 idx = !qid ? 0 : (qid - 1) % num_active_cpus();
508 /* find the n'th active cpu */
509 for (cpu = 0, cnt = 0; ; ) {
510 if (cpu_active(cpu)) {
515 cpu = (cpu + 1) % num_possible_cpus();
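/*
 * Worked example (illustrative): with 8 active cpus, qid 1 lands on the
 * 1st active cpu, qid 9 wraps back to the 1st, and qid 0 (admin) always
 * uses the 1st active cpu; LLDDs without NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED
 * simply get WORK_CPU_UNBOUND.
 */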
521 static struct nvmet_fc_tgt_queue *
522 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
525 struct nvmet_fc_tgt_queue *queue;
529 if (qid >= NVMET_NR_QUEUES)
532 queue = kzalloc((sizeof(*queue) +
533 (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
538 if (!nvmet_fc_tgt_a_get(assoc))
541 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
542 assoc->tgtport->fc_target_port.port_num,
547 queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
549 queue->sqsize = sqsize;
550 queue->assoc = assoc;
551 queue->port = assoc->tgtport->port;
552 queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
553 INIT_LIST_HEAD(&queue->fod_list);
554 atomic_set(&queue->connected, 0);
555 atomic_set(&queue->sqtail, 0);
556 atomic_set(&queue->rsn, 1);
557 atomic_set(&queue->zrspcnt, 0);
558 spin_lock_init(&queue->qlock);
559 kref_init(&queue->ref);
561 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
563 ret = nvmet_sq_init(&queue->nvme_sq);
565 goto out_fail_iodlist;
567 WARN_ON(assoc->queues[qid]);
568 spin_lock_irqsave(&assoc->tgtport->lock, flags);
569 assoc->queues[qid] = queue;
570 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
575 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
576 destroy_workqueue(queue->work_q);
578 nvmet_fc_tgt_a_put(assoc);
586 nvmet_fc_tgt_queue_free(struct kref *ref)
588 struct nvmet_fc_tgt_queue *queue =
589 container_of(ref, struct nvmet_fc_tgt_queue, ref);
592 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
593 queue->assoc->queues[queue->qid] = NULL;
594 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
596 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
598 nvmet_fc_tgt_a_put(queue->assoc);
600 destroy_workqueue(queue->work_q);
606 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
608 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
612 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
614 return kref_get_unless_zero(&queue->ref);
619 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
620 struct nvmefc_tgt_fcp_req *fcpreq)
624 fcpreq->op = NVMET_FCOP_ABORT;
627 fcpreq->transfer_length = 0;
628 fcpreq->transferred_length = 0;
629 fcpreq->fcp_error = 0;
632 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
634 /* should never reach here !! */
640 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
642 struct nvmet_fc_fcp_iod *fod = queue->fod;
647 disconnect = atomic_xchg(&queue->connected, 0);
649 spin_lock_irqsave(&queue->qlock, flags);
650 /* abort outstanding io's */
651 for (i = 0; i < queue->sqsize; fod++, i++) {
653 spin_lock(&fod->flock);
655 spin_unlock(&fod->flock);
658 spin_unlock_irqrestore(&queue->qlock, flags);
660 flush_workqueue(queue->work_q);
663 nvmet_sq_destroy(&queue->nvme_sq);
665 nvmet_fc_tgt_q_put(queue);
668 static struct nvmet_fc_tgt_queue *
669 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
672 struct nvmet_fc_tgt_assoc *assoc;
673 struct nvmet_fc_tgt_queue *queue;
674 u64 association_id = nvmet_fc_getassociationid(connection_id);
675 u16 qid = nvmet_fc_getqueueid(connection_id);
678 spin_lock_irqsave(&tgtport->lock, flags);
679 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
680 if (association_id == assoc->association_id) {
681 queue = assoc->queues[qid];
683 (!atomic_read(&queue->connected) ||
684 !nvmet_fc_tgt_q_get(queue)))
686 spin_unlock_irqrestore(&tgtport->lock, flags);
690 spin_unlock_irqrestore(&tgtport->lock, flags);
694 static struct nvmet_fc_tgt_assoc *
695 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
697 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
701 bool needrandom = true;
703 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
707 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
711 if (!nvmet_fc_tgtport_get(tgtport))
714 assoc->tgtport = tgtport;
716 INIT_LIST_HEAD(&assoc->a_list);
717 kref_init(&assoc->ref);
720 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
721 ran = ran << BYTES_FOR_QID_SHIFT;
723 spin_lock_irqsave(&tgtport->lock, flags);
725 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
726 if (ran == tmpassoc->association_id) {
731 assoc->association_id = ran;
732 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
734 spin_unlock_irqrestore(&tgtport->lock, flags);
740 ida_simple_remove(&tgtport->assoc_cnt, idx);
747 nvmet_fc_target_assoc_free(struct kref *ref)
749 struct nvmet_fc_tgt_assoc *assoc =
750 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
751 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
754 spin_lock_irqsave(&tgtport->lock, flags);
755 list_del(&assoc->a_list);
756 spin_unlock_irqrestore(&tgtport->lock, flags);
757 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
759 nvmet_fc_tgtport_put(tgtport);
763 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
765 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
769 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
771 return kref_get_unless_zero(&assoc->ref);
775 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
777 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
778 struct nvmet_fc_tgt_queue *queue;
782 spin_lock_irqsave(&tgtport->lock, flags);
783 for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
784 queue = assoc->queues[i];
786 if (!nvmet_fc_tgt_q_get(queue))
788 spin_unlock_irqrestore(&tgtport->lock, flags);
789 nvmet_fc_delete_target_queue(queue);
790 nvmet_fc_tgt_q_put(queue);
791 spin_lock_irqsave(&tgtport->lock, flags);
794 spin_unlock_irqrestore(&tgtport->lock, flags);
796 nvmet_fc_tgt_a_put(assoc);
799 static struct nvmet_fc_tgt_assoc *
800 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
803 struct nvmet_fc_tgt_assoc *assoc;
804 struct nvmet_fc_tgt_assoc *ret = NULL;
807 spin_lock_irqsave(&tgtport->lock, flags);
808 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
809 if (association_id == assoc->association_id) {
811 nvmet_fc_tgt_a_get(assoc);
815 spin_unlock_irqrestore(&tgtport->lock, flags);
822 * nvmet_fc_register_targetport - transport entry point called by an
823 * LLDD to register the existence of a local
824 * NVMe subsystem FC port.
825 * @pinfo: pointer to information about the port to be registered
826 * @template: LLDD entrypoints and operational parameters for the port
827 * @dev: physical hardware device node port corresponds to. Will be
828 * used for DMA mappings
829 * @portptr: pointer to a local port pointer. Upon success, the routine
830 * will allocate an nvmet_fc_target_port structure and place its
831 * address in the target port pointer. Upon failure, target port
832 * pointer will be set to NULL.
835 * a completion status. Must be 0 upon success; a negative errno
836 * (ex: -ENXIO) upon failure.
839 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
840 struct nvmet_fc_target_template *template,
842 struct nvmet_fc_target_port **portptr)
844 struct nvmet_fc_tgtport *newrec;
848 if (!template->xmt_ls_rsp || !template->fcp_op ||
849 !template->targetport_delete ||
850 !template->max_hw_queues || !template->max_sgl_segments ||
851 !template->max_dif_sgl_segments || !template->dma_boundary) {
853 goto out_regtgt_failed;
856 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
860 goto out_regtgt_failed;
863 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
869 if (!get_device(dev) && dev) {
874 newrec->fc_target_port.node_name = pinfo->node_name;
875 newrec->fc_target_port.port_name = pinfo->port_name;
876 newrec->fc_target_port.private = &newrec[1];
877 newrec->fc_target_port.port_id = pinfo->port_id;
878 newrec->fc_target_port.port_num = idx;
879 INIT_LIST_HEAD(&newrec->tgt_list);
881 newrec->ops = template;
882 spin_lock_init(&newrec->lock);
883 INIT_LIST_HEAD(&newrec->ls_list);
884 INIT_LIST_HEAD(&newrec->ls_busylist);
885 INIT_LIST_HEAD(&newrec->assoc_list);
886 kref_init(&newrec->ref);
887 ida_init(&newrec->assoc_cnt);
889 ret = nvmet_fc_alloc_ls_iodlist(newrec);
892 goto out_free_newrec;
895 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
896 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
897 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
899 *portptr = &newrec->fc_target_port;
905 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
912 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
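/*
 * Illustrative sketch (not built): how an LLDD might register a target
 * port. All lldd_* names and the numeric limits are hypothetical; the
 * template must supply at least the entry points and limits validated
 * in nvmet_fc_register_targetport() above.
 */
#if 0
static struct nvmet_fc_target_template lldd_nvmet_template = {
	.targetport_delete	= lldd_targetport_delete,
	.xmt_ls_rsp		= lldd_xmt_ls_rsp,
	.fcp_op			= lldd_fcp_op,
	.max_hw_queues		= 4,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
	.target_features	= NVMET_FCTGTFEAT_READDATA_RSP,
	.target_priv_sz		= sizeof(struct lldd_tgtport_priv),
};

static int lldd_register(struct lldd_port *lport)
{
	struct nvmet_fc_port_info pinfo = {
		.node_name	= lport->wwnn,
		.port_name	= lport->wwpn,
		.port_id	= lport->d_id,
	};

	return nvmet_fc_register_targetport(&pinfo, &lldd_nvmet_template,
					    lport->dev, &lport->tgtport);
}
#endif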
916 nvmet_fc_free_tgtport(struct kref *ref)
918 struct nvmet_fc_tgtport *tgtport =
919 container_of(ref, struct nvmet_fc_tgtport, ref);
920 struct device *dev = tgtport->dev;
923 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
924 list_del(&tgtport->tgt_list);
925 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
927 nvmet_fc_free_ls_iodlist(tgtport);
929 /* let the LLDD know we've finished tearing it down */
930 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
932 ida_simple_remove(&nvmet_fc_tgtport_cnt,
933 tgtport->fc_target_port.port_num);
935 ida_destroy(&tgtport->assoc_cnt);
943 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
945 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
949 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
951 return kref_get_unless_zero(&tgtport->ref);
955 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
957 struct nvmet_fc_tgt_assoc *assoc, *next;
960 spin_lock_irqsave(&tgtport->lock, flags);
961 list_for_each_entry_safe(assoc, next,
962 &tgtport->assoc_list, a_list) {
963 if (!nvmet_fc_tgt_a_get(assoc))
965 spin_unlock_irqrestore(&tgtport->lock, flags);
966 nvmet_fc_delete_target_assoc(assoc);
967 nvmet_fc_tgt_a_put(assoc);
968 spin_lock_irqsave(&tgtport->lock, flags);
970 spin_unlock_irqrestore(&tgtport->lock, flags);
974 * nvmet layer has called to terminate an association
977 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
979 struct nvmet_fc_tgtport *tgtport, *next;
980 struct nvmet_fc_tgt_assoc *assoc;
981 struct nvmet_fc_tgt_queue *queue;
983 bool found_ctrl = false;
985 /* this is a bit ugly, but don't want to make locks layered */
986 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
987 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
989 if (!nvmet_fc_tgtport_get(tgtport))
991 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
993 spin_lock_irqsave(&tgtport->lock, flags);
994 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
995 queue = assoc->queues[0];
996 if (queue && queue->nvme_sq.ctrl == ctrl) {
997 if (nvmet_fc_tgt_a_get(assoc))
1002 spin_unlock_irqrestore(&tgtport->lock, flags);
1004 nvmet_fc_tgtport_put(tgtport);
1007 nvmet_fc_delete_target_assoc(assoc);
1008 nvmet_fc_tgt_a_put(assoc);
1012 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1014 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1018 * nvmet_fc_unregister_targetport - transport entry point called by an
1019 * LLDD to deregister/remove a previously
1020 * registered local NVMe subsystem FC port.
1021 * @target_port: pointer to the (registered) target port that is to be
1025 * a completion status. Must be 0 upon success; a negative errno
1026 * (ex: -ENXIO) upon failure.
1029 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1031 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1033 /* terminate any outstanding associations */
1034 __nvmet_fc_free_assocs(tgtport);
1036 nvmet_fc_tgtport_put(tgtport);
1040 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1043 /* *********************** FC-NVME LS Handling **************************** */
1047 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd)
1049 struct fcnvme_ls_acc_hdr *acc = buf;
1051 acc->w0.ls_cmd = ls_cmd;
1052 acc->desc_list_len = desc_len;
1053 acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1054 acc->rqst.desc_len =
1055 fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1056 acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1060 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1061 u8 reason, u8 explanation, u8 vendor)
1063 struct fcnvme_ls_rjt *rjt = buf;
1065 nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1066 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1068 rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1069 rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1070 rjt->rjt.reason_code = reason;
1071 rjt->rjt.reason_explanation = explanation;
1072 rjt->rjt.vendor = vendor;
1074 return sizeof(struct fcnvme_ls_rjt);
1077 /* Validation Error indexes into the string table below */
1080 VERR_CR_ASSOC_LEN = 1,
1081 VERR_CR_ASSOC_RQST_LEN = 2,
1082 VERR_CR_ASSOC_CMD = 3,
1083 VERR_CR_ASSOC_CMD_LEN = 4,
1084 VERR_ERSP_RATIO = 5,
1085 VERR_ASSOC_ALLOC_FAIL = 6,
1086 VERR_QUEUE_ALLOC_FAIL = 7,
1087 VERR_CR_CONN_LEN = 8,
1088 VERR_CR_CONN_RQST_LEN = 9,
1090 VERR_ASSOC_ID_LEN = 11,
1093 VERR_CONN_ID_LEN = 14,
1095 VERR_CR_CONN_CMD = 16,
1096 VERR_CR_CONN_CMD_LEN = 17,
1097 VERR_DISCONN_LEN = 18,
1098 VERR_DISCONN_RQST_LEN = 19,
1099 VERR_DISCONN_CMD = 20,
1100 VERR_DISCONN_CMD_LEN = 21,
1101 VERR_DISCONN_SCOPE = 22,
1103 VERR_RS_RQST_LEN = 24,
1105 VERR_RS_CMD_LEN = 26,
1110 static char *validation_errors[] = {
1112 "Bad CR_ASSOC Length",
1113 "Bad CR_ASSOC Rqst Length",
1115 "Bad CR_ASSOC Cmd Length",
1117 "Association Allocation Failed",
1118 "Queue Allocation Failed",
1119 "Bad CR_CONN Length",
1120 "Bad CR_CONN Rqst Length",
1121 "Not Association ID",
1122 "Bad Association ID Length",
1124 "Not Connection ID",
1125 "Bad Connection ID Length",
1128 "Bad CR_CONN Cmd Length",
1129 "Bad DISCONN Length",
1130 "Bad DISCONN Rqst Length",
1132 "Bad DISCONN Cmd Length",
1133 "Bad Disconnect Scope",
1135 "Bad RS Rqst Length",
1137 "Bad RS Cmd Length",
1139 "Bad RS Relative Offset",
1143 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1144 struct nvmet_fc_ls_iod *iod)
1146 struct fcnvme_ls_cr_assoc_rqst *rqst =
1147 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1148 struct fcnvme_ls_cr_assoc_acc *acc =
1149 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1150 struct nvmet_fc_tgt_queue *queue;
1153 memset(acc, 0, sizeof(*acc));
1155 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
1156 ret = VERR_CR_ASSOC_LEN;
1157 else if (rqst->desc_list_len !=
1159 sizeof(struct fcnvme_ls_cr_assoc_rqst)))
1160 ret = VERR_CR_ASSOC_RQST_LEN;
1161 else if (rqst->assoc_cmd.desc_tag !=
1162 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1163 ret = VERR_CR_ASSOC_CMD;
1164 else if (rqst->assoc_cmd.desc_len !=
1166 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
1167 ret = VERR_CR_ASSOC_CMD_LEN;
1168 else if (!rqst->assoc_cmd.ersp_ratio ||
1169 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1170 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1171 ret = VERR_ERSP_RATIO;
1174 /* new association w/ admin queue */
1175 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1177 ret = VERR_ASSOC_ALLOC_FAIL;
1179 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1180 be16_to_cpu(rqst->assoc_cmd.sqsize));
1182 ret = VERR_QUEUE_ALLOC_FAIL;
1187 dev_err(tgtport->dev,
1188 "Create Association LS failed: %s\n",
1189 validation_errors[ret]);
1190 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1191 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1197 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1198 atomic_set(&queue->connected, 1);
1199 queue->sqhd = 0; /* best place to init value */
1201 /* format a response */
1203 iod->lsreq->rsplen = sizeof(*acc);
1205 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1207 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1208 FCNVME_LS_CREATE_ASSOCIATION);
1209 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1210 acc->associd.desc_len =
1212 sizeof(struct fcnvme_lsdesc_assoc_id));
1213 acc->associd.association_id =
1214 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1215 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1216 acc->connectid.desc_len =
1218 sizeof(struct fcnvme_lsdesc_conn_id));
1219 acc->connectid.connection_id = acc->associd.association_id;
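/*
 * Note: for the admin queue created here (qid 0), the connection id
 * returned in the accept equals the association id, matching the id
 * layout described above nvmet_fc_makeconnid().
 */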
1223 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1224 struct nvmet_fc_ls_iod *iod)
1226 struct fcnvme_ls_cr_conn_rqst *rqst =
1227 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1228 struct fcnvme_ls_cr_conn_acc *acc =
1229 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1230 struct nvmet_fc_tgt_queue *queue;
1233 memset(acc, 0, sizeof(*acc));
1235 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1236 ret = VERR_CR_CONN_LEN;
1237 else if (rqst->desc_list_len !=
1239 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1240 ret = VERR_CR_CONN_RQST_LEN;
1241 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1242 ret = VERR_ASSOC_ID;
1243 else if (rqst->associd.desc_len !=
1245 sizeof(struct fcnvme_lsdesc_assoc_id)))
1246 ret = VERR_ASSOC_ID_LEN;
1247 else if (rqst->connect_cmd.desc_tag !=
1248 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1249 ret = VERR_CR_CONN_CMD;
1250 else if (rqst->connect_cmd.desc_len !=
1252 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1253 ret = VERR_CR_CONN_CMD_LEN;
1254 else if (!rqst->connect_cmd.ersp_ratio ||
1255 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1256 be16_to_cpu(rqst->connect_cmd.sqsize)))
1257 ret = VERR_ERSP_RATIO;
1261 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1262 be64_to_cpu(rqst->associd.association_id));
1264 ret = VERR_NO_ASSOC;
1266 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1267 be16_to_cpu(rqst->connect_cmd.qid),
1268 be16_to_cpu(rqst->connect_cmd.sqsize));
1270 ret = VERR_QUEUE_ALLOC_FAIL;
1272 /* release get taken in nvmet_fc_find_target_assoc */
1273 nvmet_fc_tgt_a_put(iod->assoc);
1278 dev_err(tgtport->dev,
1279 "Create Connection LS failed: %s\n",
1280 validation_errors[ret]);
1281 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1282 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1283 (ret == VERR_NO_ASSOC) ?
1284 ELS_RJT_PROT : ELS_RJT_LOGIC,
1289 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1290 atomic_set(&queue->connected, 1);
1291 queue->sqhd = 0; /* best place to init value */
1293 /* format a response */
1295 iod->lsreq->rsplen = sizeof(*acc);
1297 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1298 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1299 FCNVME_LS_CREATE_CONNECTION);
1300 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1301 acc->connectid.desc_len =
1303 sizeof(struct fcnvme_lsdesc_conn_id));
1304 acc->connectid.connection_id =
1305 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1306 be16_to_cpu(rqst->connect_cmd.qid)));
1310 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1311 struct nvmet_fc_ls_iod *iod)
1313 struct fcnvme_ls_disconnect_rqst *rqst =
1314 (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1315 struct fcnvme_ls_disconnect_acc *acc =
1316 (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1317 struct nvmet_fc_tgt_queue *queue = NULL;
1318 struct nvmet_fc_tgt_assoc *assoc;
1320 bool del_assoc = false;
1322 memset(acc, 0, sizeof(*acc));
1324 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1325 ret = VERR_DISCONN_LEN;
1326 else if (rqst->desc_list_len !=
1328 sizeof(struct fcnvme_ls_disconnect_rqst)))
1329 ret = VERR_DISCONN_RQST_LEN;
1330 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1331 ret = VERR_ASSOC_ID;
1332 else if (rqst->associd.desc_len !=
1334 sizeof(struct fcnvme_lsdesc_assoc_id)))
1335 ret = VERR_ASSOC_ID_LEN;
1336 else if (rqst->discon_cmd.desc_tag !=
1337 cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1338 ret = VERR_DISCONN_CMD;
1339 else if (rqst->discon_cmd.desc_len !=
1341 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1342 ret = VERR_DISCONN_CMD_LEN;
1343 else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1344 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1345 ret = VERR_DISCONN_SCOPE;
1347 /* match an active association */
1348 assoc = nvmet_fc_find_target_assoc(tgtport,
1349 be64_to_cpu(rqst->associd.association_id));
1352 if (rqst->discon_cmd.scope ==
1353 FCNVME_DISCONN_CONNECTION) {
1354 queue = nvmet_fc_find_target_queue(tgtport,
1356 rqst->discon_cmd.id));
1358 nvmet_fc_tgt_a_put(assoc);
1363 ret = VERR_NO_ASSOC;
1367 dev_err(tgtport->dev,
1368 "Disconnect LS failed: %s\n",
1369 validation_errors[ret]);
1370 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1371 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1372 (ret == VERR_NO_ASSOC) ? ELS_RJT_PROT : ELS_RJT_LOGIC,
1377 /* format a response */
1379 iod->lsreq->rsplen = sizeof(*acc);
1381 nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1383 sizeof(struct fcnvme_ls_disconnect_acc)),
1384 FCNVME_LS_DISCONNECT);
1387 /* are we to delete a Connection ID (queue) */
1389 int qid = queue->qid;
1391 nvmet_fc_delete_target_queue(queue);
1393 /* release the get taken by find_target_queue */
1394 nvmet_fc_tgt_q_put(queue);
1396 /* tear association down if io queue terminated */
1401 /* release get taken in nvmet_fc_find_target_assoc */
1402 nvmet_fc_tgt_a_put(iod->assoc);
1405 nvmet_fc_delete_target_assoc(iod->assoc);
1409 /* *********************** NVME Ctrl Routines **************************** */
1412 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1414 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1417 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1419 struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1420 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1422 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1423 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1424 nvmet_fc_free_ls_iod(tgtport, iod);
1425 nvmet_fc_tgtport_put(tgtport);
1429 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1430 struct nvmet_fc_ls_iod *iod)
1434 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1435 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1437 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1439 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1443 * Actual processing routine for received FC-NVME LS requests from the LLDD
1446 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1447 struct nvmet_fc_ls_iod *iod)
1449 struct fcnvme_ls_rqst_w0 *w0 =
1450 (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1452 iod->lsreq->nvmet_fc_private = iod;
1453 iod->lsreq->rspbuf = iod->rspbuf;
1454 iod->lsreq->rspdma = iod->rspdma;
1455 iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1456 /* Be preventive: handlers will later set a valid length */
1457 iod->lsreq->rsplen = 0;
1463 * parse request input, execute the request, and format the
1466 switch (w0->ls_cmd) {
1467 case FCNVME_LS_CREATE_ASSOCIATION:
1468 /* Creates Association and initial Admin Queue/Connection */
1469 nvmet_fc_ls_create_association(tgtport, iod);
1471 case FCNVME_LS_CREATE_CONNECTION:
1472 /* Creates an IO Queue/Connection */
1473 nvmet_fc_ls_create_connection(tgtport, iod);
1475 case FCNVME_LS_DISCONNECT:
1476 /* Terminate a Queue/Connection or the Association */
1477 nvmet_fc_ls_disconnect(tgtport, iod);
1480 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1481 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1482 ELS_RJT_INVAL, ELS_EXPL_NONE, 0);
1485 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1489 * Actual processing routine for received FC-NVME LS requests from the LLDD
1492 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1494 struct nvmet_fc_ls_iod *iod =
1495 container_of(work, struct nvmet_fc_ls_iod, work);
1496 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1498 nvmet_fc_handle_ls_rqst(tgtport, iod);
1503 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1504 * upon the reception of a NVME LS request.
1506 * The nvmet-fc layer will copy payload to an internal structure for
1507 * processing. As such, upon completion of the routine, the LLDD may
1508 * immediately free/reuse the LS request buffer passed in the call.
1510 * If this routine returns error, the LLDD should abort the exchange.
1512 * @tgtport: pointer to the (registered) target port the LS was
1514 * @lsreq: pointer to a lsreq request structure to be used to reference
1515 * the exchange corresponding to the LS.
1516 * @lsreqbuf: pointer to the buffer containing the LS Request
1517 * @lsreqbuf_len: length, in bytes, of the received LS request
1520 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1521 struct nvmefc_tgt_ls_req *lsreq,
1522 void *lsreqbuf, u32 lsreqbuf_len)
1524 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1525 struct nvmet_fc_ls_iod *iod;
1527 if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1530 if (!nvmet_fc_tgtport_get(tgtport))
1533 iod = nvmet_fc_alloc_ls_iod(tgtport);
1535 nvmet_fc_tgtport_put(tgtport);
1541 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1542 iod->rqstdatalen = lsreqbuf_len;
1544 schedule_work(&iod->work);
1548 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
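/*
 * Illustrative sketch (not built): an LLDD handing a received NVMe LS
 * frame to nvmet-fc. The lldd_* names are hypothetical; per the comment
 * above, the LS payload buffer may be reused as soon as the call returns,
 * and a non-zero return means the LLDD should abort the exchange.
 */
#if 0
static void lldd_handle_nvme_ls(struct lldd_port *lport,
			struct lldd_exchange *xchg, void *payload, u32 len)
{
	struct nvmefc_tgt_ls_req *lsreq = &xchg->nvmet_lsreq;

	if (nvmet_fc_rcv_ls_req(lport->tgtport, lsreq, payload, len))
		lldd_abort_exchange(xchg);
}
#endif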
1552 * **********************
1553 * Start of FCP handling
1554 * **********************
1558 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1560 struct scatterlist *sg;
1563 u32 page_len, length;
1566 length = fod->total_length;
1567 nent = DIV_ROUND_UP(length, PAGE_SIZE);
1568 sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1572 sg_init_table(sg, nent);
1575 page_len = min_t(u32, length, PAGE_SIZE);
1577 page = alloc_page(GFP_KERNEL);
1579 goto out_free_pages;
1581 sg_set_page(&sg[i], page, page_len, 0);
1587 fod->data_sg_cnt = nent;
1588 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1589 ((fod->io_dir == NVMET_FCP_WRITE) ?
1590 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1591 /* note: write from initiator perspective */
1598 __free_page(sg_page(&sg[i]));
1601 fod->data_sg = NULL;
1602 fod->data_sg_cnt = 0;
1604 return NVME_SC_INTERNAL;
1608 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1610 struct scatterlist *sg;
1613 if (!fod->data_sg || !fod->data_sg_cnt)
1616 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1617 ((fod->io_dir == NVMET_FCP_WRITE) ?
1618 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1619 for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1620 __free_page(sg_page(sg));
1621 kfree(fod->data_sg);
1626 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1630 /* egad, this is ugly. And sqtail is just a best guess */
1631 sqtail = atomic_read(&q->sqtail) % q->sqsize;
1633 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1634 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
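/*
 * Worked example (illustrative): sqsize 32, sqhd 5, sqtail 3 gives
 * used = 3 + 32 - 5 = 30, and 30 * 10 >= 31 * 9, so the queue is
 * treated as 90% (or more) full and an ersp will be forced below.
 */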
1639 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1642 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1643 struct nvmet_fc_fcp_iod *fod)
1645 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1646 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1647 struct nvme_completion *cqe = &ersp->cqe;
1648 u32 *cqewd = (u32 *)cqe;
1649 bool send_ersp = false;
1650 u32 rsn, rspcnt, xfr_length;
1652 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1653 xfr_length = fod->total_length;
1655 xfr_length = fod->offset;
1658 * check to see if we can send a 0's rsp.
1659 * Note: to send a 0's response, the NVME-FC host transport will
1660 * recreate the CQE. The host transport knows: sq id, SQHD (last
1661 * seen in an ersp), and command_id. Thus it will create a
1662 * zero-filled CQE with those known fields filled in. Transport
1663 * must send an ersp for any condition where the cqe won't match
1666 * Here are the FC-NVME mandated cases where we must send an ersp:
1667 * every N responses, where N=ersp_ratio
1668 * force fabric commands to send ersp's (not in FC-NVME but good
1670 * normal cmds: any time status is non-zero, or status is zero
1671 * but words 0 or 1 are non-zero.
1672 * the SQ is 90% or more full
1673 * the cmd is a fused command
1674 * transferred data length not equal to cmd iu length
1676 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1677 if (!(rspcnt % fod->queue->ersp_ratio) ||
1678 sqe->opcode == nvme_fabrics_command ||
1679 xfr_length != fod->total_length ||
1680 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1681 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1682 queue_90percent_full(fod->queue, cqe->sq_head))
1685 /* re-set the fields */
1686 fod->fcpreq->rspaddr = ersp;
1687 fod->fcpreq->rspdma = fod->rspdma;
1690 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1691 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1693 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1694 rsn = atomic_inc_return(&fod->queue->rsn);
1695 ersp->rsn = cpu_to_be32(rsn);
1696 ersp->xfrd_len = cpu_to_be32(xfr_length);
1697 fod->fcpreq->rsplen = sizeof(*ersp);
1700 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1701 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
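/*
 * Example (illustrative): with an ersp_ratio of 8, at least every 8th
 * response on the queue is sent as a full ersp even when none of the
 * other conditions above apply; the rest go out as the abbreviated
 * zero-filled response.
 */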
1704 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1707 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1708 struct nvmet_fc_fcp_iod *fod)
1712 fod->fcpreq->op = NVMET_FCOP_RSP;
1713 fod->fcpreq->timeout = 0;
1715 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1717 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1719 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1723 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1724 struct nvmet_fc_fcp_iod *fod, u8 op)
1726 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1727 struct scatterlist *sg, *datasg;
1732 fcpreq->offset = fod->offset;
1733 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1734 tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
1735 (fod->total_length - fod->offset));
1736 tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
1737 tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
1739 fcpreq->transfer_length = tlen;
1740 fcpreq->transferred_length = 0;
1741 fcpreq->fcp_error = 0;
1746 datasg = fod->next_sg;
1747 sg_off = fod->next_sg_offset;
1749 for (sg = fcpreq->sg ; tlen; sg++) {
1752 sg->offset += sg_off;
1753 sg->length -= sg_off;
1754 sg->dma_address += sg_off;
1757 if (tlen < sg->length) {
1759 fod->next_sg = datasg;
1760 fod->next_sg_offset += tlen;
1761 } else if (tlen == sg->length) {
1762 fod->next_sg_offset = 0;
1763 fod->next_sg = sg_next(datasg);
1765 fod->next_sg_offset = 0;
1766 datasg = sg_next(datasg);
1773 * If the last READDATA request: check if LLDD supports
1774 * combined xfr with response.
1776 if ((op == NVMET_FCOP_READDATA) &&
1777 ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1778 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1779 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1780 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1783 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1786 * should be ok to set w/o lock as it's in the thread of
1787 * execution (not an async timer routine) and doesn't
1788 * contend with any clearing action
1792 if (op == NVMET_FCOP_WRITEDATA)
1793 nvmet_req_complete(&fod->req,
1794 NVME_SC_FC_TRANSPORT_ERROR);
1795 else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1796 fcpreq->fcp_error = ret;
1797 fcpreq->transferred_length = 0;
1798 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1804 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
1806 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
1807 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1808 unsigned long flags;
1811 spin_lock_irqsave(&fod->flock, flags);
1813 spin_unlock_irqrestore(&fod->flock, flags);
1815 /* if in the middle of an io and we need to tear down */
1816 if (abort && fcpreq->op != NVMET_FCOP_ABORT) {
1817 /* data no longer needed */
1818 nvmet_fc_free_tgt_pgs(fod);
1820 if (fcpreq->fcp_error || abort)
1821 nvmet_req_complete(&fod->req, fcpreq->fcp_error);
1826 switch (fcpreq->op) {
1828 case NVMET_FCOP_WRITEDATA:
1829 if (abort || fcpreq->fcp_error ||
1830 fcpreq->transferred_length != fcpreq->transfer_length) {
1831 nvmet_req_complete(&fod->req,
1832 NVME_SC_FC_TRANSPORT_ERROR);
1836 fod->offset += fcpreq->transferred_length;
1837 if (fod->offset != fod->total_length) {
1838 /* transfer the next chunk */
1839 nvmet_fc_transfer_fcp_data(tgtport, fod,
1840 NVMET_FCOP_WRITEDATA);
1844 /* data transfer complete, resume with nvmet layer */
1846 fod->req.execute(&fod->req);
1850 case NVMET_FCOP_READDATA:
1851 case NVMET_FCOP_READDATA_RSP:
1852 if (abort || fcpreq->fcp_error ||
1853 fcpreq->transferred_length != fcpreq->transfer_length) {
1854 /* data no longer needed */
1855 nvmet_fc_free_tgt_pgs(fod);
1857 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1863 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
1864 /* data no longer needed */
1865 nvmet_fc_free_tgt_pgs(fod);
1866 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
1867 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1868 nvmet_fc_free_fcp_iod(fod->queue, fod);
1872 fod->offset += fcpreq->transferred_length;
1873 if (fod->offset != fod->total_length) {
1874 /* transfer the next chunk */
1875 nvmet_fc_transfer_fcp_data(tgtport, fod,
1876 NVMET_FCOP_READDATA);
1880 /* data transfer complete, send response */
1882 /* data no longer needed */
1883 nvmet_fc_free_tgt_pgs(fod);
1885 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1889 case NVMET_FCOP_RSP:
1890 case NVMET_FCOP_ABORT:
1891 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
1892 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1893 nvmet_fc_free_fcp_iod(fod->queue, fod);
1897 nvmet_fc_free_tgt_pgs(fod);
1898 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1904 * actual completion handler after execution by the nvmet layer
1907 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
1908 struct nvmet_fc_fcp_iod *fod, int status)
1910 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1911 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
1912 unsigned long flags;
1915 spin_lock_irqsave(&fod->flock, flags);
1917 spin_unlock_irqrestore(&fod->flock, flags);
1919 /* if we have a CQE, snoop the last sq_head value */
1921 fod->queue->sqhd = cqe->sq_head;
1924 /* data no longer needed */
1925 nvmet_fc_free_tgt_pgs(fod);
1927 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1931 /* if an error handling the cmd post initial parsing */
1933 /* fudge up a failed CQE status for our transport error */
1934 memset(cqe, 0, sizeof(*cqe));
1935 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
1936 cqe->sq_id = cpu_to_le16(fod->queue->qid);
1937 cqe->command_id = sqe->command_id;
1938 cqe->status = cpu_to_le16(status);
1942 * try to push the data even if the SQE status is non-zero.
1943 * There may be a status where data still was intended to
1946 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
1947 /* push the data over before sending rsp */
1948 nvmet_fc_transfer_fcp_data(tgtport, fod,
1949 NVMET_FCOP_READDATA);
1953 /* writes & no data - fall thru */
1956 /* data no longer needed */
1957 nvmet_fc_free_tgt_pgs(fod);
1959 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1964 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
1966 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
1967 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1969 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
1974 * Actual processing routine for received FC-NVME FCP requests from the LLDD
1977 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
1978 struct nvmet_fc_fcp_iod *fod)
1980 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
1984 * Fused commands are currently not supported in the linux
1987 * As such, the implementation of the FC transport does not
1988 * look at the fused commands and order delivery to the upper
1989 * layer until we have both based on csn.
1992 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
1994 fod->total_length = be32_to_cpu(cmdiu->data_len);
1995 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
1996 fod->io_dir = NVMET_FCP_WRITE;
1997 if (!nvme_is_write(&cmdiu->sqe))
1998 goto transport_error;
1999 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2000 fod->io_dir = NVMET_FCP_READ;
2001 if (nvme_is_write(&cmdiu->sqe))
2002 goto transport_error;
2004 fod->io_dir = NVMET_FCP_NODATA;
2005 if (fod->total_length)
2006 goto transport_error;
2009 fod->req.cmd = &fod->cmdiubuf.sqe;
2010 fod->req.rsp = &fod->rspiubuf.cqe;
2011 fod->req.port = fod->queue->port;
2013 /* ensure nvmet handlers will set cmd handler callback */
2014 fod->req.execute = NULL;
2016 /* clear any response payload */
2017 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2019 ret = nvmet_req_init(&fod->req,
2020 &fod->queue->nvme_cq,
2021 &fod->queue->nvme_sq,
2022 &nvmet_fc_tgt_fcp_ops);
2023 if (!ret) { /* bad SQE content */
2024 nvmet_fc_abort_op(tgtport, fod->fcpreq);
2028 /* keep a running counter of tail position */
2029 atomic_inc(&fod->queue->sqtail);
2031 fod->data_sg = NULL;
2032 fod->data_sg_cnt = 0;
2033 if (fod->total_length) {
2034 ret = nvmet_fc_alloc_tgt_pgs(fod);
2036 nvmet_req_complete(&fod->req, ret);
2040 fod->req.sg = fod->data_sg;
2041 fod->req.sg_cnt = fod->data_sg_cnt;
2043 fod->next_sg = fod->data_sg;
2044 fod->next_sg_offset = 0;
2046 if (fod->io_dir == NVMET_FCP_WRITE) {
2047 /* pull the data over before invoking nvmet layer */
2048 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2055 * can invoke the nvmet layer now. If read data, cmd completion will
2059 fod->req.execute(&fod->req);
2064 nvmet_fc_abort_op(tgtport, fod->fcpreq);
2068 * Actual processing routine for received FC-NVME FCP requests from the LLDD
2071 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2073 struct nvmet_fc_fcp_iod *fod =
2074 container_of(work, struct nvmet_fc_fcp_iod, work);
2075 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2077 nvmet_fc_handle_fcp_rqst(tgtport, fod);
2081 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2082 * upon the reception of a NVME FCP CMD IU.
2084 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2085 * layer for processing.
2087 * The nvmet-fc layer will copy cmd payload to an internal structure for
2088 * processing. As such, upon completion of the routine, the LLDD may
2089 * immediately free/reuse the CMD IU buffer passed in the call.
2091 * If this routine returns error, the lldd should abort the exchange.
2093 * @target_port: pointer to the (registered) target port the FCP CMD IU
2095 * @fcpreq: pointer to a fcpreq request structure to be used to reference
2096 * the exchange corresponding to the FCP Exchange.
2097 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
2098 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2101 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2102 struct nvmefc_tgt_fcp_req *fcpreq,
2103 void *cmdiubuf, u32 cmdiubuf_len)
2105 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2106 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2107 struct nvmet_fc_tgt_queue *queue;
2108 struct nvmet_fc_fcp_iod *fod;
2110 /* validate iu, so the connection id can be used to find the queue */
2111 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2112 (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2113 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2114 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2118 queue = nvmet_fc_find_target_queue(tgtport,
2119 be64_to_cpu(cmdiu->connection_id));
2124 * note: reference taken by find_target_queue
2125 * After successful fod allocation, the fod will inherit the
2126 * ownership of that reference and will remove the reference
2127 * when the fod is freed.
2130 fod = nvmet_fc_alloc_fcp_iod(queue);
2132 /* release the queue lookup reference */
2133 nvmet_fc_tgt_q_put(queue);
2137 fcpreq->nvmet_fc_private = fod;
2138 fod->fcpreq = fcpreq;
2140 * put all admin cmds on hw queue id 0. All io commands go to
2141 * the respective hw queue based on a modulo basis
2143 fcpreq->hwqid = queue->qid ?
2144 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
2145 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2147 queue_work_on(queue->cpu, queue->work_q, &fod->work);
2151 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
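/*
 * Illustrative sketch (not built): an LLDD handing a received FCP CMD IU
 * to nvmet-fc. The lldd_* names are hypothetical; per the comments above,
 * the CMD IU buffer may be reused once the call returns, and a non-zero
 * return means the LLDD should abort the exchange.
 */
#if 0
static void lldd_handle_fcp_cmd(struct lldd_port *lport,
			struct lldd_exchange *xchg, void *cmdiu, u32 len)
{
	struct nvmefc_tgt_fcp_req *fcpreq = &xchg->nvmet_fcpreq;

	if (nvmet_fc_rcv_fcp_req(lport->tgtport, fcpreq, cmdiu, len))
		lldd_abort_exchange(xchg);
}
#endif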
2155 FCT_TRADDR_WWNN = 1 << 0,
2156 FCT_TRADDR_WWPN = 1 << 1,
2159 struct nvmet_fc_traddr {
2164 static const match_table_t traddr_opt_tokens = {
2165 { FCT_TRADDR_WWNN, "nn-%s" },
2166 { FCT_TRADDR_WWPN, "pn-%s" },
2167 { FCT_TRADDR_ERR, NULL }
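/*
 * Example (an assumption based on the strsep()/match_token parsing
 * below): a traddr of "nn-0x20000090fa526777,pn-0x10000090fa526777"
 * yields the node and port names that nvmet_fc_add_port() matches
 * against registered target ports.
 */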
2171 nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
2173 substring_t args[MAX_OPT_ARGS];
2174 char *options, *o, *p;
2178 options = o = kstrdup(buf, GFP_KERNEL);
2182 while ((p = strsep(&o, ",\n")) != NULL) {
2186 token = match_token(p, traddr_opt_tokens, args);
2188 case FCT_TRADDR_WWNN:
2189 if (match_u64(args, &token64)) {
2193 traddr->nn = token64;
2195 case FCT_TRADDR_WWPN:
2196 if (match_u64(args, &token64)) {
2200 traddr->pn = token64;
2203 pr_warn("unknown traddr token or missing value '%s'\n",
2216 nvmet_fc_add_port(struct nvmet_port *port)
2218 struct nvmet_fc_tgtport *tgtport;
2219 struct nvmet_fc_traddr traddr = { 0L, 0L };
2220 unsigned long flags;
2223 /* validate the address info */
2224 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2225 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2228 /* map the traddr address info to a target port */
2230 ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
2235 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2236 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2237 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2238 (tgtport->fc_target_port.port_name == traddr.pn)) {
2239 /* an FC port can only map to one nvmet port id */
2240 if (!tgtport->port) {
2241 tgtport->port = port;
2242 port->priv = tgtport;
2249 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2254 nvmet_fc_remove_port(struct nvmet_port *port)
2256 struct nvmet_fc_tgtport *tgtport = port->priv;
2257 unsigned long flags;
2259 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2260 if (tgtport->port == port) {
2261 nvmet_fc_tgtport_put(tgtport);
2262 tgtport->port = NULL;
2264 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2267 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2268 .owner = THIS_MODULE,
2269 .type = NVMF_TRTYPE_FC,
2271 .add_port = nvmet_fc_add_port,
2272 .remove_port = nvmet_fc_remove_port,
2273 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2274 .delete_ctrl = nvmet_fc_delete_ctrl,
2277 static int __init nvmet_fc_init_module(void)
2279 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2282 static void __exit nvmet_fc_exit_module(void)
2284 /* sanity check - all targetports should be removed */
2285 if (!list_empty(&nvmet_fc_target_list))
2286 pr_warn("%s: targetport list not empty\n", __func__);
2288 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2290 ida_destroy(&nvmet_fc_tgtport_cnt);
2293 module_init(nvmet_fc_init_module);
2294 module_exit(nvmet_fc_exit_module);
2296 MODULE_LICENSE("GPL v2");