karo-tx-linux.git / drivers / nvme / target / fc.c
nvmet_fc: add target feature flags for upcall isr contexts
1 /*
2  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful.
9  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10  * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12  * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13  * See the GNU General Public License for more details, a copy of which
14  * can be found in the file COPYING included with this package
15  *
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/blk-mq.h>
21 #include <linux/parser.h>
22 #include <linux/random.h>
23 #include <uapi/scsi/fc/fc_fs.h>
24 #include <uapi/scsi/fc/fc_els.h>
25
26 #include "nvmet.h"
27 #include <linux/nvme-fc-driver.h>
28 #include <linux/nvme-fc.h>
29
30
31 /* *************************** Data Structures/Defines ****************** */
32
33
34 #define NVMET_LS_CTX_COUNT              4
35
36 /* for this implementation, assume small single frame rqst/rsp */
37 #define NVME_FC_MAX_LS_BUFFER_SIZE              2048
38
39 struct nvmet_fc_tgtport;
40 struct nvmet_fc_tgt_assoc;
41
42 struct nvmet_fc_ls_iod {
43         struct nvmefc_tgt_ls_req        *lsreq;
44         struct nvmefc_tgt_fcp_req       *fcpreq;        /* only if RS */
45
46         struct list_head                ls_list;        /* tgtport->ls_list */
47
48         struct nvmet_fc_tgtport         *tgtport;
49         struct nvmet_fc_tgt_assoc       *assoc;
50
51         u8                              *rqstbuf;
52         u8                              *rspbuf;
53         u16                             rqstdatalen;
54         dma_addr_t                      rspdma;
55
56         struct scatterlist              sg[2];
57
58         struct work_struct              work;
59 } __aligned(sizeof(unsigned long long));
60
61 #define NVMET_FC_MAX_KB_PER_XFR         256
62
63 enum nvmet_fcp_datadir {
64         NVMET_FCP_NODATA,
65         NVMET_FCP_WRITE,
66         NVMET_FCP_READ,
67         NVMET_FCP_ABORTED,
68 };
69
70 struct nvmet_fc_fcp_iod {
71         struct nvmefc_tgt_fcp_req       *fcpreq;
72
73         struct nvme_fc_cmd_iu           cmdiubuf;
74         struct nvme_fc_ersp_iu          rspiubuf;
75         dma_addr_t                      rspdma;
76         struct scatterlist              *data_sg;
77         struct scatterlist              *next_sg;
78         int                             data_sg_cnt;
79         u32                             next_sg_offset;
80         u32                             total_length;
81         u32                             offset;
82         enum nvmet_fcp_datadir          io_dir;
83         bool                            active;
84         bool                            abort;
85         spinlock_t                      flock;
86
87         struct nvmet_req                req;
88         struct work_struct              work;
89         struct work_struct              done_work;
90
91         struct nvmet_fc_tgtport         *tgtport;
92         struct nvmet_fc_tgt_queue       *queue;
93
94         struct list_head                fcp_list;       /* tgtport->fcp_list */
95 };
96
97 struct nvmet_fc_tgtport {
98
99         struct nvmet_fc_target_port     fc_target_port;
100
101         struct list_head                tgt_list; /* nvmet_fc_target_list */
102         struct device                   *dev;   /* dev for dma mapping */
103         struct nvmet_fc_target_template *ops;
104
105         struct nvmet_fc_ls_iod          *iod;
106         spinlock_t                      lock;
107         struct list_head                ls_list;
108         struct list_head                ls_busylist;
109         struct list_head                assoc_list;
110         struct ida                      assoc_cnt;
111         struct nvmet_port               *port;
112         struct kref                     ref;
113 };
114
115 struct nvmet_fc_tgt_queue {
116         bool                            ninetypercent;
117         u16                             qid;
118         u16                             sqsize;
119         u16                             ersp_ratio;
120         u16                             sqhd;
121         int                             cpu;
122         atomic_t                        connected;
123         atomic_t                        sqtail;
124         atomic_t                        zrspcnt;
125         atomic_t                        rsn;
126         spinlock_t                      qlock;
127         struct nvmet_port               *port;
128         struct nvmet_cq                 nvme_cq;
129         struct nvmet_sq                 nvme_sq;
130         struct nvmet_fc_tgt_assoc       *assoc;
131         struct nvmet_fc_fcp_iod         *fod;           /* array of fcp_iods */
132         struct list_head                fod_list;
133         struct workqueue_struct         *work_q;
134         struct kref                     ref;
135 } __aligned(sizeof(unsigned long long));
136
137 struct nvmet_fc_tgt_assoc {
138         u64                             association_id;
139         u32                             a_id;
140         struct nvmet_fc_tgtport         *tgtport;
141         struct list_head                a_list;
142         struct nvmet_fc_tgt_queue       *queues[NVMET_NR_QUEUES];
143         struct kref                     ref;
144 };
145
146
147 static inline int
148 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
149 {
150         return (iodptr - iodptr->tgtport->iod);
151 }
152
153 static inline int
154 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
155 {
156         return (fodptr - fodptr->queue->fod);
157 }
158
159
160 /*
161  * Association and Connection IDs:
162  *
163  * An Association ID has a random number in its upper 6 bytes and zero
164  *   in its lower 2 bytes.
165  *
166  * A Connection ID is the Association ID with the QID OR'd into the lower 2 bytes.
167  *
168  * note: Association ID = Connection ID for queue 0
169  */
170 #define BYTES_FOR_QID                   sizeof(u16)
171 #define BYTES_FOR_QID_SHIFT             (BYTES_FOR_QID * 8)
172 #define NVMET_FC_QUEUEID_MASK           ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
173
174 static inline u64
175 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
176 {
177         return (assoc->association_id | qid);
178 }
179
180 static inline u64
181 nvmet_fc_getassociationid(u64 connectionid)
182 {
183         return connectionid & ~NVMET_FC_QUEUEID_MASK;
184 }
185
186 static inline u16
187 nvmet_fc_getqueueid(u64 connectionid)
188 {
189         return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
190 }
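
/*
 * Worked example (illustrative, not part of the driver): assume an
 * association whose randomly generated association_id is
 * 0x1122334455660000. The helpers above then behave as follows:
 *
 *   nvmet_fc_makeconnid(assoc, 0) == 0x1122334455660000   (admin queue;
 *                                     connection id == association id)
 *   nvmet_fc_makeconnid(assoc, 3) == 0x1122334455660003   (io queue 3)
 *   nvmet_fc_getassociationid(0x1122334455660003) == 0x1122334455660000
 *   nvmet_fc_getqueueid(0x1122334455660003) == 3
 */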
191
192 static inline struct nvmet_fc_tgtport *
193 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
194 {
195         return container_of(targetport, struct nvmet_fc_tgtport,
196                                  fc_target_port);
197 }
198
199 static inline struct nvmet_fc_fcp_iod *
200 nvmet_req_to_fod(struct nvmet_req *nvme_req)
201 {
202         return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
203 }
204
205
206 /* *************************** Globals **************************** */
207
208
209 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
210
211 static LIST_HEAD(nvmet_fc_target_list);
212 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
213
214
215 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
216 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
217 static void nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work);
218 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
219 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
220 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
221 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
222 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
223 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
224
225
226 /* *********************** FC-NVME DMA Handling **************************** */
227
228 /*
229  * The fcloop device passes in a NULL device pointer. Real LLDDs will
230  * pass in a valid device pointer. If NULL is passed to the dma mapping
231  * routines, then, depending on the platform, the call may or may not
232  * succeed and may crash.
233  *
234  * As such:
235  * Wrap all the dma routines and check the dev pointer.
236  *
237  * For simple mappings (those that return just a dma address), we noop
238  * them, returning a dma address of 0.
239  *
240  * On more complex mappings (dma_map_sg), a pseudo routine fills
241  * in the scatter list, setting all dma addresses to 0.
242  */
243
244 static inline dma_addr_t
245 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
246                 enum dma_data_direction dir)
247 {
248         return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
249 }
250
251 static inline int
252 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
253 {
254         return dev ? dma_mapping_error(dev, dma_addr) : 0;
255 }
256
257 static inline void
258 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
259         enum dma_data_direction dir)
260 {
261         if (dev)
262                 dma_unmap_single(dev, addr, size, dir);
263 }
264
265 static inline void
266 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
267                 enum dma_data_direction dir)
268 {
269         if (dev)
270                 dma_sync_single_for_cpu(dev, addr, size, dir);
271 }
272
273 static inline void
274 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
275                 enum dma_data_direction dir)
276 {
277         if (dev)
278                 dma_sync_single_for_device(dev, addr, size, dir);
279 }
280
281 /* pseudo dma_map_sg call */
282 static int
283 fc_map_sg(struct scatterlist *sg, int nents)
284 {
285         struct scatterlist *s;
286         int i;
287
288         WARN_ON(nents == 0 || sg[0].length == 0);
289
290         for_each_sg(sg, s, nents, i) {
291                 s->dma_address = 0L;
292 #ifdef CONFIG_NEED_SG_DMA_LENGTH
293                 s->dma_length = s->length;
294 #endif
295         }
296         return nents;
297 }
298
299 static inline int
300 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
301                 enum dma_data_direction dir)
302 {
303         return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
304 }
305
306 static inline void
307 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
308                 enum dma_data_direction dir)
309 {
310         if (dev)
311                 dma_unmap_sg(dev, sg, nents, dir);
312 }
313
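/*
 * Behavioral note with a small example (illustrative): for fcloop, which
 * registers with a NULL dev, the wrappers above degrade to harmless no-ops,
 * e.g.:
 *
 *   fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE) returns 0, and
 *   fc_dma_mapping_error(NULL, 0) returns 0,
 *
 * so the "mapping" is treated as successful even though no real DMA
 * mapping was created.
 */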
314
315 /* *********************** FC-NVME Port Management ************************ */
316
317
318 static int
319 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
320 {
321         struct nvmet_fc_ls_iod *iod;
322         int i;
323
324         iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
325                         GFP_KERNEL);
326         if (!iod)
327                 return -ENOMEM;
328
329         tgtport->iod = iod;
330
331         for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
332                 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
333                 iod->tgtport = tgtport;
334                 list_add_tail(&iod->ls_list, &tgtport->ls_list);
335
336                 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
337                         GFP_KERNEL);
338                 if (!iod->rqstbuf)
339                         goto out_fail;
340
341                 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
342
343                 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
344                                                 NVME_FC_MAX_LS_BUFFER_SIZE,
345                                                 DMA_TO_DEVICE);
346                 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
347                         goto out_fail;
348         }
349
350         return 0;
351
352 out_fail:
353         kfree(iod->rqstbuf);
354         list_del(&iod->ls_list);
355         for (iod--, i--; i >= 0; iod--, i--) {
356                 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
357                                 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
358                 kfree(iod->rqstbuf);
359                 list_del(&iod->ls_list);
360         }
361
362         kfree(iod);
363
364         return -EFAULT;
365 }
366
367 static void
368 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
369 {
370         struct nvmet_fc_ls_iod *iod = tgtport->iod;
371         int i;
372
373         for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
374                 fc_dma_unmap_single(tgtport->dev,
375                                 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
376                                 DMA_TO_DEVICE);
377                 kfree(iod->rqstbuf);
378                 list_del(&iod->ls_list);
379         }
380         kfree(tgtport->iod);
381 }
382
383 static struct nvmet_fc_ls_iod *
384 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
385 {
386         struct nvmet_fc_ls_iod *iod;
387         unsigned long flags;
388
389         spin_lock_irqsave(&tgtport->lock, flags);
390         iod = list_first_entry_or_null(&tgtport->ls_list,
391                                         struct nvmet_fc_ls_iod, ls_list);
392         if (iod)
393                 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
394         spin_unlock_irqrestore(&tgtport->lock, flags);
395         return iod;
396 }
397
398
399 static void
400 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
401                         struct nvmet_fc_ls_iod *iod)
402 {
403         unsigned long flags;
404
405         spin_lock_irqsave(&tgtport->lock, flags);
406         list_move(&iod->ls_list, &tgtport->ls_list);
407         spin_unlock_irqrestore(&tgtport->lock, flags);
408 }
409
410 static void
411 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
412                                 struct nvmet_fc_tgt_queue *queue)
413 {
414         struct nvmet_fc_fcp_iod *fod = queue->fod;
415         int i;
416
417         for (i = 0; i < queue->sqsize; fod++, i++) {
418                 INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
419                 INIT_WORK(&fod->done_work, nvmet_fc_fcp_rqst_op_done_work);
420                 fod->tgtport = tgtport;
421                 fod->queue = queue;
422                 fod->active = false;
423                 list_add_tail(&fod->fcp_list, &queue->fod_list);
424                 spin_lock_init(&fod->flock);
425
426                 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
427                                         sizeof(fod->rspiubuf), DMA_TO_DEVICE);
428                 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
429                         list_del(&fod->fcp_list);
430                         for (fod--, i--; i >= 0; fod--, i--) {
431                                 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
432                                                 sizeof(fod->rspiubuf),
433                                                 DMA_TO_DEVICE);
434                                 fod->rspdma = 0L;
435                                 list_del(&fod->fcp_list);
436                         }
437
438                         return;
439                 }
440         }
441 }
442
443 static void
444 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
445                                 struct nvmet_fc_tgt_queue *queue)
446 {
447         struct nvmet_fc_fcp_iod *fod = queue->fod;
448         int i;
449
450         for (i = 0; i < queue->sqsize; fod++, i++) {
451                 if (fod->rspdma)
452                         fc_dma_unmap_single(tgtport->dev, fod->rspdma,
453                                 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
454         }
455 }
456
457 static struct nvmet_fc_fcp_iod *
458 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
459 {
460         struct nvmet_fc_fcp_iod *fod;
461         unsigned long flags;
462
463         spin_lock_irqsave(&queue->qlock, flags);
464         fod = list_first_entry_or_null(&queue->fod_list,
465                                         struct nvmet_fc_fcp_iod, fcp_list);
466         if (fod) {
467                 list_del(&fod->fcp_list);
468                 fod->active = true;
469                 fod->abort = false;
470                 /*
471                  * no queue reference is taken, as it was taken by the
472                  * queue lookup just prior to the allocation. The fod
473                  * will "inherit" that reference.
474                  */
475         }
476         spin_unlock_irqrestore(&queue->qlock, flags);
477         return fod;
478 }
479
480
481 static void
482 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
483                         struct nvmet_fc_fcp_iod *fod)
484 {
485         unsigned long flags;
486
487         spin_lock_irqsave(&queue->qlock, flags);
488         list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
489         fod->active = false;
490         spin_unlock_irqrestore(&queue->qlock, flags);
491
492         /*
493          * release the reference taken at queue lookup and fod allocation
494          */
495         nvmet_fc_tgt_q_put(queue);
496 }
497
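/*
 * Reference-flow sketch (illustrative, restating the comments above): for
 * a received command, the queue reference is expected to travel as
 *
 *   nvmet_fc_find_target_queue()  - takes a queue reference (tgt_q_get)
 *   nvmet_fc_alloc_fcp_iod()      - the fod "inherits" that reference
 *   ...  command processing  ...
 *   nvmet_fc_free_fcp_iod()       - returns the fod and drops the
 *                                   reference via nvmet_fc_tgt_q_put()
 */
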
498 static int
499 nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
500 {
501         int cpu, idx, cnt;
502
503         if (!(tgtport->ops->target_features &
504                         NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
505             tgtport->ops->max_hw_queues == 1)
506                 return WORK_CPU_UNBOUND;
507
508         /* Simple cpu selection based on qid modulo active cpu count */
509         idx = !qid ? 0 : (qid - 1) % num_active_cpus();
510
511         /* find the n'th active cpu */
512         for (cpu = 0, cnt = 0; ; ) {
513                 if (cpu_active(cpu)) {
514                         if (cnt == idx)
515                                 break;
516                         cnt++;
517                 }
518                 cpu = (cpu + 1) % num_possible_cpus();
519         }
520
521         return cpu;
522 }
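
/*
 * Worked example (illustrative): with NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED
 * set, more than one hardware queue, and cpus 0-3 all active,
 * nvmet_fc_queue_to_cpu() maps:
 *
 *   qid 0 (admin) -> cpu 0
 *   qid 1 -> cpu 0,  qid 2 -> cpu 1,  qid 3 -> cpu 2,  qid 4 -> cpu 3,
 *   qid 5 -> cpu 0   (wraps modulo the active cpu count)
 */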
523
524 static struct nvmet_fc_tgt_queue *
525 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
526                         u16 qid, u16 sqsize)
527 {
528         struct nvmet_fc_tgt_queue *queue;
529         unsigned long flags;
530         int ret;
531
532         if (qid >= NVMET_NR_QUEUES)
533                 return NULL;
534
535         queue = kzalloc((sizeof(*queue) +
536                                 (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
537                                 GFP_KERNEL);
538         if (!queue)
539                 return NULL;
540
541         if (!nvmet_fc_tgt_a_get(assoc))
542                 goto out_free_queue;
543
544         queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
545                                 assoc->tgtport->fc_target_port.port_num,
546                                 assoc->a_id, qid);
547         if (!queue->work_q)
548                 goto out_a_put;
549
550         queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
551         queue->qid = qid;
552         queue->sqsize = sqsize;
553         queue->assoc = assoc;
554         queue->port = assoc->tgtport->port;
555         queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
556         INIT_LIST_HEAD(&queue->fod_list);
557         atomic_set(&queue->connected, 0);
558         atomic_set(&queue->sqtail, 0);
559         atomic_set(&queue->rsn, 1);
560         atomic_set(&queue->zrspcnt, 0);
561         spin_lock_init(&queue->qlock);
562         kref_init(&queue->ref);
563
564         nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
565
566         ret = nvmet_sq_init(&queue->nvme_sq);
567         if (ret)
568                 goto out_fail_iodlist;
569
570         WARN_ON(assoc->queues[qid]);
571         spin_lock_irqsave(&assoc->tgtport->lock, flags);
572         assoc->queues[qid] = queue;
573         spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
574
575         return queue;
576
577 out_fail_iodlist:
578         nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
579         destroy_workqueue(queue->work_q);
580 out_a_put:
581         nvmet_fc_tgt_a_put(assoc);
582 out_free_queue:
583         kfree(queue);
584         return NULL;
585 }
586
587
588 static void
589 nvmet_fc_tgt_queue_free(struct kref *ref)
590 {
591         struct nvmet_fc_tgt_queue *queue =
592                 container_of(ref, struct nvmet_fc_tgt_queue, ref);
593         unsigned long flags;
594
595         spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
596         queue->assoc->queues[queue->qid] = NULL;
597         spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
598
599         nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
600
601         nvmet_fc_tgt_a_put(queue->assoc);
602
603         destroy_workqueue(queue->work_q);
604
605         kfree(queue);
606 }
607
608 static void
609 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
610 {
611         kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
612 }
613
614 static int
615 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
616 {
617         return kref_get_unless_zero(&queue->ref);
618 }
619
620
621 static void
622 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
623                                 struct nvmefc_tgt_fcp_req *fcpreq)
624 {
625         int ret;
626
627         fcpreq->op = NVMET_FCOP_ABORT;
628         fcpreq->offset = 0;
629         fcpreq->timeout = 0;
630         fcpreq->transfer_length = 0;
631         fcpreq->transferred_length = 0;
632         fcpreq->fcp_error = 0;
633         fcpreq->sg_cnt = 0;
634
635         ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
636         if (ret)
637                 /* should never reach here !! */
638                 WARN_ON(1);
639 }
640
641
642 static void
643 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
644 {
645         struct nvmet_fc_fcp_iod *fod = queue->fod;
646         unsigned long flags;
647         int i;
648         bool disconnect;
649
650         disconnect = atomic_xchg(&queue->connected, 0);
651
652         spin_lock_irqsave(&queue->qlock, flags);
653         /* abort outstanding io's */
654         for (i = 0; i < queue->sqsize; fod++, i++) {
655                 if (fod->active) {
656                         spin_lock(&fod->flock);
657                         fod->abort = true;
658                         spin_unlock(&fod->flock);
659                 }
660         }
661         spin_unlock_irqrestore(&queue->qlock, flags);
662
663         flush_workqueue(queue->work_q);
664
665         if (disconnect)
666                 nvmet_sq_destroy(&queue->nvme_sq);
667
668         nvmet_fc_tgt_q_put(queue);
669 }
670
671 static struct nvmet_fc_tgt_queue *
672 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
673                                 u64 connection_id)
674 {
675         struct nvmet_fc_tgt_assoc *assoc;
676         struct nvmet_fc_tgt_queue *queue;
677         u64 association_id = nvmet_fc_getassociationid(connection_id);
678         u16 qid = nvmet_fc_getqueueid(connection_id);
679         unsigned long flags;
680
681         spin_lock_irqsave(&tgtport->lock, flags);
682         list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
683                 if (association_id == assoc->association_id) {
684                         queue = assoc->queues[qid];
685                         if (queue &&
686                             (!atomic_read(&queue->connected) ||
687                              !nvmet_fc_tgt_q_get(queue)))
688                                 queue = NULL;
689                         spin_unlock_irqrestore(&tgtport->lock, flags);
690                         return queue;
691                 }
692         }
693         spin_unlock_irqrestore(&tgtport->lock, flags);
694         return NULL;
695 }
696
697 static struct nvmet_fc_tgt_assoc *
698 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
699 {
700         struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
701         unsigned long flags;
702         u64 ran;
703         int idx;
704         bool needrandom = true;
705
706         assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
707         if (!assoc)
708                 return NULL;
709
710         idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
711         if (idx < 0)
712                 goto out_free_assoc;
713
714         if (!nvmet_fc_tgtport_get(tgtport))
715                 goto out_ida_put;
716
717         assoc->tgtport = tgtport;
718         assoc->a_id = idx;
719         INIT_LIST_HEAD(&assoc->a_list);
720         kref_init(&assoc->ref);
721
722         while (needrandom) {
723                 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
724                 ran = ran << BYTES_FOR_QID_SHIFT;
725
726                 spin_lock_irqsave(&tgtport->lock, flags);
727                 needrandom = false;
728                 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
729                         if (ran == tmpassoc->association_id) {
730                                 needrandom = true;
731                                 break;
732                         }
733                 if (!needrandom) {
734                         assoc->association_id = ran;
735                         list_add_tail(&assoc->a_list, &tgtport->assoc_list);
736                 }
737                 spin_unlock_irqrestore(&tgtport->lock, flags);
738         }
739
740         return assoc;
741
742 out_ida_put:
743         ida_simple_remove(&tgtport->assoc_cnt, idx);
744 out_free_assoc:
745         kfree(assoc);
746         return NULL;
747 }
748
749 static void
750 nvmet_fc_target_assoc_free(struct kref *ref)
751 {
752         struct nvmet_fc_tgt_assoc *assoc =
753                 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
754         struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
755         unsigned long flags;
756
757         spin_lock_irqsave(&tgtport->lock, flags);
758         list_del(&assoc->a_list);
759         spin_unlock_irqrestore(&tgtport->lock, flags);
760         ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
761         kfree(assoc);
762         nvmet_fc_tgtport_put(tgtport);
763 }
764
765 static void
766 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
767 {
768         kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
769 }
770
771 static int
772 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
773 {
774         return kref_get_unless_zero(&assoc->ref);
775 }
776
777 static void
778 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
779 {
780         struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
781         struct nvmet_fc_tgt_queue *queue;
782         unsigned long flags;
783         int i;
784
785         spin_lock_irqsave(&tgtport->lock, flags);
786         for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
787                 queue = assoc->queues[i];
788                 if (queue) {
789                         if (!nvmet_fc_tgt_q_get(queue))
790                                 continue;
791                         spin_unlock_irqrestore(&tgtport->lock, flags);
792                         nvmet_fc_delete_target_queue(queue);
793                         nvmet_fc_tgt_q_put(queue);
794                         spin_lock_irqsave(&tgtport->lock, flags);
795                 }
796         }
797         spin_unlock_irqrestore(&tgtport->lock, flags);
798
799         nvmet_fc_tgt_a_put(assoc);
800 }
801
802 static struct nvmet_fc_tgt_assoc *
803 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
804                                 u64 association_id)
805 {
806         struct nvmet_fc_tgt_assoc *assoc;
807         struct nvmet_fc_tgt_assoc *ret = NULL;
808         unsigned long flags;
809
810         spin_lock_irqsave(&tgtport->lock, flags);
811         list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
812                 if (association_id == assoc->association_id) {
813                         ret = assoc;
814                         nvmet_fc_tgt_a_get(assoc);
815                         break;
816                 }
817         }
818         spin_unlock_irqrestore(&tgtport->lock, flags);
819
820         return ret;
821 }
822
823
824 /**
825  * nvmet_fc_register_targetport - transport entry point called by an
826  *                              LLDD to register the existence of a local
827  *                              NVME subsystem FC port.
828  * @pinfo:     pointer to information about the port to be registered
829  * @template:  LLDD entrypoints and operational parameters for the port
830  * @dev:       physical hardware device node the port corresponds to. Will be
831  *             used for DMA mappings
832  * @portptr:   pointer to a target port pointer. Upon success, the routine
833  *             will allocate an nvmet_fc_target_port structure and place its
834  *             address in the target port pointer. Upon failure, the target
835  *             port pointer will be set to NULL.
836  *
837  * Returns:
838  * a completion status. Must be 0 upon success; a negative errno
839  * (ex: -ENXIO) upon failure.
840  */
841 int
842 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
843                         struct nvmet_fc_target_template *template,
844                         struct device *dev,
845                         struct nvmet_fc_target_port **portptr)
846 {
847         struct nvmet_fc_tgtport *newrec;
848         unsigned long flags;
849         int ret, idx;
850
851         if (!template->xmt_ls_rsp || !template->fcp_op ||
852             !template->targetport_delete ||
853             !template->max_hw_queues || !template->max_sgl_segments ||
854             !template->max_dif_sgl_segments || !template->dma_boundary) {
855                 ret = -EINVAL;
856                 goto out_regtgt_failed;
857         }
858
859         newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
860                          GFP_KERNEL);
861         if (!newrec) {
862                 ret = -ENOMEM;
863                 goto out_regtgt_failed;
864         }
865
866         idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
867         if (idx < 0) {
868                 ret = -ENOSPC;
869                 goto out_fail_kfree;
870         }
871
872         if (!get_device(dev) && dev) {
873                 ret = -ENODEV;
874                 goto out_ida_put;
875         }
876
877         newrec->fc_target_port.node_name = pinfo->node_name;
878         newrec->fc_target_port.port_name = pinfo->port_name;
879         newrec->fc_target_port.private = &newrec[1];
880         newrec->fc_target_port.port_id = pinfo->port_id;
881         newrec->fc_target_port.port_num = idx;
882         INIT_LIST_HEAD(&newrec->tgt_list);
883         newrec->dev = dev;
884         newrec->ops = template;
885         spin_lock_init(&newrec->lock);
886         INIT_LIST_HEAD(&newrec->ls_list);
887         INIT_LIST_HEAD(&newrec->ls_busylist);
888         INIT_LIST_HEAD(&newrec->assoc_list);
889         kref_init(&newrec->ref);
890         ida_init(&newrec->assoc_cnt);
891
892         ret = nvmet_fc_alloc_ls_iodlist(newrec);
893         if (ret) {
894                 ret = -ENOMEM;
895                 goto out_free_newrec;
896         }
897
898         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
899         list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
900         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
901
902         *portptr = &newrec->fc_target_port;
903         return 0;
904
905 out_free_newrec:
906         put_device(dev);
907 out_ida_put:
908         ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
909 out_fail_kfree:
910         kfree(newrec);
911 out_regtgt_failed:
912         *portptr = NULL;
913         return ret;
914 }
915 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
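
/*
 * Usage sketch (illustrative only, not part of this driver): an LLDD would
 * register a target port roughly as below. The "example_*" names, wwnn/wwpn
 * values and sizing numbers are hypothetical; the template fields shown are
 * the ones this file validates or uses.
 *
 *	static struct nvmet_fc_target_template example_tgt_template = {
 *		.targetport_delete	= example_targetport_delete,
 *		.xmt_ls_rsp		= example_xmt_ls_rsp,
 *		.fcp_op			= example_fcp_op,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.target_features	= NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED,
 *		.target_priv_sz		= sizeof(struct example_tgt_priv),
 *	};
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name	= example_wwnn,
 *		.port_name	= example_wwpn,
 *		.port_id	= example_fabric_port_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int ret;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &example_tgt_template,
 *					   &pdev->dev, &targetport);
 *	if (ret)
 *		return ret;
 */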
916
917
918 static void
919 nvmet_fc_free_tgtport(struct kref *ref)
920 {
921         struct nvmet_fc_tgtport *tgtport =
922                 container_of(ref, struct nvmet_fc_tgtport, ref);
923         struct device *dev = tgtport->dev;
924         unsigned long flags;
925
926         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
927         list_del(&tgtport->tgt_list);
928         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
929
930         nvmet_fc_free_ls_iodlist(tgtport);
931
932         /* let the LLDD know we've finished tearing it down */
933         tgtport->ops->targetport_delete(&tgtport->fc_target_port);
934
935         ida_simple_remove(&nvmet_fc_tgtport_cnt,
936                         tgtport->fc_target_port.port_num);
937
938         ida_destroy(&tgtport->assoc_cnt);
939
940         kfree(tgtport);
941
942         put_device(dev);
943 }
944
945 static void
946 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
947 {
948         kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
949 }
950
951 static int
952 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
953 {
954         return kref_get_unless_zero(&tgtport->ref);
955 }
956
957 static void
958 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
959 {
960         struct nvmet_fc_tgt_assoc *assoc, *next;
961         unsigned long flags;
962
963         spin_lock_irqsave(&tgtport->lock, flags);
964         list_for_each_entry_safe(assoc, next,
965                                 &tgtport->assoc_list, a_list) {
966                 if (!nvmet_fc_tgt_a_get(assoc))
967                         continue;
968                 spin_unlock_irqrestore(&tgtport->lock, flags);
969                 nvmet_fc_delete_target_assoc(assoc);
970                 nvmet_fc_tgt_a_put(assoc);
971                 spin_lock_irqsave(&tgtport->lock, flags);
972         }
973         spin_unlock_irqrestore(&tgtport->lock, flags);
974 }
975
976 /*
977  * nvmet layer has called to terminate an association
978  */
979 static void
980 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
981 {
982         struct nvmet_fc_tgtport *tgtport, *next;
983         struct nvmet_fc_tgt_assoc *assoc;
984         struct nvmet_fc_tgt_queue *queue;
985         unsigned long flags;
986         bool found_ctrl = false;
987
988         /* this is a bit ugly, but don't want to make locks layered */
989         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
990         list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
991                         tgt_list) {
992                 if (!nvmet_fc_tgtport_get(tgtport))
993                         continue;
994                 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
995
996                 spin_lock_irqsave(&tgtport->lock, flags);
997                 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
998                         queue = assoc->queues[0];
999                         if (queue && queue->nvme_sq.ctrl == ctrl) {
1000                                 if (nvmet_fc_tgt_a_get(assoc))
1001                                         found_ctrl = true;
1002                                 break;
1003                         }
1004                 }
1005                 spin_unlock_irqrestore(&tgtport->lock, flags);
1006
1007                 nvmet_fc_tgtport_put(tgtport);
1008
1009                 if (found_ctrl) {
1010                         nvmet_fc_delete_target_assoc(assoc);
1011                         nvmet_fc_tgt_a_put(assoc);
1012                         return;
1013                 }
1014
1015                 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1016         }
1017         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1018 }
1019
1020 /**
1021  * nvmet_fc_unregister_targetport - transport entry point called by an
1022  *                              LLDD to deregister/remove a previously
1023  *                              registered local NVME subsystem FC port.
1024  * @target_port: pointer to the (registered) target port that is to be
1025  *               deregistered.
1026  *
1027  * Returns:
1028  * a completion status. Must be 0 upon success; a negative errno
1029  * (ex: -ENXIO) upon failure.
1030  */
1031 int
1032 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1033 {
1034         struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1035
1036         /* terminate any outstanding associations */
1037         __nvmet_fc_free_assocs(tgtport);
1038
1039         nvmet_fc_tgtport_put(tgtport);
1040
1041         return 0;
1042 }
1043 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1044
1045
1046 /* *********************** FC-NVME LS Handling **************************** */
1047
1048
1049 static void
1050 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd)
1051 {
1052         struct fcnvme_ls_acc_hdr *acc = buf;
1053
1054         acc->w0.ls_cmd = ls_cmd;
1055         acc->desc_list_len = desc_len;
1056         acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1057         acc->rqst.desc_len =
1058                         fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1059         acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1060 }
1061
1062 static int
1063 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1064                         u8 reason, u8 explanation, u8 vendor)
1065 {
1066         struct fcnvme_ls_rjt *rjt = buf;
1067
1068         nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1069                         fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1070                         ls_cmd);
1071         rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1072         rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1073         rjt->rjt.reason_code = reason;
1074         rjt->rjt.reason_explanation = explanation;
1075         rjt->rjt.vendor = vendor;
1076
1077         return sizeof(struct fcnvme_ls_rjt);
1078 }
1079
1080 /* Validation Error indexes into the string table below */
1081 enum {
1082         VERR_NO_ERROR           = 0,
1083         VERR_CR_ASSOC_LEN       = 1,
1084         VERR_CR_ASSOC_RQST_LEN  = 2,
1085         VERR_CR_ASSOC_CMD       = 3,
1086         VERR_CR_ASSOC_CMD_LEN   = 4,
1087         VERR_ERSP_RATIO         = 5,
1088         VERR_ASSOC_ALLOC_FAIL   = 6,
1089         VERR_QUEUE_ALLOC_FAIL   = 7,
1090         VERR_CR_CONN_LEN        = 8,
1091         VERR_CR_CONN_RQST_LEN   = 9,
1092         VERR_ASSOC_ID           = 10,
1093         VERR_ASSOC_ID_LEN       = 11,
1094         VERR_NO_ASSOC           = 12,
1095         VERR_CONN_ID            = 13,
1096         VERR_CONN_ID_LEN        = 14,
1097         VERR_NO_CONN            = 15,
1098         VERR_CR_CONN_CMD        = 16,
1099         VERR_CR_CONN_CMD_LEN    = 17,
1100         VERR_DISCONN_LEN        = 18,
1101         VERR_DISCONN_RQST_LEN   = 19,
1102         VERR_DISCONN_CMD        = 20,
1103         VERR_DISCONN_CMD_LEN    = 21,
1104         VERR_DISCONN_SCOPE      = 22,
1105         VERR_RS_LEN             = 23,
1106         VERR_RS_RQST_LEN        = 24,
1107         VERR_RS_CMD             = 25,
1108         VERR_RS_CMD_LEN         = 26,
1109         VERR_RS_RCTL            = 27,
1110         VERR_RS_RO              = 28,
1111 };
1112
1113 static char *validation_errors[] = {
1114         "OK",
1115         "Bad CR_ASSOC Length",
1116         "Bad CR_ASSOC Rqst Length",
1117         "Not CR_ASSOC Cmd",
1118         "Bad CR_ASSOC Cmd Length",
1119         "Bad Ersp Ratio",
1120         "Association Allocation Failed",
1121         "Queue Allocation Failed",
1122         "Bad CR_CONN Length",
1123         "Bad CR_CONN Rqst Length",
1124         "Not Association ID",
1125         "Bad Association ID Length",
1126         "No Association",
1127         "Not Connection ID",
1128         "Bad Connection ID Length",
1129         "No Connection",
1130         "Not CR_CONN Cmd",
1131         "Bad CR_CONN Cmd Length",
1132         "Bad DISCONN Length",
1133         "Bad DISCONN Rqst Length",
1134         "Not DISCONN Cmd",
1135         "Bad DISCONN Cmd Length",
1136         "Bad Disconnect Scope",
1137         "Bad RS Length",
1138         "Bad RS Rqst Length",
1139         "Not RS Cmd",
1140         "Bad RS Cmd Length",
1141         "Bad RS R_CTL",
1142         "Bad RS Relative Offset",
1143 };
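
/*
 * Example (illustrative): a Create Connection LS that references an unknown
 * association fails validation with VERR_NO_ASSOC. The handler below logs
 * "Create Connection LS failed: No Association" and formats an LS reject
 * via nvmet_fc_format_rjt() with reason FCNVME_RJT_RC_INV_ASSOC and
 * explanation FCNVME_RJT_EXP_NONE.
 */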
1144
1145 static void
1146 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1147                         struct nvmet_fc_ls_iod *iod)
1148 {
1149         struct fcnvme_ls_cr_assoc_rqst *rqst =
1150                                 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1151         struct fcnvme_ls_cr_assoc_acc *acc =
1152                                 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1153         struct nvmet_fc_tgt_queue *queue;
1154         int ret = 0;
1155
1156         memset(acc, 0, sizeof(*acc));
1157
1158         if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
1159                 ret = VERR_CR_ASSOC_LEN;
1160         else if (rqst->desc_list_len !=
1161                         fcnvme_lsdesc_len(
1162                                 sizeof(struct fcnvme_ls_cr_assoc_rqst)))
1163                 ret = VERR_CR_ASSOC_RQST_LEN;
1164         else if (rqst->assoc_cmd.desc_tag !=
1165                         cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1166                 ret = VERR_CR_ASSOC_CMD;
1167         else if (rqst->assoc_cmd.desc_len !=
1168                         fcnvme_lsdesc_len(
1169                                 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
1170                 ret = VERR_CR_ASSOC_CMD_LEN;
1171         else if (!rqst->assoc_cmd.ersp_ratio ||
1172                  (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1173                                 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1174                 ret = VERR_ERSP_RATIO;
1175
1176         else {
1177                 /* new association w/ admin queue */
1178                 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1179                 if (!iod->assoc)
1180                         ret = VERR_ASSOC_ALLOC_FAIL;
1181                 else {
1182                         queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1183                                         be16_to_cpu(rqst->assoc_cmd.sqsize));
1184                         if (!queue)
1185                                 ret = VERR_QUEUE_ALLOC_FAIL;
1186                 }
1187         }
1188
1189         if (ret) {
1190                 dev_err(tgtport->dev,
1191                         "Create Association LS failed: %s\n",
1192                         validation_errors[ret]);
1193                 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1194                                 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1195                                 FCNVME_RJT_RC_LOGIC,
1196                                 FCNVME_RJT_EXP_NONE, 0);
1197                 return;
1198         }
1199
1200         queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1201         atomic_set(&queue->connected, 1);
1202         queue->sqhd = 0;        /* best place to init value */
1203
1204         /* format a response */
1205
1206         iod->lsreq->rsplen = sizeof(*acc);
1207
1208         nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1209                         fcnvme_lsdesc_len(
1210                                 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1211                         FCNVME_LS_CREATE_ASSOCIATION);
1212         acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1213         acc->associd.desc_len =
1214                         fcnvme_lsdesc_len(
1215                                 sizeof(struct fcnvme_lsdesc_assoc_id));
1216         acc->associd.association_id =
1217                         cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1218         acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1219         acc->connectid.desc_len =
1220                         fcnvme_lsdesc_len(
1221                                 sizeof(struct fcnvme_lsdesc_conn_id));
1222         acc->connectid.connection_id = acc->associd.association_id;
1223 }
1224
1225 static void
1226 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1227                         struct nvmet_fc_ls_iod *iod)
1228 {
1229         struct fcnvme_ls_cr_conn_rqst *rqst =
1230                                 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1231         struct fcnvme_ls_cr_conn_acc *acc =
1232                                 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1233         struct nvmet_fc_tgt_queue *queue;
1234         int ret = 0;
1235
1236         memset(acc, 0, sizeof(*acc));
1237
1238         if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1239                 ret = VERR_CR_CONN_LEN;
1240         else if (rqst->desc_list_len !=
1241                         fcnvme_lsdesc_len(
1242                                 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1243                 ret = VERR_CR_CONN_RQST_LEN;
1244         else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1245                 ret = VERR_ASSOC_ID;
1246         else if (rqst->associd.desc_len !=
1247                         fcnvme_lsdesc_len(
1248                                 sizeof(struct fcnvme_lsdesc_assoc_id)))
1249                 ret = VERR_ASSOC_ID_LEN;
1250         else if (rqst->connect_cmd.desc_tag !=
1251                         cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1252                 ret = VERR_CR_CONN_CMD;
1253         else if (rqst->connect_cmd.desc_len !=
1254                         fcnvme_lsdesc_len(
1255                                 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1256                 ret = VERR_CR_CONN_CMD_LEN;
1257         else if (!rqst->connect_cmd.ersp_ratio ||
1258                  (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1259                                 be16_to_cpu(rqst->connect_cmd.sqsize)))
1260                 ret = VERR_ERSP_RATIO;
1261
1262         else {
1263                 /* new io queue */
1264                 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1265                                 be64_to_cpu(rqst->associd.association_id));
1266                 if (!iod->assoc)
1267                         ret = VERR_NO_ASSOC;
1268                 else {
1269                         queue = nvmet_fc_alloc_target_queue(iod->assoc,
1270                                         be16_to_cpu(rqst->connect_cmd.qid),
1271                                         be16_to_cpu(rqst->connect_cmd.sqsize));
1272                         if (!queue)
1273                                 ret = VERR_QUEUE_ALLOC_FAIL;
1274
1275                         /* release get taken in nvmet_fc_find_target_assoc */
1276                         nvmet_fc_tgt_a_put(iod->assoc);
1277                 }
1278         }
1279
1280         if (ret) {
1281                 dev_err(tgtport->dev,
1282                         "Create Connection LS failed: %s\n",
1283                         validation_errors[ret]);
1284                 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1285                                 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1286                                 (ret == VERR_NO_ASSOC) ?
1287                                         FCNVME_RJT_RC_INV_ASSOC :
1288                                         FCNVME_RJT_RC_LOGIC,
1289                                 FCNVME_RJT_EXP_NONE, 0);
1290                 return;
1291         }
1292
1293         queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1294         atomic_set(&queue->connected, 1);
1295         queue->sqhd = 0;        /* best place to init value */
1296
1297         /* format a response */
1298
1299         iod->lsreq->rsplen = sizeof(*acc);
1300
1301         nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1302                         fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1303                         FCNVME_LS_CREATE_CONNECTION);
1304         acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1305         acc->connectid.desc_len =
1306                         fcnvme_lsdesc_len(
1307                                 sizeof(struct fcnvme_lsdesc_conn_id));
1308         acc->connectid.connection_id =
1309                         cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1310                                 be16_to_cpu(rqst->connect_cmd.qid)));
1311 }
1312
1313 static void
1314 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1315                         struct nvmet_fc_ls_iod *iod)
1316 {
1317         struct fcnvme_ls_disconnect_rqst *rqst =
1318                         (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1319         struct fcnvme_ls_disconnect_acc *acc =
1320                         (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1321         struct nvmet_fc_tgt_queue *queue = NULL;
1322         struct nvmet_fc_tgt_assoc *assoc;
1323         int ret = 0;
1324         bool del_assoc = false;
1325
1326         memset(acc, 0, sizeof(*acc));
1327
1328         if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1329                 ret = VERR_DISCONN_LEN;
1330         else if (rqst->desc_list_len !=
1331                         fcnvme_lsdesc_len(
1332                                 sizeof(struct fcnvme_ls_disconnect_rqst)))
1333                 ret = VERR_DISCONN_RQST_LEN;
1334         else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1335                 ret = VERR_ASSOC_ID;
1336         else if (rqst->associd.desc_len !=
1337                         fcnvme_lsdesc_len(
1338                                 sizeof(struct fcnvme_lsdesc_assoc_id)))
1339                 ret = VERR_ASSOC_ID_LEN;
1340         else if (rqst->discon_cmd.desc_tag !=
1341                         cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1342                 ret = VERR_DISCONN_CMD;
1343         else if (rqst->discon_cmd.desc_len !=
1344                         fcnvme_lsdesc_len(
1345                                 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1346                 ret = VERR_DISCONN_CMD_LEN;
1347         else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1348                         (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1349                 ret = VERR_DISCONN_SCOPE;
1350         else {
1351                 /* match an active association */
1352                 assoc = nvmet_fc_find_target_assoc(tgtport,
1353                                 be64_to_cpu(rqst->associd.association_id));
1354                 iod->assoc = assoc;
1355                 if (assoc) {
1356                         if (rqst->discon_cmd.scope ==
1357                                         FCNVME_DISCONN_CONNECTION) {
1358                                 queue = nvmet_fc_find_target_queue(tgtport,
1359                                                 be64_to_cpu(
1360                                                         rqst->discon_cmd.id));
1361                                 if (!queue) {
1362                                         nvmet_fc_tgt_a_put(assoc);
1363                                         ret = VERR_NO_CONN;
1364                                 }
1365                         }
1366                 } else
1367                         ret = VERR_NO_ASSOC;
1368         }
1369
1370         if (ret) {
1371                 dev_err(tgtport->dev,
1372                         "Disconnect LS failed: %s\n",
1373                         validation_errors[ret]);
1374                 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1375                                 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1376                                 (ret == VERR_NO_ASSOC) ?
1377                                         FCNVME_RJT_RC_INV_ASSOC :
1378                                         (ret == VERR_NO_CONN) ?
1379                                                 FCNVME_RJT_RC_INV_CONN :
1380                                                 FCNVME_RJT_RC_LOGIC,
1381                                 FCNVME_RJT_EXP_NONE, 0);
1382                 return;
1383         }
1384
1385         /* format a response */
1386
1387         iod->lsreq->rsplen = sizeof(*acc);
1388
1389         nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1390                         fcnvme_lsdesc_len(
1391                                 sizeof(struct fcnvme_ls_disconnect_acc)),
1392                         FCNVME_LS_DISCONNECT);
1393
1394
1395         /* are we to delete a Connection ID (queue) */
1396         if (queue) {
1397                 int qid = queue->qid;
1398
1399                 nvmet_fc_delete_target_queue(queue);
1400
1401                 /* release the get taken by find_target_queue */
1402                 nvmet_fc_tgt_q_put(queue);
1403
1404                 /* tear association down if the admin queue terminated */
1405                 if (!qid)
1406                         del_assoc = true;
1407         }
1408
1409         /* release get taken in nvmet_fc_find_target_assoc */
1410         nvmet_fc_tgt_a_put(iod->assoc);
1411
1412         if (del_assoc)
1413                 nvmet_fc_delete_target_assoc(iod->assoc);
1414 }
1415
1416
1417 /* *********************** NVME Ctrl Routines **************************** */
1418
1419
1420 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1421
1422 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1423
1424 static void
1425 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1426 {
1427         struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1428         struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1429
1430         fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1431                                 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1432         nvmet_fc_free_ls_iod(tgtport, iod);
1433         nvmet_fc_tgtport_put(tgtport);
1434 }
1435
1436 static void
1437 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1438                                 struct nvmet_fc_ls_iod *iod)
1439 {
1440         int ret;
1441
1442         fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1443                                   NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1444
1445         ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1446         if (ret)
1447                 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1448 }
1449
1450 /*
1451  * Actual processing routine for received FC-NVME LS Requests from the LLD
1452  */
1453 static void
1454 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1455                         struct nvmet_fc_ls_iod *iod)
1456 {
1457         struct fcnvme_ls_rqst_w0 *w0 =
1458                         (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1459
1460         iod->lsreq->nvmet_fc_private = iod;
1461         iod->lsreq->rspbuf = iod->rspbuf;
1462         iod->lsreq->rspdma = iod->rspdma;
1463         iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1464         /* Be preventative. handlers will later set to valid length */
1465         iod->lsreq->rsplen = 0;
1466
1467         iod->assoc = NULL;
1468
1469         /*
1470          * handlers:
1471          *   parse request input, execute the request, and format the
1472          *   LS response
1473          */
1474         switch (w0->ls_cmd) {
1475         case FCNVME_LS_CREATE_ASSOCIATION:
1476                 /* Creates Association and initial Admin Queue/Connection */
1477                 nvmet_fc_ls_create_association(tgtport, iod);
1478                 break;
1479         case FCNVME_LS_CREATE_CONNECTION:
1480                 /* Creates an IO Queue/Connection */
1481                 nvmet_fc_ls_create_connection(tgtport, iod);
1482                 break;
1483         case FCNVME_LS_DISCONNECT:
1484                 /* Terminate a Queue/Connection or the Association */
1485                 nvmet_fc_ls_disconnect(tgtport, iod);
1486                 break;
1487         default:
1488                 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1489                                 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1490                                 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1491         }
1492
1493         nvmet_fc_xmt_ls_rsp(tgtport, iod);
1494 }
1495
1496 /*
1497  * Work-queue context wrapper for processing a received FC-NVME LS request
1498  */
1499 static void
1500 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1501 {
1502         struct nvmet_fc_ls_iod *iod =
1503                 container_of(work, struct nvmet_fc_ls_iod, work);
1504         struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1505
1506         nvmet_fc_handle_ls_rqst(tgtport, iod);
1507 }
1508
1509
1510 /**
1511  * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1512  *                       upon the reception of an NVME LS request.
1513  *
1514  * The nvmet-fc layer will copy payload to an internal structure for
1515  * processing.  As such, upon completion of the routine, the LLDD may
1516  * immediately free/reuse the LS request buffer passed in the call.
1517  *
1518  * If this routine returns error, the LLDD should abort the exchange.
1519  *
1520  * @target_port: pointer to the (registered) target port the LS was
1521  *              received on.
1522  * @lsreq:      pointer to a lsreq request structure to be used to reference
1523  *              the exchange corresponding to the LS.
1524  * @lsreqbuf:   pointer to the buffer containing the LS Request
1525  * @lsreqbuf_len: length, in bytes, of the received LS request
1526  */
1527 int
1528 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1529                         struct nvmefc_tgt_ls_req *lsreq,
1530                         void *lsreqbuf, u32 lsreqbuf_len)
1531 {
1532         struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1533         struct nvmet_fc_ls_iod *iod;
1534
1535         if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1536                 return -E2BIG;
1537
1538         if (!nvmet_fc_tgtport_get(tgtport))
1539                 return -ESHUTDOWN;
1540
1541         iod = nvmet_fc_alloc_ls_iod(tgtport);
1542         if (!iod) {
1543                 nvmet_fc_tgtport_put(tgtport);
1544                 return -ENOENT;
1545         }
1546
1547         iod->lsreq = lsreq;
1548         iod->fcpreq = NULL;
1549         memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1550         iod->rqstdatalen = lsreqbuf_len;
1551
1552         schedule_work(&iod->work);
1553
1554         return 0;
1555 }
1556 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
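/*
 * Editor's note: a minimal, hypothetical sketch of how an LLDD might hand a
 * received LS frame to nvmet-fc.  The names my_lport, my_ls_ctx and
 * my_lldd_abort_exchange() are illustrative only (not part of this driver);
 * the call itself matches the nvmet_fc_rcv_ls_req() signature above, and the
 * LS buffer may be reused as soon as the call returns.
 *
 *      static void my_lldd_recv_ls(struct my_lport *lport,
 *                                  struct my_ls_ctx *ctx,
 *                                  void *buf, u32 len)
 *      {
 *              if (nvmet_fc_rcv_ls_req(lport->targetport, &ctx->tgt_lsreq,
 *                                      buf, len))
 *                      my_lldd_abort_exchange(ctx);
 *      }
 */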
1557
1558
1559 /*
1560  * **********************
1561  * Start of FCP handling
1562  * **********************
1563  */
1564
1565 static int
1566 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1567 {
1568         struct scatterlist *sg;
1569         struct page *page;
1570         unsigned int nent;
1571         u32 page_len, length;
1572         int i = 0;
1573
1574         length = fod->total_length;
1575         nent = DIV_ROUND_UP(length, PAGE_SIZE);
1576         sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1577         if (!sg)
1578                 goto out;
1579
1580         sg_init_table(sg, nent);
1581
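        /*
         * allocate one page per sg entry; the final entry may cover less
         * than PAGE_SIZE
         */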
1582         while (length) {
1583                 page_len = min_t(u32, length, PAGE_SIZE);
1584
1585                 page = alloc_page(GFP_KERNEL);
1586                 if (!page)
1587                         goto out_free_pages;
1588
1589                 sg_set_page(&sg[i], page, page_len, 0);
1590                 length -= page_len;
1591                 i++;
1592         }
1593
1594         fod->data_sg = sg;
1595         fod->data_sg_cnt = nent;
1596         fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1597                                 ((fod->io_dir == NVMET_FCP_WRITE) ?
1598                                         DMA_FROM_DEVICE : DMA_TO_DEVICE));
1599                                 /* note: write from initiator perspective */
1600
1601         return 0;
1602
1603 out_free_pages:
1604         while (i > 0) {
1605                 i--;
1606                 __free_page(sg_page(&sg[i]));
1607         }
1608         kfree(sg);
1609         fod->data_sg = NULL;
1610         fod->data_sg_cnt = 0;
1611 out:
1612         return NVME_SC_INTERNAL;
1613 }
1614
1615 static void
1616 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1617 {
1618         struct scatterlist *sg;
1619         int count;
1620
1621         if (!fod->data_sg || !fod->data_sg_cnt)
1622                 return;
1623
1624         fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1625                                 ((fod->io_dir == NVMET_FCP_WRITE) ?
1626                                         DMA_FROM_DEVICE : DMA_TO_DEVICE));
1627         for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1628                 __free_page(sg_page(sg));
1629         kfree(fod->data_sg);
1630         fod->data_sg = NULL;
1631         fod->data_sg_cnt = 0;
1632 }
1633
1634
1635 static bool
1636 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1637 {
1638         u32 sqtail, used;
1639
1640         /* egad, this is ugly. And sqtail is just a best guess */
1641         sqtail = atomic_read(&q->sqtail) % q->sqsize;
1642
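        /*
         * Editor's note, with illustrative numbers: for sqsize 32, sqhd 5 and
         * a sampled sqtail of 2, used = 2 + 32 - 5 = 29, and 29 * 10 = 290 >=
         * 31 * 9 = 279, so the queue is treated as 90% full.
         */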
1643         used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1644         return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1645 }
1646
1647 /*
1648  * Prep RSP payload.
1649  * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1650  */
1651 static void
1652 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1653                                 struct nvmet_fc_fcp_iod *fod)
1654 {
1655         struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1656         struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1657         struct nvme_completion *cqe = &ersp->cqe;
1658         u32 *cqewd = (u32 *)cqe;
1659         bool send_ersp = false;
1660         u32 rsn, rspcnt, xfr_length;
1661
1662         if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1663                 xfr_length = fod->total_length;
1664         else
1665                 xfr_length = fod->offset;
1666
1667         /*
1668          * check whether we can send an abbreviated (all-zeros) rsp.
1669          *   Note: to send a 0's response, the NVME-FC host transport will
1670          *   recreate the CQE. The host transport knows: sq id, SQHD (last
1671          *   seen in an ersp), and command_id. Thus it will create a
1672          *   zero-filled CQE with those known fields filled in. Transport
1673          *   must send an ersp for any condition where the cqe won't match
1674          *   this.
1675          *
1676          * Cases where we must send an ersp (most are FC-NVME mandated):
1677          *  every N responses, where N=ersp_ratio
1678          *  fabric commands always get an ersp (not required by FC-NVME,
1679          *    but good practice)
1680          *  normal cmds: any time status is non-zero, or status is zero
1681          *     but words 0 or 1 are non-zero.
1682          *  the SQ is 90% or more full
1683          *  the cmd is a fused command
1684          *  transferred data length not equal to cmd iu length
1685          */
1686         rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1687         if (!(rspcnt % fod->queue->ersp_ratio) ||
1688             sqe->opcode == nvme_fabrics_command ||
1689             xfr_length != fod->total_length ||
1690             (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1691             (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1692             queue_90percent_full(fod->queue, cqe->sq_head))
1693                 send_ersp = true;
1694
1695         /* re-set the fields */
1696         fod->fcpreq->rspaddr = ersp;
1697         fod->fcpreq->rspdma = fod->rspdma;
1698
1699         if (!send_ersp) {
1700                 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1701                 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1702         } else {
1703                 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1704                 rsn = atomic_inc_return(&fod->queue->rsn);
1705                 ersp->rsn = cpu_to_be32(rsn);
1706                 ersp->xfrd_len = cpu_to_be32(xfr_length);
1707                 fod->fcpreq->rsplen = sizeof(*ersp);
1708         }
1709
1710         fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1711                                   sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1712 }
1713
1714 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1715
1716 static void
1717 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1718                                 struct nvmet_fc_fcp_iod *fod)
1719 {
1720         int ret;
1721
1722         fod->fcpreq->op = NVMET_FCOP_RSP;
1723         fod->fcpreq->timeout = 0;
1724
1725         nvmet_fc_prep_fcp_rsp(tgtport, fod);
1726
1727         ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1728         if (ret)
1729                 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1730 }
1731
1732 static void
1733 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1734                                 struct nvmet_fc_fcp_iod *fod, u8 op)
1735 {
1736         struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1737         struct scatterlist *sg, *datasg;
1738         u32 tlen, sg_off;
1739         int ret;
1740
1741         fcpreq->op = op;
1742         fcpreq->offset = fod->offset;
1743         fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
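        /*
         * clamp the transfer to the smallest of: the per-transfer cap,
         * the payload remaining, and what the LLDD can describe in a
         * single sg list
         */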
1744         tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
1745                         (fod->total_length - fod->offset));
1746         tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
1747         tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
1748                                         * PAGE_SIZE);
1749         fcpreq->transfer_length = tlen;
1750         fcpreq->transferred_length = 0;
1751         fcpreq->fcp_error = 0;
1752         fcpreq->rsplen = 0;
1753
1754         fcpreq->sg_cnt = 0;
1755
1756         datasg = fod->next_sg;
1757         sg_off = fod->next_sg_offset;
1758
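        /*
         * copy the mapped data sg entries for this chunk into the request's
         * sg list, trimming the first entry by any carried-over offset and
         * recording where the next chunk must resume
         */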
1759         for (sg = fcpreq->sg ; tlen; sg++) {
1760                 *sg = *datasg;
1761                 if (sg_off) {
1762                         sg->offset += sg_off;
1763                         sg->length -= sg_off;
1764                         sg->dma_address += sg_off;
1765                         sg_off = 0;
1766                 }
1767                 if (tlen < sg->length) {
1768                         sg->length = tlen;
1769                         fod->next_sg = datasg;
1770                         fod->next_sg_offset += tlen;
1771                 } else if (tlen == sg->length) {
1772                         fod->next_sg_offset = 0;
1773                         fod->next_sg = sg_next(datasg);
1774                 } else {
1775                         fod->next_sg_offset = 0;
1776                         datasg = sg_next(datasg);
1777                 }
1778                 tlen -= sg->length;
1779                 fcpreq->sg_cnt++;
1780         }
1781
1782         /*
1783          * If the last READDATA request: check if LLDD supports
1784          * combined xfr with response.
1785          */
1786         if ((op == NVMET_FCOP_READDATA) &&
1787             ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1788             (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1789                 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1790                 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1791         }
1792
1793         ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1794         if (ret) {
1795                 /*
1796                  * should be ok to set w/o lock as it's in the thread of
1797                  * execution (not an async timer routine) and doesn't
1798                  * contend with any clearing action
1799                  */
1800                 fod->abort = true;
1801
1802                 if (op == NVMET_FCOP_WRITEDATA)
1803                         nvmet_req_complete(&fod->req,
1804                                         NVME_SC_FC_TRANSPORT_ERROR);
1805                 else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1806                         fcpreq->fcp_error = ret;
1807                         fcpreq->transferred_length = 0;
1808                         nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1809                 }
1810         }
1811 }
1812
1813 /*
1814  * actual done handler for FCP operations when completed by the lldd
1815  */
1816 static void
1817 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1818 {
1819         struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1820         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1821         unsigned long flags;
1822         bool abort;
1823
1824         spin_lock_irqsave(&fod->flock, flags);
1825         abort = fod->abort;
1826         spin_unlock_irqrestore(&fod->flock, flags);
1827
1828         /* if in the middle of an io and we need to tear down */
1829         if (abort && fcpreq->op != NVMET_FCOP_ABORT) {
1830                 /* data no longer needed */
1831                 nvmet_fc_free_tgt_pgs(fod);
1832
1833                 nvmet_req_complete(&fod->req, fcpreq->fcp_error);
1834                 return;
1835         }
1836
1837         switch (fcpreq->op) {
1838
1839         case NVMET_FCOP_WRITEDATA:
1840                 if (fcpreq->fcp_error ||
1841                     fcpreq->transferred_length != fcpreq->transfer_length) {
1842                         nvmet_req_complete(&fod->req,
1843                                         NVME_SC_FC_TRANSPORT_ERROR);
1844                         return;
1845                 }
1846
1847                 fod->offset += fcpreq->transferred_length;
1848                 if (fod->offset != fod->total_length) {
1849                         /* transfer the next chunk */
1850                         nvmet_fc_transfer_fcp_data(tgtport, fod,
1851                                                 NVMET_FCOP_WRITEDATA);
1852                         return;
1853                 }
1854
1855                 /* data transfer complete, resume with nvmet layer */
1856
1857                 fod->req.execute(&fod->req);
1858
1859                 break;
1860
1861         case NVMET_FCOP_READDATA:
1862         case NVMET_FCOP_READDATA_RSP:
1863                 if (fcpreq->fcp_error ||
1864                     fcpreq->transferred_length != fcpreq->transfer_length) {
1865                         /* data no longer needed */
1866                         nvmet_fc_free_tgt_pgs(fod);
1867
1868                         nvmet_fc_abort_op(tgtport, fod->fcpreq);
1869                         return;
1870                 }
1871
1872                 /* success */
1873
1874                 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
1875                         /* data no longer needed */
1876                         nvmet_fc_free_tgt_pgs(fod);
1877                         fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
1878                                         sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1879                         nvmet_fc_free_fcp_iod(fod->queue, fod);
1880                         return;
1881                 }
1882
1883                 fod->offset += fcpreq->transferred_length;
1884                 if (fod->offset != fod->total_length) {
1885                         /* transfer the next chunk */
1886                         nvmet_fc_transfer_fcp_data(tgtport, fod,
1887                                                 NVMET_FCOP_READDATA);
1888                         return;
1889                 }
1890
1891                 /* data transfer complete, send response */
1892
1893                 /* data no longer needed */
1894                 nvmet_fc_free_tgt_pgs(fod);
1895
1896                 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1897
1898                 break;
1899
1900         case NVMET_FCOP_RSP:
1901         case NVMET_FCOP_ABORT:
1902                 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
1903                                 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1904                 nvmet_fc_free_fcp_iod(fod->queue, fod);
1905                 break;
1906
1907         default:
1908                 nvmet_fc_free_tgt_pgs(fod);
1909                 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1910                 break;
1911         }
1912 }
1913
1914 static void
1915 nvmet_fc_fcp_rqst_op_done_work(struct work_struct *work)
1916 {
1917         struct nvmet_fc_fcp_iod *fod =
1918                 container_of(work, struct nvmet_fc_fcp_iod, done_work);
1919
1920         nvmet_fc_fod_op_done(fod);
1921 }
1922
1923 static void
1924 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
1925 {
1926         struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
1927         struct nvmet_fc_tgt_queue *queue = fod->queue;
1928
1929         if (fod->tgtport->ops->target_features & NVMET_FCTGTFEAT_OPDONE_IN_ISR)
1930                 /* context switch so completion is not in ISR context */
1931                 queue_work_on(queue->cpu, queue->work_q, &fod->done_work);
1932         else
1933                 nvmet_fc_fod_op_done(fod);
1934 }
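/*
 * Editor's note: an LLDD whose FCP op-done upcalls arrive in interrupt
 * context is expected to set NVMET_FCTGTFEAT_OPDONE_IN_ISR (and, for command
 * receive upcalls, NVMET_FCTGTFEAT_CMD_IN_ISR) in the target_features of the
 * ops it registers, so that nvmet-fc defers the handling to the queue's
 * workqueue as above.
 */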
1935
1936 /*
1937  * actual completion handler after execution by the nvmet layer
1938  */
1939 static void
1940 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
1941                         struct nvmet_fc_fcp_iod *fod, int status)
1942 {
1943         struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1944         struct nvme_completion *cqe = &fod->rspiubuf.cqe;
1945         unsigned long flags;
1946         bool abort;
1947
1948         spin_lock_irqsave(&fod->flock, flags);
1949         abort = fod->abort;
1950         spin_unlock_irqrestore(&fod->flock, flags);
1951
1952         /* if we have a CQE, snoop the last sq_head value */
1953         if (!status)
1954                 fod->queue->sqhd = cqe->sq_head;
1955
1956         if (abort) {
1957                 /* data no longer needed */
1958                 nvmet_fc_free_tgt_pgs(fod);
1959
1960                 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1961                 return;
1962         }
1963
1964         /* if an error occurred handling the cmd after initial parsing */
1965         if (status) {
1966                 /* fudge up a failed CQE status for our transport error */
1967                 memset(cqe, 0, sizeof(*cqe));
1968                 cqe->sq_head = fod->queue->sqhd;        /* echo last cqe sqhd */
1969                 cqe->sq_id = cpu_to_le16(fod->queue->qid);
1970                 cqe->command_id = sqe->command_id;
1971                 cqe->status = cpu_to_le16(status);
1972         } else {
1973
1974                 /*
1975                  * try to push the data even if the CQE status is non-zero.
1976                  * There may be a status where data was still intended to
1977                  * be moved.
1978                  */
1979                 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
1980                         /* push the data over before sending rsp */
1981                         nvmet_fc_transfer_fcp_data(tgtport, fod,
1982                                                 NVMET_FCOP_READDATA);
1983                         return;
1984                 }
1985
1986                 /* writes & no data - fall thru */
1987         }
1988
1989         /* data no longer needed */
1990         nvmet_fc_free_tgt_pgs(fod);
1991
1992         nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1993 }
1994
1995
1996 static void
1997 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
1998 {
1999         struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2000         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2001
2002         __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2003 }
2004
2005
2006 /*
2007  * Actual processing routine for a received FC-NVME FCP command from the LLD
2008  */
2009 void
2010 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2011                         struct nvmet_fc_fcp_iod *fod)
2012 {
2013         struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2014         int ret;
2015
2016         /*
2017          * Fused commands are currently not supported in the linux
2018          * implementation.
2019          *
2020          * As such, the FC transport does not attempt to collect both
2021          * halves of a fused command and order their delivery to the
2022          * upper layer based on csn.
2023          */
2024
2025         fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2026
2027         fod->total_length = be32_to_cpu(cmdiu->data_len);
2028         if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2029                 fod->io_dir = NVMET_FCP_WRITE;
2030                 if (!nvme_is_write(&cmdiu->sqe))
2031                         goto transport_error;
2032         } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2033                 fod->io_dir = NVMET_FCP_READ;
2034                 if (nvme_is_write(&cmdiu->sqe))
2035                         goto transport_error;
2036         } else {
2037                 fod->io_dir = NVMET_FCP_NODATA;
2038                 if (fod->total_length)
2039                         goto transport_error;
2040         }
2041
2042         fod->req.cmd = &fod->cmdiubuf.sqe;
2043         fod->req.rsp = &fod->rspiubuf.cqe;
2044         fod->req.port = fod->queue->port;
2045
2046         /* ensure nvmet handlers will set cmd handler callback */
2047         fod->req.execute = NULL;
2048
2049         /* clear any response payload */
2050         memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2051
2052         ret = nvmet_req_init(&fod->req,
2053                                 &fod->queue->nvme_cq,
2054                                 &fod->queue->nvme_sq,
2055                                 &nvmet_fc_tgt_fcp_ops);
2056         if (!ret) {     /* bad SQE content */
2057                 nvmet_fc_abort_op(tgtport, fod->fcpreq);
2058                 return;
2059         }
2060
2061         /* keep a running counter of tail position */
2062         atomic_inc(&fod->queue->sqtail);
2063
2064         fod->data_sg = NULL;
2065         fod->data_sg_cnt = 0;
2066         if (fod->total_length) {
2067                 ret = nvmet_fc_alloc_tgt_pgs(fod);
2068                 if (ret) {
2069                         nvmet_req_complete(&fod->req, ret);
2070                         return;
2071                 }
2072         }
2073         fod->req.sg = fod->data_sg;
2074         fod->req.sg_cnt = fod->data_sg_cnt;
2075         fod->offset = 0;
2076         fod->next_sg = fod->data_sg;
2077         fod->next_sg_offset = 0;
2078
2079         if (fod->io_dir == NVMET_FCP_WRITE) {
2080                 /* pull the data over before invoking nvmet layer */
2081                 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2082                 return;
2083         }
2084
2085         /*
2086          * Reads or no data:
2087          *
2088          * can invoke the nvmet_layer now. If read data, cmd completion will
2089          * push the data
2090          */
2091
2092         fod->req.execute(&fod->req);
2093
2094         return;
2095
2096 transport_error:
2097         nvmet_fc_abort_op(tgtport, fod->fcpreq);
2098 }
2099
2100 /*
2101  * Work-queue context wrapper for processing a received FC-NVME FCP command
2102  */
2103 static void
2104 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2105 {
2106         struct nvmet_fc_fcp_iod *fod =
2107                 container_of(work, struct nvmet_fc_fcp_iod, work);
2108         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2109
2110         nvmet_fc_handle_fcp_rqst(tgtport, fod);
2111 }
2112
2113 /**
2114  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2115  *                       upon the reception of an NVME FCP CMD IU.
2116  *
2117  * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2118  * layer for processing.
2119  *
2120  * The nvmet-fc layer will copy cmd payload to an internal structure for
2121  * processing.  As such, upon completion of the routine, the LLDD may
2122  * immediately free/reuse the CMD IU buffer passed in the call.
2123  *
2124  * If this routine returns error, the LLDD should abort the exchange.
2125  *
2126  * @target_port: pointer to the (registered) target port the FCP CMD IU
2127  *              was received on.
2128  * @fcpreq:     pointer to a fcpreq request structure to be used to reference
2129  *              the exchange corresponding to the FCP Exchange.
2130  * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
2131  * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2132  */
2133 int
2134 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2135                         struct nvmefc_tgt_fcp_req *fcpreq,
2136                         void *cmdiubuf, u32 cmdiubuf_len)
2137 {
2138         struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2139         struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2140         struct nvmet_fc_tgt_queue *queue;
2141         struct nvmet_fc_fcp_iod *fod;
2142
2143         /* validate iu, so the connection id can be used to find the queue */
2144         if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2145                         (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2146                         (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2147                         (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2148                 return -EIO;
2149
2150
2151         queue = nvmet_fc_find_target_queue(tgtport,
2152                                 be64_to_cpu(cmdiu->connection_id));
2153         if (!queue)
2154                 return -ENOTCONN;
2155
2156         /*
2157          * note: reference taken by find_target_queue
2158          * After successful fod allocation, the fod will inherit the
2159          * ownership of that reference and will remove the reference
2160          * when the fod is freed.
2161          */
2162
2163         fod = nvmet_fc_alloc_fcp_iod(queue);
2164         if (!fod) {
2165                 /* release the queue lookup reference */
2166                 nvmet_fc_tgt_q_put(queue);
2167                 return -ENOENT;
2168         }
2169
2170         fcpreq->nvmet_fc_private = fod;
2171         fod->fcpreq = fcpreq;
2172         /*
2173          * put all admin cmds on hw queue id 0. All io commands go to
2174          * the respective hw queue based on a modulo basis
2175          */
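        /*
         * e.g., with max_hw_queues == 4: qid 1 -> hwqid 0, qid 4 -> hwqid 3,
         * qid 5 wraps back to hwqid 0
         */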
2176         fcpreq->hwqid = queue->qid ?
2177                         ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
2178         memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2179
2180         if (tgtport->ops->target_features & NVMET_FCTGTFEAT_CMD_IN_ISR)
2181                 queue_work_on(queue->cpu, queue->work_q, &fod->work);
2182         else
2183                 nvmet_fc_handle_fcp_rqst(tgtport, fod);
2184
2185         return 0;
2186 }
2187 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
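/*
 * Editor's note: a minimal, hypothetical sketch of how an LLDD might pass a
 * received FCP CMD IU to nvmet-fc.  my_hba, my_fcp_ctx and
 * my_lldd_abort_exchange() are illustrative names only; the call matches the
 * nvmet_fc_rcv_fcp_req() signature above, and the CMD IU buffer may be
 * reused as soon as the call returns.
 *
 *      static void my_lldd_recv_fcp_cmd(struct my_hba *hba,
 *                                       struct my_fcp_ctx *ctx,
 *                                       void *cmdiu, u32 cmdiu_len)
 *      {
 *              if (nvmet_fc_rcv_fcp_req(hba->targetport, &ctx->tgt_fcpreq,
 *                                       cmdiu, cmdiu_len))
 *                      my_lldd_abort_exchange(ctx);
 *      }
 */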
2188
2189 enum {
2190         FCT_TRADDR_ERR          = 0,
2191         FCT_TRADDR_WWNN         = 1 << 0,
2192         FCT_TRADDR_WWPN         = 1 << 1,
2193 };
2194
2195 struct nvmet_fc_traddr {
2196         u64     nn;
2197         u64     pn;
2198 };
2199
2200 static const match_table_t traddr_opt_tokens = {
2201         { FCT_TRADDR_WWNN,      "nn-%s"         },
2202         { FCT_TRADDR_WWPN,      "pn-%s"         },
2203         { FCT_TRADDR_ERR,       NULL            }
2204 };
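/*
 * The traddr string handed to nvmet_fc_add_port() is expected to be of the
 * form "nn-<wwnn>,pn-<wwpn>" (tokens per traddr_opt_tokens above); each value
 * is parsed with match_u64().
 */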
2205
2206 static int
2207 nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
2208 {
2209         substring_t args[MAX_OPT_ARGS];
2210         char *options, *o, *p;
2211         int token, ret = 0;
2212         u64 token64;
2213
2214         options = o = kstrdup(buf, GFP_KERNEL);
2215         if (!options)
2216                 return -ENOMEM;
2217
2218         while ((p = strsep(&o, ",\n")) != NULL) {
2219                 if (!*p)
2220                         continue;
2221
2222                 token = match_token(p, traddr_opt_tokens, args);
2223                 switch (token) {
2224                 case FCT_TRADDR_WWNN:
2225                         if (match_u64(args, &token64)) {
2226                                 ret = -EINVAL;
2227                                 goto out;
2228                         }
2229                         traddr->nn = token64;
2230                         break;
2231                 case FCT_TRADDR_WWPN:
2232                         if (match_u64(args, &token64)) {
2233                                 ret = -EINVAL;
2234                                 goto out;
2235                         }
2236                         traddr->pn = token64;
2237                         break;
2238                 default:
2239                         pr_warn("unknown traddr token or missing value '%s'\n",
2240                                         p);
2241                         ret = -EINVAL;
2242                         goto out;
2243                 }
2244         }
2245
2246 out:
2247         kfree(options);
2248         return ret;
2249 }
2250
2251 static int
2252 nvmet_fc_add_port(struct nvmet_port *port)
2253 {
2254         struct nvmet_fc_tgtport *tgtport;
2255         struct nvmet_fc_traddr traddr = { 0L, 0L };
2256         unsigned long flags;
2257         int ret;
2258
2259         /* validate the address info */
2260         if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2261             (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2262                 return -EINVAL;
2263
2264         /* map the traddr address info to a target port */
2265
2266         ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
2267         if (ret)
2268                 return ret;
2269
2270         ret = -ENXIO;
2271         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2272         list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2273                 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2274                     (tgtport->fc_target_port.port_name == traddr.pn)) {
2275                         /* an FC port can map to only one nvmet port id */
2276                         if (!tgtport->port) {
2277                                 tgtport->port = port;
2278                                 port->priv = tgtport;
2279                                 ret = 0;
2280                         } else
2281                                 ret = -EALREADY;
2282                         break;
2283                 }
2284         }
2285         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2286         return ret;
2287 }
2288
2289 static void
2290 nvmet_fc_remove_port(struct nvmet_port *port)
2291 {
2292         struct nvmet_fc_tgtport *tgtport = port->priv;
2293         unsigned long flags;
2294
2295         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2296         if (tgtport->port == port) {
2297                 nvmet_fc_tgtport_put(tgtport);
2298                 tgtport->port = NULL;
2299         }
2300         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2301 }
2302
2303 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2304         .owner                  = THIS_MODULE,
2305         .type                   = NVMF_TRTYPE_FC,
2306         .msdbd                  = 1,
2307         .add_port               = nvmet_fc_add_port,
2308         .remove_port            = nvmet_fc_remove_port,
2309         .queue_response         = nvmet_fc_fcp_nvme_cmd_done,
2310         .delete_ctrl            = nvmet_fc_delete_ctrl,
2311 };
2312
2313 static int __init nvmet_fc_init_module(void)
2314 {
2315         return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2316 }
2317
2318 static void __exit nvmet_fc_exit_module(void)
2319 {
2320         /* sanity check - all targetports should be removed */
2321         if (!list_empty(&nvmet_fc_target_list))
2322                 pr_warn("%s: targetport list not empty\n", __func__);
2323
2324         nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2325
2326         ida_destroy(&nvmet_fc_tgtport_cnt);
2327 }
2328
2329 module_init(nvmet_fc_init_module);
2330 module_exit(nvmet_fc_exit_module);
2331
2332 MODULE_LICENSE("GPL v2");