drivers/nvme/target/fc.c (karo-tx-linux.git, for-4.12/block)
1 /*
2  * Copyright (c) 2016 Avago Technologies.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of version 2 of the GNU General Public License as
6  * published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful.
9  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
10  * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
11  * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
12  * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
13  * See the GNU General Public License for more details, a copy of which
14  * can be found in the file COPYING included with this package
15  *
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/module.h>
19 #include <linux/slab.h>
20 #include <linux/blk-mq.h>
21 #include <linux/parser.h>
22 #include <linux/random.h>
23 #include <uapi/scsi/fc/fc_fs.h>
24 #include <uapi/scsi/fc/fc_els.h>
25
26 #include "nvmet.h"
27 #include <linux/nvme-fc-driver.h>
28 #include <linux/nvme-fc.h>
29
30
31 /* *************************** Data Structures/Defines ****************** */
32
33
34 #define NVMET_LS_CTX_COUNT              4
35
36 /* for this implementation, assume small single frame rqst/rsp */
37 #define NVME_FC_MAX_LS_BUFFER_SIZE              2048
38
39 struct nvmet_fc_tgtport;
40 struct nvmet_fc_tgt_assoc;
41
42 struct nvmet_fc_ls_iod {
43         struct nvmefc_tgt_ls_req        *lsreq;
44         struct nvmefc_tgt_fcp_req       *fcpreq;        /* only if RS */
45
46         struct list_head                ls_list;        /* tgtport->ls_list */
47
48         struct nvmet_fc_tgtport         *tgtport;
49         struct nvmet_fc_tgt_assoc       *assoc;
50
51         u8                              *rqstbuf;
52         u8                              *rspbuf;
53         u16                             rqstdatalen;
54         dma_addr_t                      rspdma;
55
56         struct scatterlist              sg[2];
57
58         struct work_struct              work;
59 } __aligned(sizeof(unsigned long long));
60
61 #define NVMET_FC_MAX_KB_PER_XFR         256
62
63 enum nvmet_fcp_datadir {
64         NVMET_FCP_NODATA,
65         NVMET_FCP_WRITE,
66         NVMET_FCP_READ,
67         NVMET_FCP_ABORTED,
68 };
69
70 struct nvmet_fc_fcp_iod {
71         struct nvmefc_tgt_fcp_req       *fcpreq;
72
73         struct nvme_fc_cmd_iu           cmdiubuf;
74         struct nvme_fc_ersp_iu          rspiubuf;
75         dma_addr_t                      rspdma;
76         struct scatterlist              *data_sg;
77         struct scatterlist              *next_sg;
78         int                             data_sg_cnt;
79         u32                             next_sg_offset;
80         u32                             total_length;
81         u32                             offset;
82         enum nvmet_fcp_datadir          io_dir;
83         bool                            active;
84         bool                            abort;
85         spinlock_t                      flock;
86
87         struct nvmet_req                req;
88         struct work_struct              work;
89
90         struct nvmet_fc_tgtport         *tgtport;
91         struct nvmet_fc_tgt_queue       *queue;
92
93         struct list_head                fcp_list;       /* tgtport->fcp_list */
94 };
95
96 struct nvmet_fc_tgtport {
97
98         struct nvmet_fc_target_port     fc_target_port;
99
100         struct list_head                tgt_list; /* nvmet_fc_target_list */
101         struct device                   *dev;   /* dev for dma mapping */
102         struct nvmet_fc_target_template *ops;
103
104         struct nvmet_fc_ls_iod          *iod;
105         spinlock_t                      lock;
106         struct list_head                ls_list;
107         struct list_head                ls_busylist;
108         struct list_head                assoc_list;
109         struct ida                      assoc_cnt;
110         struct nvmet_port               *port;
111         struct kref                     ref;
112 };
113
114 struct nvmet_fc_tgt_queue {
115         bool                            ninetypercent;
116         u16                             qid;
117         u16                             sqsize;
118         u16                             ersp_ratio;
119         u16                             sqhd;
120         int                             cpu;
121         atomic_t                        connected;
122         atomic_t                        sqtail;
123         atomic_t                        zrspcnt;
124         atomic_t                        rsn;
125         spinlock_t                      qlock;
126         struct nvmet_port               *port;
127         struct nvmet_cq                 nvme_cq;
128         struct nvmet_sq                 nvme_sq;
129         struct nvmet_fc_tgt_assoc       *assoc;
130         struct nvmet_fc_fcp_iod         *fod;           /* array of fcp_iods */
131         struct list_head                fod_list;
132         struct workqueue_struct         *work_q;
133         struct kref                     ref;
134 } __aligned(sizeof(unsigned long long));
135
136 struct nvmet_fc_tgt_assoc {
137         u64                             association_id;
138         u32                             a_id;
139         struct nvmet_fc_tgtport         *tgtport;
140         struct list_head                a_list;
141         struct nvmet_fc_tgt_queue       *queues[NVMET_NR_QUEUES];
142         struct kref                     ref;
143 };
144
145
146 static inline int
147 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
148 {
149         return (iodptr - iodptr->tgtport->iod);
150 }
151
152 static inline int
153 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
154 {
155         return (fodptr - fodptr->queue->fod);
156 }
157
158
159 /*
160  * Association and Connection IDs:
161  *
162  * Association ID will have a random number in the upper 6 bytes and zero
163  *   in the lower 2 bytes
164  *
165  * Connection IDs will be Association ID with QID or'd in lower 2 bytes
166  *
167  * note: Association ID = Connection ID for queue 0
168  */
169 #define BYTES_FOR_QID                   sizeof(u16)
170 #define BYTES_FOR_QID_SHIFT             (BYTES_FOR_QID * 8)
171 #define NVMET_FC_QUEUEID_MASK           ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
172
173 static inline u64
174 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
175 {
176         return (assoc->association_id | qid);
177 }
178
179 static inline u64
180 nvmet_fc_getassociationid(u64 connectionid)
181 {
182         return connectionid & ~NVMET_FC_QUEUEID_MASK;
183 }
184
185 static inline u16
186 nvmet_fc_getqueueid(u64 connectionid)
187 {
188         return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
189 }
190
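/*
 * Illustrative example (not part of the driver): with the layout above,
 * the three helpers round-trip.  For a hypothetical association_id of
 * 0xA1B2C3D4E5F60000 and qid 3:
 *
 *	u64 connid = nvmet_fc_makeconnid(assoc, 3);  // 0xA1B2C3D4E5F60003
 *	WARN_ON(nvmet_fc_getassociationid(connid) != assoc->association_id);
 *	WARN_ON(nvmet_fc_getqueueid(connid) != 3);
 */
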
191 static inline struct nvmet_fc_tgtport *
192 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
193 {
194         return container_of(targetport, struct nvmet_fc_tgtport,
195                                  fc_target_port);
196 }
197
198 static inline struct nvmet_fc_fcp_iod *
199 nvmet_req_to_fod(struct nvmet_req *nvme_req)
200 {
201         return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
202 }
203
204
205 /* *************************** Globals **************************** */
206
207
208 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
209
210 static LIST_HEAD(nvmet_fc_target_list);
211 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
212
213
214 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
215 static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
216 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
217 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
218 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
219 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
220 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
221 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
222
223
224 /* *********************** FC-NVME DMA Handling **************************** */
225
226 /*
227  * The fcloop device passes in a NULL device pointer. Real LLDDs will
228  * pass in a valid device pointer. If NULL is passed to the dma mapping
229  * routines, depending on the platform, it may or may not succeed, and
230  * may crash.
231  *
232  * As such:
233  * Wrap all the dma routines and check the dev pointer.
234  *
235  * For simple mappings (those returning just a dma address), we'll noop
236  * them, returning a dma address of 0.
237  *
238  * On more complex mappings (dma_map_sg), a pseudo routine fills
239  * in the scatter list, setting all dma addresses to 0.
240  */
241
242 static inline dma_addr_t
243 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
244                 enum dma_data_direction dir)
245 {
246         return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
247 }
248
249 static inline int
250 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
251 {
252         return dev ? dma_mapping_error(dev, dma_addr) : 0;
253 }
254
255 static inline void
256 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
257         enum dma_data_direction dir)
258 {
259         if (dev)
260                 dma_unmap_single(dev, addr, size, dir);
261 }
262
263 static inline void
264 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
265                 enum dma_data_direction dir)
266 {
267         if (dev)
268                 dma_sync_single_for_cpu(dev, addr, size, dir);
269 }
270
271 static inline void
272 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
273                 enum dma_data_direction dir)
274 {
275         if (dev)
276                 dma_sync_single_for_device(dev, addr, size, dir);
277 }
278
279 /* pseudo dma_map_sg call */
280 static int
281 fc_map_sg(struct scatterlist *sg, int nents)
282 {
283         struct scatterlist *s;
284         int i;
285
286         WARN_ON(nents == 0 || sg[0].length == 0);
287
288         for_each_sg(sg, s, nents, i) {
289                 s->dma_address = 0L;
290 #ifdef CONFIG_NEED_SG_DMA_LENGTH
291                 s->dma_length = s->length;
292 #endif
293         }
294         return nents;
295 }
296
297 static inline int
298 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
299                 enum dma_data_direction dir)
300 {
301         return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
302 }
303
304 static inline void
305 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
306                 enum dma_data_direction dir)
307 {
308         if (dev)
309                 dma_unmap_sg(dev, sg, nents, dir);
310 }
311
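/*
 * Usage sketch (illustrative only; 'buf' and 'len' are hypothetical):
 * callers pair the wrappers exactly as they would the raw DMA API, and
 * the fcloop case (dev == NULL) degenerates to a no-op mapping.
 *
 *	dma_addr_t d;
 *
 *	d = fc_dma_map_single(tgtport->dev, buf, len, DMA_TO_DEVICE);
 *	if (fc_dma_mapping_error(tgtport->dev, d))  // always 0 when dev == NULL
 *		return -ENOMEM;
 *	...
 *	fc_dma_unmap_single(tgtport->dev, d, len, DMA_TO_DEVICE);
 */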
312
313 /* *********************** FC-NVME Port Management ************************ */
314
315
316 static int
317 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
318 {
319         struct nvmet_fc_ls_iod *iod;
320         int i;
321
322         iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
323                         GFP_KERNEL);
324         if (!iod)
325                 return -ENOMEM;
326
327         tgtport->iod = iod;
328
329         for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
330                 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
331                 iod->tgtport = tgtport;
332                 list_add_tail(&iod->ls_list, &tgtport->ls_list);
333
334                 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
335                         GFP_KERNEL);
336                 if (!iod->rqstbuf)
337                         goto out_fail;
338
339                 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
340
341                 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
342                                                 NVME_FC_MAX_LS_BUFFER_SIZE,
343                                                 DMA_TO_DEVICE);
344                 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
345                         goto out_fail;
346         }
347
348         return 0;
349
350 out_fail:
351         kfree(iod->rqstbuf);
352         list_del(&iod->ls_list);
353         for (iod--, i--; i >= 0; iod--, i--) {
354                 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
355                                 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
356                 kfree(iod->rqstbuf);
357                 list_del(&iod->ls_list);
358         }
359
360         kfree(iod);
361
362         return -EFAULT;
363 }
364
365 static void
366 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
367 {
368         struct nvmet_fc_ls_iod *iod = tgtport->iod;
369         int i;
370
371         for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
372                 fc_dma_unmap_single(tgtport->dev,
373                                 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
374                                 DMA_TO_DEVICE);
375                 kfree(iod->rqstbuf);
376                 list_del(&iod->ls_list);
377         }
378         kfree(tgtport->iod);
379 }
380
381 static struct nvmet_fc_ls_iod *
382 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
383 {
384         struct nvmet_fc_ls_iod *iod;
385         unsigned long flags;
386
387         spin_lock_irqsave(&tgtport->lock, flags);
388         iod = list_first_entry_or_null(&tgtport->ls_list,
389                                         struct nvmet_fc_ls_iod, ls_list);
390         if (iod)
391                 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
392         spin_unlock_irqrestore(&tgtport->lock, flags);
393         return iod;
394 }
395
396
397 static void
398 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
399                         struct nvmet_fc_ls_iod *iod)
400 {
401         unsigned long flags;
402
403         spin_lock_irqsave(&tgtport->lock, flags);
404         list_move(&iod->ls_list, &tgtport->ls_list);
405         spin_unlock_irqrestore(&tgtport->lock, flags);
406 }
407
408 static void
409 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
410                                 struct nvmet_fc_tgt_queue *queue)
411 {
412         struct nvmet_fc_fcp_iod *fod = queue->fod;
413         int i;
414
415         for (i = 0; i < queue->sqsize; fod++, i++) {
416                 INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
417                 fod->tgtport = tgtport;
418                 fod->queue = queue;
419                 fod->active = false;
420                 list_add_tail(&fod->fcp_list, &queue->fod_list);
421                 spin_lock_init(&fod->flock);
422
423                 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
424                                         sizeof(fod->rspiubuf), DMA_TO_DEVICE);
425                 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
426                         list_del(&fod->fcp_list);
427                         for (fod--, i--; i >= 0; fod--, i--) {
428                                 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
429                                                 sizeof(fod->rspiubuf),
430                                                 DMA_TO_DEVICE);
431                                 fod->rspdma = 0L;
432                                 list_del(&fod->fcp_list);
433                         }
434
435                         return;
436                 }
437         }
438 }
439
440 static void
441 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
442                                 struct nvmet_fc_tgt_queue *queue)
443 {
444         struct nvmet_fc_fcp_iod *fod = queue->fod;
445         int i;
446
447         for (i = 0; i < queue->sqsize; fod++, i++) {
448                 if (fod->rspdma)
449                         fc_dma_unmap_single(tgtport->dev, fod->rspdma,
450                                 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
451         }
452 }
453
454 static struct nvmet_fc_fcp_iod *
455 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
456 {
457         struct nvmet_fc_fcp_iod *fod;
458         unsigned long flags;
459
460         spin_lock_irqsave(&queue->qlock, flags);
461         fod = list_first_entry_or_null(&queue->fod_list,
462                                         struct nvmet_fc_fcp_iod, fcp_list);
463         if (fod) {
464                 list_del(&fod->fcp_list);
465                 fod->active = true;
466                 fod->abort = false;
467                 /*
468                  * no queue reference is taken, as it was taken by the
469                  * queue lookup just prior to the allocation. The fod
470                  * will "inherit" that reference.
471                  */
472         }
473         spin_unlock_irqrestore(&queue->qlock, flags);
474         return fod;
475 }
476
477
478 static void
479 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
480                         struct nvmet_fc_fcp_iod *fod)
481 {
482         unsigned long flags;
483
484         spin_lock_irqsave(&queue->qlock, flags);
485         list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
486         fod->active = false;
487         spin_unlock_irqrestore(&queue->qlock, flags);
488
489         /*
490          * release the reference taken at queue lookup and fod allocation
491          */
492         nvmet_fc_tgt_q_put(queue);
493 }
494
495 static int
496 nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
497 {
498         int cpu, idx, cnt;
499
500         if (!(tgtport->ops->target_features &
501                         NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
502             tgtport->ops->max_hw_queues == 1)
503                 return WORK_CPU_UNBOUND;
504
505         /* Simple cpu selection based on qid modulo active cpu count */
506         idx = !qid ? 0 : (qid - 1) % num_active_cpus();
507
508         /* find the n'th active cpu */
509         for (cpu = 0, cnt = 0; ; ) {
510                 if (cpu_active(cpu)) {
511                         if (cnt == idx)
512                                 break;
513                         cnt++;
514                 }
515                 cpu = (cpu + 1) % num_possible_cpus();
516         }
517
518         return cpu;
519 }
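
/*
 * Illustrative mapping (assuming an LLDD that sets
 * NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED, reports more than one hw queue,
 * and a system with 4 active cpus numbered 0-3):
 *
 *	qid 0 (admin)  -> cpu 0
 *	qids 1,2,3,4   -> cpus 0,1,2,3
 *	qid 5          -> cpu 0 (wraps via the modulo above)
 */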
520
521 static struct nvmet_fc_tgt_queue *
522 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
523                         u16 qid, u16 sqsize)
524 {
525         struct nvmet_fc_tgt_queue *queue;
526         unsigned long flags;
527         int ret;
528
529         if (qid >= NVMET_NR_QUEUES)
530                 return NULL;
531
532         queue = kzalloc((sizeof(*queue) +
533                                 (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
534                                 GFP_KERNEL);
535         if (!queue)
536                 return NULL;
537
538         if (!nvmet_fc_tgt_a_get(assoc))
539                 goto out_free_queue;
540
541         queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
542                                 assoc->tgtport->fc_target_port.port_num,
543                                 assoc->a_id, qid);
544         if (!queue->work_q)
545                 goto out_a_put;
546
547         queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
548         queue->qid = qid;
549         queue->sqsize = sqsize;
550         queue->assoc = assoc;
551         queue->port = assoc->tgtport->port;
552         queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
553         INIT_LIST_HEAD(&queue->fod_list);
554         atomic_set(&queue->connected, 0);
555         atomic_set(&queue->sqtail, 0);
556         atomic_set(&queue->rsn, 1);
557         atomic_set(&queue->zrspcnt, 0);
558         spin_lock_init(&queue->qlock);
559         kref_init(&queue->ref);
560
561         nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
562
563         ret = nvmet_sq_init(&queue->nvme_sq);
564         if (ret)
565                 goto out_fail_iodlist;
566
567         WARN_ON(assoc->queues[qid]);
568         spin_lock_irqsave(&assoc->tgtport->lock, flags);
569         assoc->queues[qid] = queue;
570         spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
571
572         return queue;
573
574 out_fail_iodlist:
575         nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
576         destroy_workqueue(queue->work_q);
577 out_a_put:
578         nvmet_fc_tgt_a_put(assoc);
579 out_free_queue:
580         kfree(queue);
581         return NULL;
582 }
583
584
585 static void
586 nvmet_fc_tgt_queue_free(struct kref *ref)
587 {
588         struct nvmet_fc_tgt_queue *queue =
589                 container_of(ref, struct nvmet_fc_tgt_queue, ref);
590         unsigned long flags;
591
592         spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
593         queue->assoc->queues[queue->qid] = NULL;
594         spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
595
596         nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
597
598         nvmet_fc_tgt_a_put(queue->assoc);
599
600         destroy_workqueue(queue->work_q);
601
602         kfree(queue);
603 }
604
605 static void
606 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
607 {
608         kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
609 }
610
611 static int
612 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
613 {
614         return kref_get_unless_zero(&queue->ref);
615 }
616
617
618 static void
619 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
620                                 struct nvmefc_tgt_fcp_req *fcpreq)
621 {
622         int ret;
623
624         fcpreq->op = NVMET_FCOP_ABORT;
625         fcpreq->offset = 0;
626         fcpreq->timeout = 0;
627         fcpreq->transfer_length = 0;
628         fcpreq->transferred_length = 0;
629         fcpreq->fcp_error = 0;
630         fcpreq->sg_cnt = 0;
631
632         ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
633         if (ret)
634                 /* should never reach here !! */
635                 WARN_ON(1);
636 }
637
638
639 static void
640 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
641 {
642         struct nvmet_fc_fcp_iod *fod = queue->fod;
643         unsigned long flags;
644         int i;
645         bool disconnect;
646
647         disconnect = atomic_xchg(&queue->connected, 0);
648
649         spin_lock_irqsave(&queue->qlock, flags);
650         /* abort outstanding io's */
651         for (i = 0; i < queue->sqsize; fod++, i++) {
652                 if (fod->active) {
653                         spin_lock(&fod->flock);
654                         fod->abort = true;
655                         spin_unlock(&fod->flock);
656                 }
657         }
658         spin_unlock_irqrestore(&queue->qlock, flags);
659
660         flush_workqueue(queue->work_q);
661
662         if (disconnect)
663                 nvmet_sq_destroy(&queue->nvme_sq);
664
665         nvmet_fc_tgt_q_put(queue);
666 }
667
668 static struct nvmet_fc_tgt_queue *
669 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
670                                 u64 connection_id)
671 {
672         struct nvmet_fc_tgt_assoc *assoc;
673         struct nvmet_fc_tgt_queue *queue;
674         u64 association_id = nvmet_fc_getassociationid(connection_id);
675         u16 qid = nvmet_fc_getqueueid(connection_id);
676         unsigned long flags;
677
678         spin_lock_irqsave(&tgtport->lock, flags);
679         list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
680                 if (association_id == assoc->association_id) {
681                         queue = assoc->queues[qid];
682                         if (queue &&
683                             (!atomic_read(&queue->connected) ||
684                              !nvmet_fc_tgt_q_get(queue)))
685                                 queue = NULL;
686                         spin_unlock_irqrestore(&tgtport->lock, flags);
687                         return queue;
688                 }
689         }
690         spin_unlock_irqrestore(&tgtport->lock, flags);
691         return NULL;
692 }
693
694 static struct nvmet_fc_tgt_assoc *
695 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
696 {
697         struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
698         unsigned long flags;
699         u64 ran;
700         int idx;
701         bool needrandom = true;
702
703         assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
704         if (!assoc)
705                 return NULL;
706
707         idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
708         if (idx < 0)
709                 goto out_free_assoc;
710
711         if (!nvmet_fc_tgtport_get(tgtport))
712                 goto out_ida_put;
713
714         assoc->tgtport = tgtport;
715         assoc->a_id = idx;
716         INIT_LIST_HEAD(&assoc->a_list);
717         kref_init(&assoc->ref);
718
719         while (needrandom) {
720                 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
721                 ran = ran << BYTES_FOR_QID_SHIFT;
722
723                 spin_lock_irqsave(&tgtport->lock, flags);
724                 needrandom = false;
725                 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
726                         if (ran == tmpassoc->association_id) {
727                                 needrandom = true;
728                                 break;
729                         }
730                 if (!needrandom) {
731                         assoc->association_id = ran;
732                         list_add_tail(&assoc->a_list, &tgtport->assoc_list);
733                 }
734                 spin_unlock_irqrestore(&tgtport->lock, flags);
735         }
736
737         return assoc;
738
739 out_ida_put:
740         ida_simple_remove(&tgtport->assoc_cnt, idx);
741 out_free_assoc:
742         kfree(assoc);
743         return NULL;
744 }
745
746 static void
747 nvmet_fc_target_assoc_free(struct kref *ref)
748 {
749         struct nvmet_fc_tgt_assoc *assoc =
750                 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
751         struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
752         unsigned long flags;
753
754         spin_lock_irqsave(&tgtport->lock, flags);
755         list_del(&assoc->a_list);
756         spin_unlock_irqrestore(&tgtport->lock, flags);
757         ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
758         kfree(assoc);
759         nvmet_fc_tgtport_put(tgtport);
760 }
761
762 static void
763 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
764 {
765         kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
766 }
767
768 static int
769 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
770 {
771         return kref_get_unless_zero(&assoc->ref);
772 }
773
774 static void
775 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
776 {
777         struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
778         struct nvmet_fc_tgt_queue *queue;
779         unsigned long flags;
780         int i;
781
782         spin_lock_irqsave(&tgtport->lock, flags);
783         for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
784                 queue = assoc->queues[i];
785                 if (queue) {
786                         if (!nvmet_fc_tgt_q_get(queue))
787                                 continue;
788                         spin_unlock_irqrestore(&tgtport->lock, flags);
789                         nvmet_fc_delete_target_queue(queue);
790                         nvmet_fc_tgt_q_put(queue);
791                         spin_lock_irqsave(&tgtport->lock, flags);
792                 }
793         }
794         spin_unlock_irqrestore(&tgtport->lock, flags);
795
796         nvmet_fc_tgt_a_put(assoc);
797 }
798
799 static struct nvmet_fc_tgt_assoc *
800 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
801                                 u64 association_id)
802 {
803         struct nvmet_fc_tgt_assoc *assoc;
804         struct nvmet_fc_tgt_assoc *ret = NULL;
805         unsigned long flags;
806
807         spin_lock_irqsave(&tgtport->lock, flags);
808         list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
809                 if (association_id == assoc->association_id) {
810                         ret = assoc;
811                         nvmet_fc_tgt_a_get(assoc);
812                         break;
813                 }
814         }
815         spin_unlock_irqrestore(&tgtport->lock, flags);
816
817         return ret;
818 }
819
820
821 /**
822  * nvmet_fc_register_targetport - transport entry point called by an
823  *                              LLDD to register the existence of a local
824  *                              NVME subsystem FC port.
825  * @pinfo:     pointer to information about the port to be registered
826  * @template:  LLDD entrypoints and operational parameters for the port
827  * @dev:       physical hardware device node port corresponds to. Will be
828  *             used for DMA mappings
829  * @portptr:   pointer to a target port pointer. Upon success, the routine
830  *             will allocate an nvmet_fc_target_port structure and place its
831  *             address in the target port pointer. Upon failure, the target
832  *             port pointer will be set to NULL.
833  *
834  * Returns:
835  * a completion status. Must be 0 upon success; a negative errno
836  * (ex: -ENXIO) upon failure.
837  */
838 int
839 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
840                         struct nvmet_fc_target_template *template,
841                         struct device *dev,
842                         struct nvmet_fc_target_port **portptr)
843 {
844         struct nvmet_fc_tgtport *newrec;
845         unsigned long flags;
846         int ret, idx;
847
848         if (!template->xmt_ls_rsp || !template->fcp_op ||
849             !template->targetport_delete ||
850             !template->max_hw_queues || !template->max_sgl_segments ||
851             !template->max_dif_sgl_segments || !template->dma_boundary) {
852                 ret = -EINVAL;
853                 goto out_regtgt_failed;
854         }
855
856         newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
857                          GFP_KERNEL);
858         if (!newrec) {
859                 ret = -ENOMEM;
860                 goto out_regtgt_failed;
861         }
862
863         idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
864         if (idx < 0) {
865                 ret = -ENOSPC;
866                 goto out_fail_kfree;
867         }
868
869         if (!get_device(dev) && dev) {
870                 ret = -ENODEV;
871                 goto out_ida_put;
872         }
873
874         newrec->fc_target_port.node_name = pinfo->node_name;
875         newrec->fc_target_port.port_name = pinfo->port_name;
876         newrec->fc_target_port.private = &newrec[1];
877         newrec->fc_target_port.port_id = pinfo->port_id;
878         newrec->fc_target_port.port_num = idx;
879         INIT_LIST_HEAD(&newrec->tgt_list);
880         newrec->dev = dev;
881         newrec->ops = template;
882         spin_lock_init(&newrec->lock);
883         INIT_LIST_HEAD(&newrec->ls_list);
884         INIT_LIST_HEAD(&newrec->ls_busylist);
885         INIT_LIST_HEAD(&newrec->assoc_list);
886         kref_init(&newrec->ref);
887         ida_init(&newrec->assoc_cnt);
888
889         ret = nvmet_fc_alloc_ls_iodlist(newrec);
890         if (ret) {
891                 ret = -ENOMEM;
892                 goto out_free_newrec;
893         }
894
895         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
896         list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
897         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
898
899         *portptr = &newrec->fc_target_port;
900         return 0;
901
902 out_free_newrec:
903         put_device(dev);
904 out_ida_put:
905         ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
906 out_fail_kfree:
907         kfree(newrec);
908 out_regtgt_failed:
909         *portptr = NULL;
910         return ret;
911 }
912 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
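
/*
 * Registration sketch from an LLDD's point of view (illustrative only;
 * 'my_tgt_template', 'wwnn', 'wwpn', 'fc_id' and 'pdev' are hypothetical
 * LLDD-side names):
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name = wwnn,
 *		.port_name = wwpn,
 *		.port_id   = fc_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int err;
 *
 *	err = nvmet_fc_register_targetport(&pinfo, &my_tgt_template,
 *					   &pdev->dev, &targetport);
 *	if (err)
 *		return err;
 *	// hand 'targetport' back on later transport entry points such as
 *	// nvmet_fc_rcv_ls_req()
 */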
913
914
915 static void
916 nvmet_fc_free_tgtport(struct kref *ref)
917 {
918         struct nvmet_fc_tgtport *tgtport =
919                 container_of(ref, struct nvmet_fc_tgtport, ref);
920         struct device *dev = tgtport->dev;
921         unsigned long flags;
922
923         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
924         list_del(&tgtport->tgt_list);
925         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
926
927         nvmet_fc_free_ls_iodlist(tgtport);
928
929         /* let the LLDD know we've finished tearing it down */
930         tgtport->ops->targetport_delete(&tgtport->fc_target_port);
931
932         ida_simple_remove(&nvmet_fc_tgtport_cnt,
933                         tgtport->fc_target_port.port_num);
934
935         ida_destroy(&tgtport->assoc_cnt);
936
937         kfree(tgtport);
938
939         put_device(dev);
940 }
941
942 static void
943 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
944 {
945         kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
946 }
947
948 static int
949 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
950 {
951         return kref_get_unless_zero(&tgtport->ref);
952 }
953
954 static void
955 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
956 {
957         struct nvmet_fc_tgt_assoc *assoc, *next;
958         unsigned long flags;
959
960         spin_lock_irqsave(&tgtport->lock, flags);
961         list_for_each_entry_safe(assoc, next,
962                                 &tgtport->assoc_list, a_list) {
963                 if (!nvmet_fc_tgt_a_get(assoc))
964                         continue;
965                 spin_unlock_irqrestore(&tgtport->lock, flags);
966                 nvmet_fc_delete_target_assoc(assoc);
967                 nvmet_fc_tgt_a_put(assoc);
968                 spin_lock_irqsave(&tgtport->lock, flags);
969         }
970         spin_unlock_irqrestore(&tgtport->lock, flags);
971 }
972
973 /*
974  * nvmet layer has called to terminate an association
975  */
976 static void
977 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
978 {
979         struct nvmet_fc_tgtport *tgtport, *next;
980         struct nvmet_fc_tgt_assoc *assoc;
981         struct nvmet_fc_tgt_queue *queue;
982         unsigned long flags;
983         bool found_ctrl = false;
984
985         /* this is a bit ugly, but don't want to make locks layered */
986         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
987         list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
988                         tgt_list) {
989                 if (!nvmet_fc_tgtport_get(tgtport))
990                         continue;
991                 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
992
993                 spin_lock_irqsave(&tgtport->lock, flags);
994                 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
995                         queue = assoc->queues[0];
996                         if (queue && queue->nvme_sq.ctrl == ctrl) {
997                                 if (nvmet_fc_tgt_a_get(assoc))
998                                         found_ctrl = true;
999                                 break;
1000                         }
1001                 }
1002                 spin_unlock_irqrestore(&tgtport->lock, flags);
1003
1004                 nvmet_fc_tgtport_put(tgtport);
1005
1006                 if (found_ctrl) {
1007                         nvmet_fc_delete_target_assoc(assoc);
1008                         nvmet_fc_tgt_a_put(assoc);
1009                         return;
1010                 }
1011
1012                 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1013         }
1014         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1015 }
1016
1017 /**
1018  * nvmet_fc_unregister_targetport - transport entry point called by an
1019  *                              LLDD to deregister/remove a previously
1020  *                              registered local NVME subsystem FC port.
1021  * @target_port: pointer to the (registered) target port that is to be
1022  *           deregistered.
1023  *
1024  * Returns:
1025  * a completion status. Must be 0 upon success; a negative errno
1026  * (ex: -ENXIO) upon failure.
1027  */
1028 int
1029 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1030 {
1031         struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1032
1033         /* terminate any outstanding associations */
1034         __nvmet_fc_free_assocs(tgtport);
1035
1036         nvmet_fc_tgtport_put(tgtport);
1037
1038         return 0;
1039 }
1040 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1041
1042
1043 /* *********************** FC-NVME LS Handling **************************** */
1044
1045
1046 static void
1047 nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd)
1048 {
1049         struct fcnvme_ls_acc_hdr *acc = buf;
1050
1051         acc->w0.ls_cmd = ls_cmd;
1052         acc->desc_list_len = desc_len;
1053         acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
1054         acc->rqst.desc_len =
1055                         fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
1056         acc->rqst.w0.ls_cmd = rqst_ls_cmd;
1057 }
1058
1059 static int
1060 nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
1061                         u8 reason, u8 explanation, u8 vendor)
1062 {
1063         struct fcnvme_ls_rjt *rjt = buf;
1064
1065         nvmet_fc_format_rsp_hdr(buf, FCNVME_LSDESC_RQST,
1066                         fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
1067                         ls_cmd);
1068         rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
1069         rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
1070         rjt->rjt.reason_code = reason;
1071         rjt->rjt.reason_explanation = explanation;
1072         rjt->rjt.vendor = vendor;
1073
1074         return sizeof(struct fcnvme_ls_rjt);
1075 }
1076
1077 /* Validation Error indexes into the string table below */
1078 enum {
1079         VERR_NO_ERROR           = 0,
1080         VERR_CR_ASSOC_LEN       = 1,
1081         VERR_CR_ASSOC_RQST_LEN  = 2,
1082         VERR_CR_ASSOC_CMD       = 3,
1083         VERR_CR_ASSOC_CMD_LEN   = 4,
1084         VERR_ERSP_RATIO         = 5,
1085         VERR_ASSOC_ALLOC_FAIL   = 6,
1086         VERR_QUEUE_ALLOC_FAIL   = 7,
1087         VERR_CR_CONN_LEN        = 8,
1088         VERR_CR_CONN_RQST_LEN   = 9,
1089         VERR_ASSOC_ID           = 10,
1090         VERR_ASSOC_ID_LEN       = 11,
1091         VERR_NO_ASSOC           = 12,
1092         VERR_CONN_ID            = 13,
1093         VERR_CONN_ID_LEN        = 14,
1094         VERR_NO_CONN            = 15,
1095         VERR_CR_CONN_CMD        = 16,
1096         VERR_CR_CONN_CMD_LEN    = 17,
1097         VERR_DISCONN_LEN        = 18,
1098         VERR_DISCONN_RQST_LEN   = 19,
1099         VERR_DISCONN_CMD        = 20,
1100         VERR_DISCONN_CMD_LEN    = 21,
1101         VERR_DISCONN_SCOPE      = 22,
1102         VERR_RS_LEN             = 23,
1103         VERR_RS_RQST_LEN        = 24,
1104         VERR_RS_CMD             = 25,
1105         VERR_RS_CMD_LEN         = 26,
1106         VERR_RS_RCTL            = 27,
1107         VERR_RS_RO              = 28,
1108 };
1109
1110 static char *validation_errors[] = {
1111         "OK",
1112         "Bad CR_ASSOC Length",
1113         "Bad CR_ASSOC Rqst Length",
1114         "Not CR_ASSOC Cmd",
1115         "Bad CR_ASSOC Cmd Length",
1116         "Bad Ersp Ratio",
1117         "Association Allocation Failed",
1118         "Queue Allocation Failed",
1119         "Bad CR_CONN Length",
1120         "Bad CR_CONN Rqst Length",
1121         "Not Association ID",
1122         "Bad Association ID Length",
1123         "No Association",
1124         "Not Connection ID",
1125         "Bad Connection ID Length",
1126         "No Connection",
1127         "Not CR_CONN Cmd",
1128         "Bad CR_CONN Cmd Length",
1129         "Bad DISCONN Length",
1130         "Bad DISCONN Rqst Length",
1131         "Not DISCONN Cmd",
1132         "Bad DISCONN Cmd Length",
1133         "Bad Disconnect Scope",
1134         "Bad RS Length",
1135         "Bad RS Rqst Length",
1136         "Not RS Cmd",
1137         "Bad RS Cmd Length",
1138         "Bad RS R_CTL",
1139         "Bad RS Relative Offset",
1140 };
1141
1142 static void
1143 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1144                         struct nvmet_fc_ls_iod *iod)
1145 {
1146         struct fcnvme_ls_cr_assoc_rqst *rqst =
1147                                 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1148         struct fcnvme_ls_cr_assoc_acc *acc =
1149                                 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1150         struct nvmet_fc_tgt_queue *queue;
1151         int ret = 0;
1152
1153         memset(acc, 0, sizeof(*acc));
1154
1155         if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
1156                 ret = VERR_CR_ASSOC_LEN;
1157         else if (rqst->desc_list_len !=
1158                         fcnvme_lsdesc_len(
1159                                 sizeof(struct fcnvme_ls_cr_assoc_rqst)))
1160                 ret = VERR_CR_ASSOC_RQST_LEN;
1161         else if (rqst->assoc_cmd.desc_tag !=
1162                         cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1163                 ret = VERR_CR_ASSOC_CMD;
1164         else if (rqst->assoc_cmd.desc_len !=
1165                         fcnvme_lsdesc_len(
1166                                 sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
1167                 ret = VERR_CR_ASSOC_CMD_LEN;
1168         else if (!rqst->assoc_cmd.ersp_ratio ||
1169                  (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1170                                 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1171                 ret = VERR_ERSP_RATIO;
1172
1173         else {
1174                 /* new association w/ admin queue */
1175                 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1176                 if (!iod->assoc)
1177                         ret = VERR_ASSOC_ALLOC_FAIL;
1178                 else {
1179                         queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1180                                         be16_to_cpu(rqst->assoc_cmd.sqsize));
1181                         if (!queue)
1182                                 ret = VERR_QUEUE_ALLOC_FAIL;
1183                 }
1184         }
1185
1186         if (ret) {
1187                 dev_err(tgtport->dev,
1188                         "Create Association LS failed: %s\n",
1189                         validation_errors[ret]);
1190                 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1191                                 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1192                                 FCNVME_RJT_RC_LOGIC,
1193                                 FCNVME_RJT_EXP_NONE, 0);
1194                 return;
1195         }
1196
1197         queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1198         atomic_set(&queue->connected, 1);
1199         queue->sqhd = 0;        /* best place to init value */
1200
1201         /* format a response */
1202
1203         iod->lsreq->rsplen = sizeof(*acc);
1204
1205         nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1206                         fcnvme_lsdesc_len(
1207                                 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1208                         FCNVME_LS_CREATE_ASSOCIATION);
1209         acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1210         acc->associd.desc_len =
1211                         fcnvme_lsdesc_len(
1212                                 sizeof(struct fcnvme_lsdesc_assoc_id));
1213         acc->associd.association_id =
1214                         cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1215         acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1216         acc->connectid.desc_len =
1217                         fcnvme_lsdesc_len(
1218                                 sizeof(struct fcnvme_lsdesc_conn_id));
1219         acc->connectid.connection_id = acc->associd.association_id;
1220 }
1221
1222 static void
1223 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1224                         struct nvmet_fc_ls_iod *iod)
1225 {
1226         struct fcnvme_ls_cr_conn_rqst *rqst =
1227                                 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1228         struct fcnvme_ls_cr_conn_acc *acc =
1229                                 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1230         struct nvmet_fc_tgt_queue *queue;
1231         int ret = 0;
1232
1233         memset(acc, 0, sizeof(*acc));
1234
1235         if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1236                 ret = VERR_CR_CONN_LEN;
1237         else if (rqst->desc_list_len !=
1238                         fcnvme_lsdesc_len(
1239                                 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1240                 ret = VERR_CR_CONN_RQST_LEN;
1241         else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1242                 ret = VERR_ASSOC_ID;
1243         else if (rqst->associd.desc_len !=
1244                         fcnvme_lsdesc_len(
1245                                 sizeof(struct fcnvme_lsdesc_assoc_id)))
1246                 ret = VERR_ASSOC_ID_LEN;
1247         else if (rqst->connect_cmd.desc_tag !=
1248                         cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1249                 ret = VERR_CR_CONN_CMD;
1250         else if (rqst->connect_cmd.desc_len !=
1251                         fcnvme_lsdesc_len(
1252                                 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1253                 ret = VERR_CR_CONN_CMD_LEN;
1254         else if (!rqst->connect_cmd.ersp_ratio ||
1255                  (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1256                                 be16_to_cpu(rqst->connect_cmd.sqsize)))
1257                 ret = VERR_ERSP_RATIO;
1258
1259         else {
1260                 /* new io queue */
1261                 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1262                                 be64_to_cpu(rqst->associd.association_id));
1263                 if (!iod->assoc)
1264                         ret = VERR_NO_ASSOC;
1265                 else {
1266                         queue = nvmet_fc_alloc_target_queue(iod->assoc,
1267                                         be16_to_cpu(rqst->connect_cmd.qid),
1268                                         be16_to_cpu(rqst->connect_cmd.sqsize));
1269                         if (!queue)
1270                                 ret = VERR_QUEUE_ALLOC_FAIL;
1271
1272                         /* release get taken in nvmet_fc_find_target_assoc */
1273                         nvmet_fc_tgt_a_put(iod->assoc);
1274                 }
1275         }
1276
1277         if (ret) {
1278                 dev_err(tgtport->dev,
1279                         "Create Connection LS failed: %s\n",
1280                         validation_errors[ret]);
1281                 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1282                                 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1283                                 (ret == VERR_NO_ASSOC) ?
1284                                         FCNVME_RJT_RC_INV_ASSOC :
1285                                         FCNVME_RJT_RC_LOGIC,
1286                                 FCNVME_RJT_EXP_NONE, 0);
1287                 return;
1288         }
1289
1290         queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1291         atomic_set(&queue->connected, 1);
1292         queue->sqhd = 0;        /* best place to init value */
1293
1294         /* format a response */
1295
1296         iod->lsreq->rsplen = sizeof(*acc);
1297
1298         nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1299                         fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1300                         FCNVME_LS_CREATE_CONNECTION);
1301         acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1302         acc->connectid.desc_len =
1303                         fcnvme_lsdesc_len(
1304                                 sizeof(struct fcnvme_lsdesc_conn_id));
1305         acc->connectid.connection_id =
1306                         cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1307                                 be16_to_cpu(rqst->connect_cmd.qid)));
1308 }
1309
1310 static void
1311 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1312                         struct nvmet_fc_ls_iod *iod)
1313 {
1314         struct fcnvme_ls_disconnect_rqst *rqst =
1315                         (struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
1316         struct fcnvme_ls_disconnect_acc *acc =
1317                         (struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
1318         struct nvmet_fc_tgt_queue *queue = NULL;
1319         struct nvmet_fc_tgt_assoc *assoc;
1320         int ret = 0;
1321         bool del_assoc = false;
1322
1323         memset(acc, 0, sizeof(*acc));
1324
1325         if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
1326                 ret = VERR_DISCONN_LEN;
1327         else if (rqst->desc_list_len !=
1328                         fcnvme_lsdesc_len(
1329                                 sizeof(struct fcnvme_ls_disconnect_rqst)))
1330                 ret = VERR_DISCONN_RQST_LEN;
1331         else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1332                 ret = VERR_ASSOC_ID;
1333         else if (rqst->associd.desc_len !=
1334                         fcnvme_lsdesc_len(
1335                                 sizeof(struct fcnvme_lsdesc_assoc_id)))
1336                 ret = VERR_ASSOC_ID_LEN;
1337         else if (rqst->discon_cmd.desc_tag !=
1338                         cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1339                 ret = VERR_DISCONN_CMD;
1340         else if (rqst->discon_cmd.desc_len !=
1341                         fcnvme_lsdesc_len(
1342                                 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1343                 ret = VERR_DISCONN_CMD_LEN;
1344         else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
1345                         (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
1346                 ret = VERR_DISCONN_SCOPE;
1347         else {
1348                 /* match an active association */
1349                 assoc = nvmet_fc_find_target_assoc(tgtport,
1350                                 be64_to_cpu(rqst->associd.association_id));
1351                 iod->assoc = assoc;
1352                 if (assoc) {
1353                         if (rqst->discon_cmd.scope ==
1354                                         FCNVME_DISCONN_CONNECTION) {
1355                                 queue = nvmet_fc_find_target_queue(tgtport,
1356                                                 be64_to_cpu(
1357                                                         rqst->discon_cmd.id));
1358                                 if (!queue) {
1359                                         nvmet_fc_tgt_a_put(assoc);
1360                                         ret = VERR_NO_CONN;
1361                                 }
1362                         }
1363                 } else
1364                         ret = VERR_NO_ASSOC;
1365         }
1366
1367         if (ret) {
1368                 dev_err(tgtport->dev,
1369                         "Disconnect LS failed: %s\n",
1370                         validation_errors[ret]);
1371                 iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
1372                                 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1373                                 (ret == VERR_NO_ASSOC) ?
1374                                         FCNVME_RJT_RC_INV_ASSOC :
1375                                         (ret == VERR_NO_CONN) ?
1376                                                 FCNVME_RJT_RC_INV_CONN :
1377                                                 FCNVME_RJT_RC_LOGIC,
1378                                 FCNVME_RJT_EXP_NONE, 0);
1379                 return;
1380         }
1381
1382         /* format a response */
1383
1384         iod->lsreq->rsplen = sizeof(*acc);
1385
1386         nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1387                         fcnvme_lsdesc_len(
1388                                 sizeof(struct fcnvme_ls_disconnect_acc)),
1389                         FCNVME_LS_DISCONNECT);
1390
1391
1392         /* are we to delete a Connection ID (queue) */
1393         if (queue) {
1394                 int qid = queue->qid;
1395
1396                 nvmet_fc_delete_target_queue(queue);
1397
1398                 /* release the get taken by find_target_queue */
1399                 nvmet_fc_tgt_q_put(queue);
1400
1401                 /* tear association down if the admin queue (qid 0) was terminated */
1402                 if (!qid)
1403                         del_assoc = true;
1404         }
1405
1406         /* release get taken in nvmet_fc_find_target_assoc */
1407         nvmet_fc_tgt_a_put(iod->assoc);
1408
1409         if (del_assoc)
1410                 nvmet_fc_delete_target_assoc(iod->assoc);
1411 }
1412
1413
1414 /* *********************** NVME Ctrl Routines **************************** */
1415
1416
1417 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1418
1419 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1420
1421 static void
1422 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
1423 {
1424         struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
1425         struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1426
1427         fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1428                                 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1429         nvmet_fc_free_ls_iod(tgtport, iod);
1430         nvmet_fc_tgtport_put(tgtport);
1431 }
1432
1433 static void
1434 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1435                                 struct nvmet_fc_ls_iod *iod)
1436 {
1437         int ret;
1438
1439         fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1440                                   NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1441
1442         ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
1443         if (ret)
1444                 nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
1445 }
1446
1447 /*
1448  * Actual processing routine for received FC-NVME LS Requests from the LLD
1449  */
1450 static void
1451 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1452                         struct nvmet_fc_ls_iod *iod)
1453 {
1454         struct fcnvme_ls_rqst_w0 *w0 =
1455                         (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1456
1457         iod->lsreq->nvmet_fc_private = iod;
1458         iod->lsreq->rspbuf = iod->rspbuf;
1459         iod->lsreq->rspdma = iod->rspdma;
1460         iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
1461         /* Be preventative. Handlers will later set to a valid length. */
1462         iod->lsreq->rsplen = 0;
1463
1464         iod->assoc = NULL;
1465
1466         /*
1467          * handlers:
1468          *   parse request input, execute the request, and format the
1469          *   LS response
1470          */
1471         switch (w0->ls_cmd) {
1472         case FCNVME_LS_CREATE_ASSOCIATION:
1473                 /* Creates Association and initial Admin Queue/Connection */
1474                 nvmet_fc_ls_create_association(tgtport, iod);
1475                 break;
1476         case FCNVME_LS_CREATE_CONNECTION:
1477                 /* Creates an IO Queue/Connection */
1478                 nvmet_fc_ls_create_connection(tgtport, iod);
1479                 break;
1480         case FCNVME_LS_DISCONNECT:
1481                 /* Terminate a Queue/Connection or the Association */
1482                 nvmet_fc_ls_disconnect(tgtport, iod);
1483                 break;
1484         default:
1485                 iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
1486                                 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1487                                 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1488         }
1489
1490         nvmet_fc_xmt_ls_rsp(tgtport, iod);
1491 }
1492
1493 /*
1494  * Work-queue wrapper that invokes the LS request handler above
1495  */
1496 static void
1497 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1498 {
1499         struct nvmet_fc_ls_iod *iod =
1500                 container_of(work, struct nvmet_fc_ls_iod, work);
1501         struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1502
1503         nvmet_fc_handle_ls_rqst(tgtport, iod);
1504 }
1505
1506
1507 /**
1508  * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1509  *                       upon the reception of an NVME LS request.
1510  *
1511  * The nvmet-fc layer will copy the payload to an internal structure for
1512  * processing.  As such, upon completion of the routine, the LLDD may
1513  * immediately free/reuse the LS request buffer passed in the call.
1514  *
1515  * If this routine returns an error, the LLDD should abort the exchange.
1516  *
1517  * @target_port: pointer to the (registered) target port the LS was
1518  *              received on.
1519  * @lsreq:      pointer to an lsreq request structure to be used to reference
1520  *              the exchange corresponding to the LS.
1521  * @lsreqbuf:   pointer to the buffer containing the LS Request
1522  * @lsreqbuf_len: length, in bytes, of the received LS request
1523  */
1524 int
1525 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1526                         struct nvmefc_tgt_ls_req *lsreq,
1527                         void *lsreqbuf, u32 lsreqbuf_len)
1528 {
1529         struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1530         struct nvmet_fc_ls_iod *iod;
1531
1532         if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1533                 return -E2BIG;
1534
1535         if (!nvmet_fc_tgtport_get(tgtport))
1536                 return -ESHUTDOWN;
1537
1538         iod = nvmet_fc_alloc_ls_iod(tgtport);
1539         if (!iod) {
1540                 nvmet_fc_tgtport_put(tgtport);
1541                 return -ENOENT;
1542         }
1543
1544         iod->lsreq = lsreq;
1545         iod->fcpreq = NULL;
1546         memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1547         iod->rqstdatalen = lsreqbuf_len;
1548
1549         schedule_work(&iod->work);
1550
1551         return 0;
1552 }
1553 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
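
/*
 * Illustrative sketch (not part of this driver): one way an LLDD might feed
 * a received LS frame into nvmet_fc_rcv_ls_req().  The names below
 * ("example_ls_ctx", "example_lldd_recv_ls") are hypothetical; a real LLDD
 * would use its own per-exchange context and its own abort path on failure.
 */
struct example_ls_ctx {
        struct nvmefc_tgt_ls_req        ls_req;  /* handed to nvmet-fc */
        /* ... LLDD per-exchange bookkeeping would live here ... */
};

static int
example_lldd_recv_ls(struct nvmet_fc_target_port *tgt_port,
                     struct example_ls_ctx *ctx,
                     void *ls_payload, u32 ls_len)
{
        int ret;

        /* nvmet-fc copies the payload, so ls_payload may be reused on return */
        ret = nvmet_fc_rcv_ls_req(tgt_port, &ctx->ls_req, ls_payload, ls_len);
        if (ret)
                pr_info("example: LS rejected by nvmet-fc (%d), abort exchange\n",
                        ret);
        return ret;
}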
1554
1555
1556 /*
1557  * **********************
1558  * Start of FCP handling
1559  * **********************
1560  */
1561
1562 static int
1563 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1564 {
1565         struct scatterlist *sg;
1566         struct page *page;
1567         unsigned int nent;
1568         u32 page_len, length;
1569         int i = 0;
1570
1571         length = fod->total_length;
1572         nent = DIV_ROUND_UP(length, PAGE_SIZE);
1573         sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
1574         if (!sg)
1575                 goto out;
1576
1577         sg_init_table(sg, nent);
1578
1579         while (length) {
1580                 page_len = min_t(u32, length, PAGE_SIZE);
1581
1582                 page = alloc_page(GFP_KERNEL);
1583                 if (!page)
1584                         goto out_free_pages;
1585
1586                 sg_set_page(&sg[i], page, page_len, 0);
1587                 length -= page_len;
1588                 i++;
1589         }
1590
1591         fod->data_sg = sg;
1592         fod->data_sg_cnt = nent;
1593         fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1594                                 ((fod->io_dir == NVMET_FCP_WRITE) ?
1595                                         DMA_FROM_DEVICE : DMA_TO_DEVICE));
1596                                 /* note: write from initiator perspective */
1597
1598         return 0;
1599
1600 out_free_pages:
1601         while (i > 0) {
1602                 i--;
1603                 __free_page(sg_page(&sg[i]));
1604         }
1605         kfree(sg);
1606         fod->data_sg = NULL;
1607         fod->data_sg_cnt = 0;
1608 out:
1609         return NVME_SC_INTERNAL;
1610 }
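
/*
 * Worked example for the allocation above (illustrative, assuming 4 KB
 * pages): a command with total_length = 10000 bytes yields
 * nent = DIV_ROUND_UP(10000, 4096) = 3 scatterlist entries of 4096, 4096
 * and 1808 bytes, which are then DMA-mapped in the direction implied by
 * io_dir (a host "write" is received data, hence DMA_FROM_DEVICE).
 */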
1611
1612 static void
1613 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1614 {
1615         struct scatterlist *sg;
1616         int count;
1617
1618         if (!fod->data_sg || !fod->data_sg_cnt)
1619                 return;
1620
1621         fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1622                                 ((fod->io_dir == NVMET_FCP_WRITE) ?
1623                                         DMA_FROM_DEVICE : DMA_TO_DEVICE));
1624         for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
1625                 __free_page(sg_page(sg));
1626         kfree(fod->data_sg);
1627         fod->data_sg = NULL;
1628         fod->data_sg_cnt = 0;
1629 }
1630
1631
1632 static bool
1633 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1634 {
1635         u32 sqtail, used;
1636
1637         /* egad, this is ugly. And sqtail is just a best guess */
1638         sqtail = atomic_read(&q->sqtail) % q->sqsize;
1639
1640         used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1641         return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
1642 }
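
/*
 * Worked example for the heuristic above (illustrative numbers): with
 * sqsize = 32, sqhd = 2 and a sampled sqtail of 31, used = 31 - 2 = 29 and
 * the test becomes 29 * 10 >= (32 - 1) * 9, i.e. 290 >= 279, so the queue
 * is treated as 90%+ full and the caller will force a full ersp.
 */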
1643
1644 /*
1645  * Prep RSP payload.
1646  * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1647  */
1648 static void
1649 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1650                                 struct nvmet_fc_fcp_iod *fod)
1651 {
1652         struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1653         struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1654         struct nvme_completion *cqe = &ersp->cqe;
1655         u32 *cqewd = (u32 *)cqe;
1656         bool send_ersp = false;
1657         u32 rsn, rspcnt, xfr_length;
1658
1659         if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1660                 xfr_length = fod->total_length;
1661         else
1662                 xfr_length = fod->offset;
1663
1664         /*
1665          * check to see if we can send a 0's rsp.
1666          *   Note: to send a 0's response, the NVME-FC host transport will
1667          *   recreate the CQE. The host transport knows: sq id, SQHD (last
1668          *   seen in an ersp), and command_id. Thus it will create a
1669          *   zero-filled CQE with those known fields filled in. Transport
1670          *   must send an ersp for any condition where the cqe won't match
1671          *   this.
1672          *
1673          * Here are the FC-NVME mandated cases where we must send an ersp:
1674          *  every N responses, where N=ersp_ratio
1675          *  force fabric commands to send ersp's (not in FC-NVME but good
1676          *    practice)
1677          *  normal cmds: any time status is non-zero, or status is zero
1678          *     but words 0 or 1 are non-zero.
1679          *  the SQ is 90% or more full
1680          *  the cmd is a fused command
1681          *  transferred data length not equal to cmd iu length
1682          */
1683         rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1684         if (!(rspcnt % fod->queue->ersp_ratio) ||
1685             sqe->opcode == nvme_fabrics_command ||
1686             xfr_length != fod->total_length ||
1687             (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1688             (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1689             queue_90percent_full(fod->queue, cqe->sq_head))
1690                 send_ersp = true;
1691
1692         /* re-set the fields */
1693         fod->fcpreq->rspaddr = ersp;
1694         fod->fcpreq->rspdma = fod->rspdma;
1695
1696         if (!send_ersp) {
1697                 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1698                 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1699         } else {
1700                 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1701                 rsn = atomic_inc_return(&fod->queue->rsn);
1702                 ersp->rsn = cpu_to_be32(rsn);
1703                 ersp->xfrd_len = cpu_to_be32(xfr_length);
1704                 fod->fcpreq->rsplen = sizeof(*ersp);
1705         }
1706
1707         fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1708                                   sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1709 }
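
/*
 * Illustrative example of the ersp decision above (values hypothetical):
 * with an ersp_ratio of 8, a stream of successful, non-fused reads whose
 * transferred length matches the CMD IU length gets the short zero-filled
 * response (NVME_FC_SIZEOF_ZEROS_RSP bytes) most of the time; every 8th
 * response, or any response with a non-zero status/CQE words, a fabrics
 * command, or a 90%-full SQ, is sent as a full ersp carrying the RSN,
 * transferred length and CQE.
 */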
1710
1711 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1712
1713 static void
1714 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1715                                 struct nvmet_fc_fcp_iod *fod)
1716 {
1717         int ret;
1718
1719         fod->fcpreq->op = NVMET_FCOP_RSP;
1720         fod->fcpreq->timeout = 0;
1721
1722         nvmet_fc_prep_fcp_rsp(tgtport, fod);
1723
1724         ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1725         if (ret)
1726                 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1727 }
1728
1729 static void
1730 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1731                                 struct nvmet_fc_fcp_iod *fod, u8 op)
1732 {
1733         struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1734         struct scatterlist *sg, *datasg;
1735         u32 tlen, sg_off;
1736         int ret;
1737
1738         fcpreq->op = op;
1739         fcpreq->offset = fod->offset;
1740         fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1741         tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
1742                         (fod->total_length - fod->offset));
1743         tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
1744         tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
1745                                         * PAGE_SIZE);
1746         fcpreq->transfer_length = tlen;
1747         fcpreq->transferred_length = 0;
1748         fcpreq->fcp_error = 0;
1749         fcpreq->rsplen = 0;
1750
1751         fcpreq->sg_cnt = 0;
1752
1753         datasg = fod->next_sg;
1754         sg_off = fod->next_sg_offset;
1755
1756         for (sg = fcpreq->sg ; tlen; sg++) {
1757                 *sg = *datasg;
1758                 if (sg_off) {
1759                         sg->offset += sg_off;
1760                         sg->length -= sg_off;
1761                         sg->dma_address += sg_off;
1762                         sg_off = 0;
1763                 }
1764                 if (tlen < sg->length) {
1765                         sg->length = tlen;
1766                         fod->next_sg = datasg;
1767                         fod->next_sg_offset += tlen;
1768                 } else if (tlen == sg->length) {
1769                         fod->next_sg_offset = 0;
1770                         fod->next_sg = sg_next(datasg);
1771                 } else {
1772                         fod->next_sg_offset = 0;
1773                         datasg = sg_next(datasg);
1774                 }
1775                 tlen -= sg->length;
1776                 fcpreq->sg_cnt++;
1777         }
1778
1779         /*
1780          * If the last READDATA request: check if LLDD supports
1781          * combined xfr with response.
1782          */
1783         if ((op == NVMET_FCOP_READDATA) &&
1784             ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
1785             (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1786                 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1787                 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1788         }
1789
1790         ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1791         if (ret) {
1792                 /*
1793                  * Should be OK to set without the lock, as this is in the
1794                  * thread of execution (not an async timer routine) and
1795                  * doesn't contend with any clearing action.
1796                  */
1797                 fod->abort = true;
1798
1799                 if (op == NVMET_FCOP_WRITEDATA)
1800                         nvmet_req_complete(&fod->req,
1801                                         NVME_SC_FC_TRANSPORT_ERROR);
1802                 else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1803                         fcpreq->fcp_error = ret;
1804                         fcpreq->transferred_length = 0;
1805                         nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1806                 }
1807         }
1808 }
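
/*
 * Worked example for the chunking above (illustrative; assumes neither
 * NVME_FC_MAX_SEGMENTS nor the LLDD's max_sgl_segments imposes a smaller
 * limit): a 1 MB read with NVMET_FC_MAX_KB_PER_XFR = 256 is issued as four
 * 256 KB READDATA operations at offsets 0, 256K, 512K and 768K.  If the
 * LLDD advertises NVMET_FCTGTFEAT_READDATA_RSP, the final chunk is turned
 * into a single READDATA_RSP that carries both the last burst and the
 * response IU.
 */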
1809
1810 static void
1811 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
1812 {
1813         struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
1814         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1815         unsigned long flags;
1816         bool abort;
1817
1818         spin_lock_irqsave(&fod->flock, flags);
1819         abort = fod->abort;
1820         spin_unlock_irqrestore(&fod->flock, flags);
1821
1822         /* if in the middle of an io and we need to tear down */
1823         if (abort && fcpreq->op != NVMET_FCOP_ABORT) {
1824                 /* data no longer needed */
1825                 nvmet_fc_free_tgt_pgs(fod);
1826
1827                 nvmet_req_complete(&fod->req, fcpreq->fcp_error);
1828                 return;
1829         }
1830
1831         switch (fcpreq->op) {
1832
1833         case NVMET_FCOP_WRITEDATA:
1834                 if (fcpreq->fcp_error ||
1835                     fcpreq->transferred_length != fcpreq->transfer_length) {
1836                         nvmet_req_complete(&fod->req,
1837                                         NVME_SC_FC_TRANSPORT_ERROR);
1838                         return;
1839                 }
1840
1841                 fod->offset += fcpreq->transferred_length;
1842                 if (fod->offset != fod->total_length) {
1843                         /* transfer the next chunk */
1844                         nvmet_fc_transfer_fcp_data(tgtport, fod,
1845                                                 NVMET_FCOP_WRITEDATA);
1846                         return;
1847                 }
1848
1849                 /* data transfer complete, resume with nvmet layer */
1850
1851                 fod->req.execute(&fod->req);
1852
1853                 break;
1854
1855         case NVMET_FCOP_READDATA:
1856         case NVMET_FCOP_READDATA_RSP:
1857                 if (fcpreq->fcp_error ||
1858                     fcpreq->transferred_length != fcpreq->transfer_length) {
1859                         /* data no longer needed */
1860                         nvmet_fc_free_tgt_pgs(fod);
1861
1862                         nvmet_fc_abort_op(tgtport, fod->fcpreq);
1863                         return;
1864                 }
1865
1866                 /* success */
1867
1868                 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
1869                         /* data no longer needed */
1870                         nvmet_fc_free_tgt_pgs(fod);
1871                         fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
1872                                         sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1873                         nvmet_fc_free_fcp_iod(fod->queue, fod);
1874                         return;
1875                 }
1876
1877                 fod->offset += fcpreq->transferred_length;
1878                 if (fod->offset != fod->total_length) {
1879                         /* transfer the next chunk */
1880                         nvmet_fc_transfer_fcp_data(tgtport, fod,
1881                                                 NVMET_FCOP_READDATA);
1882                         return;
1883                 }
1884
1885                 /* data transfer complete, send response */
1886
1887                 /* data no longer needed */
1888                 nvmet_fc_free_tgt_pgs(fod);
1889
1890                 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1891
1892                 break;
1893
1894         case NVMET_FCOP_RSP:
1895         case NVMET_FCOP_ABORT:
1896                 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
1897                                 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1898                 nvmet_fc_free_fcp_iod(fod->queue, fod);
1899                 break;
1900
1901         default:
1902                 nvmet_fc_free_tgt_pgs(fod);
1903                 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1904                 break;
1905         }
1906 }
1907
1908 /*
1909  * actual completion handler after execution by the nvmet layer
1910  */
1911 static void
1912 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
1913                         struct nvmet_fc_fcp_iod *fod, int status)
1914 {
1915         struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1916         struct nvme_completion *cqe = &fod->rspiubuf.cqe;
1917         unsigned long flags;
1918         bool abort;
1919
1920         spin_lock_irqsave(&fod->flock, flags);
1921         abort = fod->abort;
1922         spin_unlock_irqrestore(&fod->flock, flags);
1923
1924         /* if we have a CQE, snoop the last sq_head value */
1925         if (!status)
1926                 fod->queue->sqhd = cqe->sq_head;
1927
1928         if (abort) {
1929                 /* data no longer needed */
1930                 nvmet_fc_free_tgt_pgs(fod);
1931
1932                 nvmet_fc_abort_op(tgtport, fod->fcpreq);
1933                 return;
1934         }
1935
1936         /* an error occurred handling the cmd after initial parsing */
1937         if (status) {
1938                 /* fudge up a failed CQE status for our transport error */
1939                 memset(cqe, 0, sizeof(*cqe));
1940                 cqe->sq_head = fod->queue->sqhd;        /* echo last cqe sqhd */
1941                 cqe->sq_id = cpu_to_le16(fod->queue->qid);
1942                 cqe->command_id = sqe->command_id;
1943                 cqe->status = cpu_to_le16(status);
1944         } else {
1945
1946                 /*
1947                  * try to push the data even if the SQE status is non-zero.
1948                  * There may be statuses for which data was still intended
1949                  * to be moved.
1950                  */
1951                 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
1952                         /* push the data over before sending rsp */
1953                         nvmet_fc_transfer_fcp_data(tgtport, fod,
1954                                                 NVMET_FCOP_READDATA);
1955                         return;
1956                 }
1957
1958                 /* writes & no data - fall thru */
1959         }
1960
1961         /* data no longer needed */
1962         nvmet_fc_free_tgt_pgs(fod);
1963
1964         nvmet_fc_xmt_fcp_rsp(tgtport, fod);
1965 }
1966
1967
1968 static void
1969 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
1970 {
1971         struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
1972         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1973
1974         __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
1975 }
1976
1977
1978 /*
1979  * Actual processing routine for FC-NVME FCP commands received from the LLDD
1980  */
1981 void
1982 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
1983                         struct nvmet_fc_fcp_iod *fod)
1984 {
1985         struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
1986         int ret;
1987
1988         /*
1989          * Fused commands are currently not supported in the Linux
1990          * implementation.
1991          *
1992          * As such, the FC transport does not inspect fused commands, nor
1993          * does it hold and order their delivery to the upper layer (based
1994          * on CSN) until both halves have been received.
1995          */
1996
1997         fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
1998
1999         fod->total_length = be32_to_cpu(cmdiu->data_len);
2000         if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2001                 fod->io_dir = NVMET_FCP_WRITE;
2002                 if (!nvme_is_write(&cmdiu->sqe))
2003                         goto transport_error;
2004         } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2005                 fod->io_dir = NVMET_FCP_READ;
2006                 if (nvme_is_write(&cmdiu->sqe))
2007                         goto transport_error;
2008         } else {
2009                 fod->io_dir = NVMET_FCP_NODATA;
2010                 if (fod->total_length)
2011                         goto transport_error;
2012         }
2013
2014         fod->req.cmd = &fod->cmdiubuf.sqe;
2015         fod->req.rsp = &fod->rspiubuf.cqe;
2016         fod->req.port = fod->queue->port;
2017
2018         /* ensure nvmet handlers will set cmd handler callback */
2019         fod->req.execute = NULL;
2020
2021         /* clear any response payload */
2022         memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2023
2024         ret = nvmet_req_init(&fod->req,
2025                                 &fod->queue->nvme_cq,
2026                                 &fod->queue->nvme_sq,
2027                                 &nvmet_fc_tgt_fcp_ops);
2028         if (!ret) {     /* bad SQE content */
2029                 nvmet_fc_abort_op(tgtport, fod->fcpreq);
2030                 return;
2031         }
2032
2033         /* keep a running counter of tail position */
2034         atomic_inc(&fod->queue->sqtail);
2035
2036         fod->data_sg = NULL;
2037         fod->data_sg_cnt = 0;
2038         if (fod->total_length) {
2039                 ret = nvmet_fc_alloc_tgt_pgs(fod);
2040                 if (ret) {
2041                         nvmet_req_complete(&fod->req, ret);
2042                         return;
2043                 }
2044         }
2045         fod->req.sg = fod->data_sg;
2046         fod->req.sg_cnt = fod->data_sg_cnt;
2047         fod->offset = 0;
2048         fod->next_sg = fod->data_sg;
2049         fod->next_sg_offset = 0;
2050
2051         if (fod->io_dir == NVMET_FCP_WRITE) {
2052                 /* pull the data over before invoking nvmet layer */
2053                 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2054                 return;
2055         }
2056
2057         /*
2058          * Reads or no data:
2059          *
2060          * can invoke the nvmet_layer now. If read data, cmd completion will
2061          * push the data
2062          */
2063
2064         fod->req.execute(&fod->req);
2065
2066         return;
2067
2068 transport_error:
2069         nvmet_fc_abort_op(tgtport, fod->fcpreq);
2070 }
2071
2072 /*
2073  * Work-queue wrapper that invokes the FCP command handler above
2074  */
2075 static void
2076 nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
2077 {
2078         struct nvmet_fc_fcp_iod *fod =
2079                 container_of(work, struct nvmet_fc_fcp_iod, work);
2080         struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2081
2082         nvmet_fc_handle_fcp_rqst(tgtport, fod);
2083 }
2084
2085 /**
2086  * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2087  *                       upon the reception of an NVME FCP CMD IU.
2088  *
2089  * Pass an FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2090  * layer for processing.
2091  *
2092  * The nvmet-fc layer will copy the CMD IU payload to an internal structure
2093  * for processing.  As such, upon completion of the routine, the LLDD may
2094  * immediately free/reuse the CMD IU buffer passed in the call.
2095  *
2096  * If this routine returns an error, the LLDD should abort the exchange.
2097  *
2098  * @target_port: pointer to the (registered) target port the FCP CMD IU
2099  *              was received on.
2100  * @fcpreq:     pointer to an fcpreq request structure to be used to reference
2101  *              the FCP exchange corresponding to the command.
2102  * @cmdiubuf:   pointer to the buffer containing the FCP CMD IU
2103  * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2104  */
2105 int
2106 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2107                         struct nvmefc_tgt_fcp_req *fcpreq,
2108                         void *cmdiubuf, u32 cmdiubuf_len)
2109 {
2110         struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2111         struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2112         struct nvmet_fc_tgt_queue *queue;
2113         struct nvmet_fc_fcp_iod *fod;
2114
2115         /* validate iu, so the connection id can be used to find the queue */
2116         if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2117                         (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
2118                         (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2119                         (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2120                 return -EIO;
2121
2122
2123         queue = nvmet_fc_find_target_queue(tgtport,
2124                                 be64_to_cpu(cmdiu->connection_id));
2125         if (!queue)
2126                 return -ENOTCONN;
2127
2128         /*
2129          * note: the queue reference was taken by find_target_queue.
2130          * After successful fod allocation, the fod inherits ownership
2131          * of that reference and releases it when the fod is freed.
2133          */
2134
2135         fod = nvmet_fc_alloc_fcp_iod(queue);
2136         if (!fod) {
2137                 /* release the queue lookup reference */
2138                 nvmet_fc_tgt_q_put(queue);
2139                 return -ENOENT;
2140         }
2141
2142         fcpreq->nvmet_fc_private = fod;
2143         fod->fcpreq = fcpreq;
2144         /*
2145          * put all admin cmds on hw queue id 0. All io commands are
2146          * spread across the LLDD's hw queues on a modulo basis.
2147          */
2148         fcpreq->hwqid = queue->qid ?
2149                         ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
2150         memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2151
2152         queue_work_on(queue->cpu, queue->work_q, &fod->work);
2153
2154         return 0;
2155 }
2156 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
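
/*
 * Illustrative sketch (not part of this driver): one way an LLDD might hand
 * a received FCP CMD IU to nvmet_fc_rcv_fcp_req().  "example_fcp_ctx" and
 * "example_lldd_recv_fcp_cmd" are hypothetical names.  The CMD IU buffer is
 * copied by nvmet-fc, so the LLDD may reuse it once this call returns; on a
 * non-zero return the LLDD is expected to abort the exchange.
 */
struct example_fcp_ctx {
        struct nvmefc_tgt_fcp_req       fcp_req;  /* handed to nvmet-fc */
        /* ... LLDD per-exchange bookkeeping would live here ... */
};

static int
example_lldd_recv_fcp_cmd(struct nvmet_fc_target_port *tgt_port,
                          struct example_fcp_ctx *ctx,
                          void *cmd_iu, u32 cmd_iu_len)
{
        /*
         * nvmet-fc validates the IU, resolves the queue from the
         * connection_id, picks an hwqid on a modulo basis, and schedules
         * the command on the queue's cpu/work queue.
         */
        return nvmet_fc_rcv_fcp_req(tgt_port, &ctx->fcp_req,
                                    cmd_iu, cmd_iu_len);
}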
2157
2158 enum {
2159         FCT_TRADDR_ERR          = 0,
2160         FCT_TRADDR_WWNN         = 1 << 0,
2161         FCT_TRADDR_WWPN         = 1 << 1,
2162 };
2163
2164 struct nvmet_fc_traddr {
2165         u64     nn;
2166         u64     pn;
2167 };
2168
2169 static const match_table_t traddr_opt_tokens = {
2170         { FCT_TRADDR_WWNN,      "nn-%s"         },
2171         { FCT_TRADDR_WWPN,      "pn-%s"         },
2172         { FCT_TRADDR_ERR,       NULL            }
2173 };
2174
2175 static int
2176 nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
2177 {
2178         substring_t args[MAX_OPT_ARGS];
2179         char *options, *o, *p;
2180         int token, ret = 0;
2181         u64 token64;
2182
2183         options = o = kstrdup(buf, GFP_KERNEL);
2184         if (!options)
2185                 return -ENOMEM;
2186
2187         while ((p = strsep(&o, ",\n")) != NULL) {
2188                 if (!*p)
2189                         continue;
2190
2191                 token = match_token(p, traddr_opt_tokens, args);
2192                 switch (token) {
2193                 case FCT_TRADDR_WWNN:
2194                         if (match_u64(args, &token64)) {
2195                                 ret = -EINVAL;
2196                                 goto out;
2197                         }
2198                         traddr->nn = token64;
2199                         break;
2200                 case FCT_TRADDR_WWPN:
2201                         if (match_u64(args, &token64)) {
2202                                 ret = -EINVAL;
2203                                 goto out;
2204                         }
2205                         traddr->pn = token64;
2206                         break;
2207                 default:
2208                         pr_warn("unknown traddr token or missing value '%s'\n",
2209                                         p);
2210                         ret = -EINVAL;
2211                         goto out;
2212                 }
2213         }
2214
2215 out:
2216         kfree(options);
2217         return ret;
2218 }
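
/*
 * Illustrative example (WWNN/WWPN values hypothetical): for this parser a
 * traddr string of the form
 *
 *      "nn-0x20000090fa942779,pn-0x10000090fa942779"
 *
 * is split on ',' and each token's value parsed by match_u64(), and is
 * expected to yield traddr->nn = 0x20000090fa942779 and
 * traddr->pn = 0x10000090fa942779, which nvmet_fc_add_port() then matches
 * against the node_name/port_name of the registered target ports.
 */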
2219
2220 static int
2221 nvmet_fc_add_port(struct nvmet_port *port)
2222 {
2223         struct nvmet_fc_tgtport *tgtport;
2224         struct nvmet_fc_traddr traddr = { 0L, 0L };
2225         unsigned long flags;
2226         int ret;
2227
2228         /* validate the address info */
2229         if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2230             (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2231                 return -EINVAL;
2232
2233         /* map the traddr address info to a target port */
2234
2235         ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
2236         if (ret)
2237                 return ret;
2238
2239         ret = -ENXIO;
2240         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2241         list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2242                 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2243                     (tgtport->fc_target_port.port_name == traddr.pn)) {
2244                         /* an FC port can be bound to only one nvmet port id */
2245                         if (!tgtport->port) {
2246                                 tgtport->port = port;
2247                                 port->priv = tgtport;
2248                                 ret = 0;
2249                         } else
2250                                 ret = -EALREADY;
2251                         break;
2252                 }
2253         }
2254         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2255         return ret;
2256 }
2257
2258 static void
2259 nvmet_fc_remove_port(struct nvmet_port *port)
2260 {
2261         struct nvmet_fc_tgtport *tgtport = port->priv;
2262         unsigned long flags;
2263
2264         spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2265         if (tgtport->port == port) {
2266                 nvmet_fc_tgtport_put(tgtport);
2267                 tgtport->port = NULL;
2268         }
2269         spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2270 }
2271
2272 static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2273         .owner                  = THIS_MODULE,
2274         .type                   = NVMF_TRTYPE_FC,
2275         .msdbd                  = 1,
2276         .add_port               = nvmet_fc_add_port,
2277         .remove_port            = nvmet_fc_remove_port,
2278         .queue_response         = nvmet_fc_fcp_nvme_cmd_done,
2279         .delete_ctrl            = nvmet_fc_delete_ctrl,
2280 };
2281
2282 static int __init nvmet_fc_init_module(void)
2283 {
2284         return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2285 }
2286
2287 static void __exit nvmet_fc_exit_module(void)
2288 {
2289         /* sanity check - all targetports should be removed */
2290         if (!list_empty(&nvmet_fc_target_list))
2291                 pr_warn("%s: targetport list not empty\n", __func__);
2292
2293         nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2294
2295         ida_destroy(&nvmet_fc_tgtport_cnt);
2296 }
2297
2298 module_init(nvmet_fc_init_module);
2299 module_exit(nvmet_fc_exit_module);
2300
2301 MODULE_LICENSE("GPL v2");