drivers/scsi/lpfc/lpfc_nvme.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme.h>
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_nvme.h"
#include "lpfc_scsi.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

/* NVME initiator-based functions */

static struct lpfc_nvme_buf *
lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp);

static void
lpfc_release_nvme_buf(struct lpfc_hba *, struct lpfc_nvme_buf *);

/**
 * lpfc_nvme_create_queue - Create and initialize the handle for an NVME queue
 * @pnvme_lport: Pointer to the driver's local port data
 * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
 * @qsize: Requested size of the queue (not used by this routine).
 * @handle: An opaque driver handle used in follow-up calls.
 *
 * Driver registers this routine to preallocate and initialize any
 * internal data structures to bind the @qidx to its internal IO queues.
 * A hardware queue maps (qidx) to a specific driver MSI-X vector/EQ/CQ/WQ.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - Unsupported input value.
 *   -ENOMEM - Could not alloc necessary memory
 **/
static int
lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport,
                       unsigned int qidx, u16 qsize,
                       void **handle)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_nvme_qhandle *qhandle;
        char *str;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;
        qhandle = kzalloc(sizeof(struct lpfc_nvme_qhandle), GFP_KERNEL);
        if (qhandle == NULL)
                return -ENOMEM;

        qhandle->cpu_id = smp_processor_id();
        qhandle->qidx = qidx;
        /*
         * NVME qidx == 0 is the admin queue, so both admin queue
         * and first IO queue will use MSI-X vector and associated
         * EQ/CQ/WQ at index 0. After that they are sequentially assigned.
         */
        if (qidx) {
                str = "IO ";  /* IO queue */
                qhandle->index = ((qidx - 1) %
                        vport->phba->cfg_nvme_io_channel);
        } else {
                str = "ADM";  /* Admin queue */
                qhandle->index = qidx;
        }
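
        /*
         * Example: assuming cfg_nvme_io_channel == 4, the admin queue
         * (qidx 0) and the first IO queue (qidx 1) both map to index 0,
         * qidx 2-4 map to indexes 1-3, and qidx 5 wraps back to index 0.
         */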

        lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
                         "6073 Binding %s HdwQueue %d  (cpu %d) to "
                         "io_channel %d qhandle %p\n", str,
                         qidx, qhandle->cpu_id, qhandle->index, qhandle);
        *handle = (void *)qhandle;
        return 0;
}

/**
 * lpfc_nvme_delete_queue - Free the handle for an NVME queue
 * @pnvme_lport: Pointer to the driver's local port data
 * @qidx: A CPU index used to affinitize IO queues and MSI-X vectors.
 * @handle: An opaque driver handle from lpfc_nvme_create_queue
 *
 * Driver registers this routine to free
 * any internal data structures to bind the @qidx to its internal
 * IO queues.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport,
                       unsigned int qidx,
                       void *handle)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                        "6001 ENTER.  lpfc_pnvme %p, qidx x%x qhandle %p\n",
                        lport, qidx, handle);
        kfree(handle);
}

static void
lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
{
        struct lpfc_nvme_lport *lport = localport->private;

        /* release any threads waiting for the unreg to complete */
        complete(&lport->lport_unreg_done);
}

/* lpfc_nvme_remoteport_delete
 *
 * @remoteport: Pointer to an nvme transport remoteport instance.
 *
 * This is a template downcall.  NVME transport calls this function
 * when it has completed the unregistration of a previously
 * registered remoteport.
 *
 * Return value :
 * None
 */
void
lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
        struct lpfc_nvme_rport *rport = remoteport->private;
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;

        ndlp = rport->ndlp;
        if (!ndlp)
                goto rport_err;

        vport = ndlp->vport;
        if (!vport)
                goto rport_err;

        /* Remove this rport from the lport's list - memory is owned by the
         * transport. Remove the ndlp reference for the NVME transport before
         * calling state machine to remove the node, this is devloss = 0
         * semantics.
         */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                        "6146 remoteport delete complete %p\n",
                        remoteport);
        list_del(&rport->list);
        lpfc_nlp_put(ndlp);

 rport_err:
        /* This call has to execute as long as the rport is valid.
         * Release any threads waiting for the unreg to complete.
         */
        complete(&rport->rport_unreg_done);
}

static void
lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                       struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_vport *vport = cmdwqe->vport;
        uint32_t status;
        struct nvmefc_ls_req *pnvme_lsreq;
        struct lpfc_dmabuf *buf_ptr;
        struct lpfc_nodelist *ndlp;

        vport->phba->fc4NvmeLsCmpls++;

        pnvme_lsreq = (struct nvmefc_ls_req *)cmdwqe->context2;
        status = bf_get(lpfc_wcqe_c_status, wcqe) & LPFC_IOCB_STATUS_MASK;
        ndlp = (struct lpfc_nodelist *)cmdwqe->context1;
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                         "6047 nvme cmpl Enter "
                         "Data %p DID %x Xri: %x status %x cmd:%p lsreq:%p "
                         "bmp:%p ndlp:%p\n",
                         pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
                         cmdwqe->sli4_xritag, status,
                         cmdwqe, pnvme_lsreq, cmdwqe->context3, ndlp);

        lpfc_nvmeio_data(phba, "NVME LS  CMPL: xri x%x stat x%x parm x%x\n",
                         cmdwqe->sli4_xritag, status, wcqe->parameter);

        if (cmdwqe->context3) {
                buf_ptr = (struct lpfc_dmabuf *)cmdwqe->context3;
                lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
                kfree(buf_ptr);
                cmdwqe->context3 = NULL;
        }
        if (pnvme_lsreq->done)
                pnvme_lsreq->done(pnvme_lsreq, status);
        else
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
                                 "6046 nvme cmpl without done call back? "
                                 "Data %p DID %x Xri: %x status %x\n",
                                 pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0,
                                 cmdwqe->sli4_xritag, status);
        if (ndlp) {
                lpfc_nlp_put(ndlp);
                cmdwqe->context1 = NULL;
        }
        lpfc_sli_release_iocbq(phba, cmdwqe);
}

static int
lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
                  struct lpfc_dmabuf *inp,
                  struct nvmefc_ls_req *pnvme_lsreq,
                  void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
                               struct lpfc_wcqe_complete *),
                  struct lpfc_nodelist *ndlp, uint32_t num_entry,
                  uint32_t tmo, uint8_t retry)
{
        struct lpfc_hba  *phba = vport->phba;
        union lpfc_wqe *wqe;
        struct lpfc_iocbq *genwqe;
        struct ulp_bde64 *bpl;
        struct ulp_bde64 bde;
        int i, rc, xmit_len, first_len;

        /* Allocate buffer for command WQE */
        genwqe = lpfc_sli_get_iocbq(phba);
        if (genwqe == NULL)
                return 1;

        wqe = &genwqe->wqe;
        memset(wqe, 0, sizeof(union lpfc_wqe));

        genwqe->context3 = (uint8_t *)bmp;
        genwqe->iocb_flag |= LPFC_IO_NVME_LS;

        /* Save for completion so we can release these resources */
        genwqe->context1 = lpfc_nlp_get(ndlp);
        genwqe->context2 = (uint8_t *)pnvme_lsreq;
        /* Fill in payload, bp points to frame payload */

        if (!tmo)
                /* FC spec states we need 3 * ratov for CT requests */
                tmo = (3 * phba->fc_ratov);

        /* For this command calculate the xmit length of the request bde. */
        xmit_len = 0;
        first_len = 0;
        bpl = (struct ulp_bde64 *)bmp->virt;
        for (i = 0; i < num_entry; i++) {
                bde.tus.w = bpl[i].tus.w;
                if (bde.tus.f.bdeFlags != BUFF_TYPE_BDE_64)
                        break;
                xmit_len += bde.tus.f.bdeSize;
                if (i == 0)
                        first_len = xmit_len;
        }
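
        /*
         * At this point xmit_len is the sum of every 64-bit BDE scanned
         * and first_len is the size of the first BDE alone; first_len is
         * what gets programmed into the WQE as the request payload length
         * below.
         */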

        genwqe->rsvd2 = num_entry;
        genwqe->hba_wqidx = 0;

        /* Words 0 - 2 */
        wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
        wqe->generic.bde.tus.f.bdeSize = first_len;
        wqe->generic.bde.addrLow = bpl[0].addrLow;
        wqe->generic.bde.addrHigh = bpl[0].addrHigh;

        /* Word 3 */
        wqe->gen_req.request_payload_len = first_len;

        /* Word 4 */

        /* Word 5 */
        bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
        bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
        bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
        bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
        bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe->gen_req.wqe_com,
               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);

        /* Word 7 */
        bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov - 1));
        bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
        bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
        bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);

        /* Word 8 */
        wqe->gen_req.wqe_com.abort_tag = genwqe->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe->gen_req.wqe_com, genwqe->iotag);

        /* Word 10 */
        bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
        bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
        bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
        bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
        bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);

        /* Word 11 */
        bf_set(wqe_cqid, &wqe->gen_req.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_cmd_type, &wqe->gen_req.wqe_com, OTHER_COMMAND);

        /* Issue GEN REQ WQE for NPORT <did> */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
                         "6050 Issue GEN REQ WQE to NPORT x%x "
                         "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n",
                         ndlp->nlp_DID, genwqe->iotag,
                         vport->port_state,
                         genwqe, pnvme_lsreq, bmp, xmit_len, first_len);
        genwqe->wqe_cmpl = cmpl;
        genwqe->iocb_cmpl = NULL;
        genwqe->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
        genwqe->vport = vport;
        genwqe->retry = retry;

        lpfc_nvmeio_data(phba, "NVME LS  XMIT: xri x%x iotag x%x to x%06x\n",
                         genwqe->sli4_xritag, genwqe->iotag, ndlp->nlp_DID);

        rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, genwqe);
        if (rc == WQE_ERROR) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
                                 "6045 Issue GEN REQ WQE to NPORT x%x "
                                 "Data: x%x x%x\n",
                                 ndlp->nlp_DID, genwqe->iotag,
                                 vport->port_state);
                lpfc_sli_release_iocbq(phba, genwqe);
                return 1;
        }
        return 0;
}

/**
 * lpfc_nvme_ls_req - Issue a Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the LS request
 * @pnvme_lsreq: Pointer to the LS request from the nvme_fc transport
 *
 * Driver registers this routine to handle any link service request
 * from the nvme_fc transport to a remote nvme-aware port.
 *
 * Return value :
 *   0 - Success
 *   non-zero - The LS request could not be issued.
 **/
static int
lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport,
                 struct nvme_fc_remote_port *pnvme_rport,
                 struct nvmefc_ls_req *pnvme_lsreq)
{
        int ret = 0;
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_nodelist *ndlp;
        struct ulp_bde64 *bpl;
        struct lpfc_dmabuf *bmp;

        /* There are two DMA buffers in the request; actually there is one
         * and the second one is just the start address + cmd size.
         * Before calling lpfc_nvme_gen_req these buffers need to be wrapped
         * in a lpfc_dmabuf struct. When freeing we just free the wrapper
         * because the nvme layer owns the data bufs.
         * We do not have to break these packets open, we don't care what is
         * in them. And we do not have to look at the response data, we only
         * care that we got a response. All of the caring is going to happen
         * in the nvme-fc layer.
         */

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;

        ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
                                 "6043 Could not find node for DID %x\n",
                                 pnvme_rport->port_id);
                return 1;
        }
        bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
        if (!bmp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
                                 "6044 Could not alloc LS request buffer "
                                 "for DID %x\n",
                                 pnvme_rport->port_id);
                return 2;
        }
        INIT_LIST_HEAD(&bmp->list);
        bmp->virt = lpfc_mbuf_alloc(vport->phba, MEM_PRI, &(bmp->phys));
        if (!bmp->virt) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
                                 "6042 Could not alloc mbuf for DID %x\n",
                                 pnvme_rport->port_id);
                kfree(bmp);
                return 3;
        }
        bpl = (struct ulp_bde64 *)bmp->virt;
        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rqstdma));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rqstdma));
        bpl->tus.f.bdeFlags = 0;
        bpl->tus.f.bdeSize = pnvme_lsreq->rqstlen;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
        bpl++;

        bpl->addrHigh = le32_to_cpu(putPaddrHigh(pnvme_lsreq->rspdma));
        bpl->addrLow = le32_to_cpu(putPaddrLow(pnvme_lsreq->rspdma));
        bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
        bpl->tus.f.bdeSize = pnvme_lsreq->rsplen;
        bpl->tus.w = le32_to_cpu(bpl->tus.w);
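
        /*
         * The BPL now holds two entries: entry 0 describes the LS request
         * payload (rqstdma/rqstlen) and entry 1 the response buffer
         * (rspdma/rsplen), typed as an input BDE.
         */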

        /* Expand print to include key fields. */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                         "6051 ENTER.  lport %p, rport %p lsreq %p rqstlen:%d "
                         "rsplen:%d %pad %pad\n",
                         pnvme_lport, pnvme_rport,
                         pnvme_lsreq, pnvme_lsreq->rqstlen,
                         pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
                         &pnvme_lsreq->rspdma);

        vport->phba->fc4NvmeLsRequests++;

        /* Hardcode the wait to 30 seconds.  Connections are failing otherwise.
         * This code allows it all to work.
         */
        ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
                                pnvme_lsreq, lpfc_nvme_cmpl_gen_req,
                                ndlp, 2, 30, 0);
        if (ret != WQE_SUCCESS) {
                lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
                                 "6052 EXIT. issue ls wqe failed lport %p, "
                                 "rport %p lsreq %p Status %x DID %x\n",
                                 pnvme_lport, pnvme_rport, pnvme_lsreq,
                                 ret, ndlp->nlp_DID);
                lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys);
                kfree(bmp);
                return ret;
        }

        return ret;
}

/**
 * lpfc_nvme_ls_abort - Abort a prior Link Service request
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport that issued the LS request
 * @pnvme_lsreq: Pointer to the LS request to be aborted
 *
 * Driver registers this routine to abort a link service request
 * previously issued to a remote nvme-aware port.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport,
                   struct nvme_fc_remote_port *pnvme_rport,
                   struct nvmefc_ls_req *pnvme_lsreq)
{
        struct lpfc_nvme_lport *lport;
        struct lpfc_vport *vport;
        struct lpfc_hba *phba;
        struct lpfc_nodelist *ndlp;
        LIST_HEAD(abort_list);
        struct lpfc_sli_ring *pring;
        struct lpfc_iocbq *wqe, *next_wqe;

        lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
        vport = lport->vport;
        phba = vport->phba;

        ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
        if (!ndlp) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
                                 "6049 Could not find node for DID %x\n",
                                 pnvme_rport->port_id);
                return;
        }

        /* Expand print to include key fields. */
        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS,
                         "6040 ENTER.  lport %p, rport %p lsreq %p rqstlen:%d "
                         "rsplen:%d %pad %pad\n",
                         pnvme_lport, pnvme_rport,
                         pnvme_lsreq, pnvme_lsreq->rqstlen,
                         pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma,
                         &pnvme_lsreq->rspdma);

        /*
         * Lock the ELS ring txcmplq and build a local list of all ELS IOs
         * that need an ABTS.  The IOs need to stay on the txcmplq so that
         * the abort operation completes them successfully.
         */
        pring = phba->sli4_hba.nvmels_wq->pring;
        spin_lock_irq(&phba->hbalock);
        spin_lock(&pring->ring_lock);
        list_for_each_entry_safe(wqe, next_wqe, &pring->txcmplq, list) {
                /* Add to abort_list on NDLP match. */
                if (lpfc_check_sli_ndlp(phba, pring, wqe, ndlp)) {
                        wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
                        list_add_tail(&wqe->dlist, &abort_list);
                }
        }
        spin_unlock(&pring->ring_lock);
        spin_unlock_irq(&phba->hbalock);

        /* Abort the targeted IOs and remove them from the abort list. */
        list_for_each_entry_safe(wqe, next_wqe, &abort_list, dlist) {
                spin_lock_irq(&phba->hbalock);
                list_del_init(&wqe->dlist);
                lpfc_sli_issue_abort_iotag(phba, pring, wqe);
                spin_unlock_irq(&phba->hbalock);
        }
}

/* Fix up the existing sgls for NVME IO. */
static void
lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
                       struct lpfc_nvme_buf *lpfc_ncmd,
                       struct nvmefc_fcp_req *nCmd)
{
        struct sli4_sge *sgl;
        union lpfc_wqe128 *wqe;
        uint32_t *wptr, *dptr;

        /*
         * Adjust the FCP_CMD and FCP_RSP DMA data and sge_len to
         * match NVME.  NVME sends 96 bytes. Also, use the
         * nvme commands command and response dma addresses
         * rather than the virtual memory to ease the restore
         * operation.
         */
        sgl = lpfc_ncmd->nvme_sgl;
        sgl->sge_len = cpu_to_le32(nCmd->cmdlen);

        sgl++;

        /* Setup the physical region for the FCP RSP */
        sgl->addr_hi = cpu_to_le32(putPaddrHigh(nCmd->rspdma));
        sgl->addr_lo = cpu_to_le32(putPaddrLow(nCmd->rspdma));
        sgl->word2 = le32_to_cpu(sgl->word2);
        if (nCmd->sg_cnt)
                bf_set(lpfc_sli4_sge_last, sgl, 0);
        else
                bf_set(lpfc_sli4_sge_last, sgl, 1);
        sgl->word2 = cpu_to_le32(sgl->word2);
        sgl->sge_len = cpu_to_le32(nCmd->rsplen);

        /*
         * Get a local pointer to the built-in wqe and correct
         * the cmd size to match NVME's 96 bytes and fix
         * the dma address.
         */

        /* 128 byte wqe support here */
        wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;

        /* Word 0-2 - NVME CMND IU (embedded payload) */
        wqe->generic.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_IMMED;
        wqe->generic.bde.tus.f.bdeSize = 60;
        wqe->generic.bde.addrHigh = 0;
        wqe->generic.bde.addrLow = 64;  /* Word 16 */

        /* Word 3 */
        bf_set(payload_offset_len, &wqe->fcp_icmd,
               (nCmd->rsplen + nCmd->cmdlen));
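
        /*
         * cmdlen is the 96-byte NVME CMND IU noted above, so Word 3
         * carries the combined command + response length for this
         * immediate-payload WQE.
         */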

        /* Word 10 */
        bf_set(wqe_nvme, &wqe->fcp_icmd.wqe_com, 1);
        bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

        /*
         * Embed the payload in the last half of the WQE
         * WQE words 16-30 get the NVME CMD IU payload
         *
         * WQE Word 16 is already setup with flags
         * WQE words 17-19 get payload Words 2-4
         * WQE words 20-21 get payload Words 6-7
         * WQE words 22-29 get payload Words 16-23
         */
        wptr = &wqe->words[17];  /* WQE ptr */
        dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
        dptr += 2;              /* Skip Words 0-1 in payload */

        *wptr++ = *dptr++;      /* Word 2 */
        *wptr++ = *dptr++;      /* Word 3 */
        *wptr++ = *dptr++;      /* Word 4 */
        dptr++;                 /* Skip Word 5 in payload */
        *wptr++ = *dptr++;      /* Word 6 */
        *wptr++ = *dptr++;      /* Word 7 */
        dptr += 8;              /* Skip Words 8-15 in payload */
        *wptr++ = *dptr++;      /* Word 16 */
        *wptr++ = *dptr++;      /* Word 17 */
        *wptr++ = *dptr++;      /* Word 18 */
        *wptr++ = *dptr++;      /* Word 19 */
        *wptr++ = *dptr++;      /* Word 20 */
        *wptr++ = *dptr++;      /* Word 21 */
        *wptr++ = *dptr++;      /* Word 22 */
        *wptr   = *dptr;        /* Word 23 */
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvme_ktime(struct lpfc_hba *phba,
                struct lpfc_nvme_buf *lpfc_ncmd)
{
        uint64_t seg1, seg2, seg3, seg4;

        if (!phba->ktime_on)
                return;
        if (!lpfc_ncmd->ts_last_cmd ||
            !lpfc_ncmd->ts_cmd_start ||
            !lpfc_ncmd->ts_cmd_wqput ||
            !lpfc_ncmd->ts_isr_cmpl ||
            !lpfc_ncmd->ts_data_nvme)
                return;
        if (lpfc_ncmd->ts_cmd_start < lpfc_ncmd->ts_last_cmd)
                return;
        if (lpfc_ncmd->ts_cmd_wqput < lpfc_ncmd->ts_cmd_start)
                return;
        if (lpfc_ncmd->ts_isr_cmpl < lpfc_ncmd->ts_cmd_wqput)
                return;
        if (lpfc_ncmd->ts_data_nvme < lpfc_ncmd->ts_isr_cmpl)
                return;
        /*
         * Segment 1 - Time from when the last FCP command cmpl was handed
         * off to the NVME Layer to the start of the next command.
         * Segment 2 - Time from when the driver receives an IO cmd start
         * from the NVME Layer to when the WQ put is done for the IO cmd.
         * Segment 3 - Time from when the driver WQ put is done for the IO
         * cmd to the MSI-X ISR for the IO cmpl.
         * Segment 4 - Time from the MSI-X ISR for the IO cmpl to when the
         * cmpl is handed off to the NVME Layer.
         */
        seg1 = lpfc_ncmd->ts_cmd_start - lpfc_ncmd->ts_last_cmd;
        if (seg1 > 5000000)  /* 5 ms - for sequential IOs */
                return;

        /* Calculate times relative to start of IO */
        seg2 = (lpfc_ncmd->ts_cmd_wqput - lpfc_ncmd->ts_cmd_start);
        seg3 = (lpfc_ncmd->ts_isr_cmpl -
                lpfc_ncmd->ts_cmd_start) - seg2;
        seg4 = (lpfc_ncmd->ts_data_nvme -
                lpfc_ncmd->ts_cmd_start) - seg2 - seg3;
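
        /*
         * Equivalently: seg2 = wqput - start, seg3 = isr_cmpl - wqput,
         * seg4 = data_nvme - isr_cmpl; the three segments partition the
         * interval from command start to handoff back to the NVME layer.
         */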
        phba->ktime_data_samples++;
        phba->ktime_seg1_total += seg1;
        if (seg1 < phba->ktime_seg1_min)
                phba->ktime_seg1_min = seg1;
        else if (seg1 > phba->ktime_seg1_max)
                phba->ktime_seg1_max = seg1;
        phba->ktime_seg2_total += seg2;
        if (seg2 < phba->ktime_seg2_min)
                phba->ktime_seg2_min = seg2;
        else if (seg2 > phba->ktime_seg2_max)
                phba->ktime_seg2_max = seg2;
        phba->ktime_seg3_total += seg3;
        if (seg3 < phba->ktime_seg3_min)
                phba->ktime_seg3_min = seg3;
        else if (seg3 > phba->ktime_seg3_max)
                phba->ktime_seg3_max = seg3;
        phba->ktime_seg4_total += seg4;
        if (seg4 < phba->ktime_seg4_min)
                phba->ktime_seg4_min = seg4;
        else if (seg4 > phba->ktime_seg4_max)
                phba->ktime_seg4_max = seg4;

        lpfc_ncmd->ts_last_cmd = 0;
        lpfc_ncmd->ts_cmd_start = 0;
        lpfc_ncmd->ts_cmd_wqput = 0;
        lpfc_ncmd->ts_isr_cmpl = 0;
        lpfc_ncmd->ts_data_nvme = 0;
}
#endif

/**
 * lpfc_nvme_io_cmd_wqe_cmpl - Complete an NVME-over-FCP IO
 * @phba: Pointer to the driver's hba instance data
 * @pwqeIn: Pointer to the command WQE that completed
 * @wcqe: Pointer to the work-queue completion entry
 *
 * Driver registers this routine as the IO completion handler.  It
 * translates the WCQE completion status into NVME ERSP or transport
 * status data and hands the completed request back to the nvme_fc
 * transport.
 *
 * Return value :
 *   None
 **/
static void
lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvme_buf *lpfc_ncmd =
                (struct lpfc_nvme_buf *)pwqeIn->context1;
        struct lpfc_vport *vport = pwqeIn->vport;
        struct nvmefc_fcp_req *nCmd;
        struct nvme_fc_ersp_iu *ep;
        struct nvme_fc_cmd_iu *cp;
        struct lpfc_nvme_rport *rport;
        struct lpfc_nodelist *ndlp;
        unsigned long flags;
        uint32_t code;
        uint16_t cid, sqhd, data;
        uint32_t *ptr;

        /* Sanity check on return of outstanding command */
        if (!lpfc_ncmd || !lpfc_ncmd->nvmeCmd || !lpfc_ncmd->nrport) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
                                 "6071 Completion pointers bad on wqe %p.\n",
                                 wcqe);
                return;
        }
        phba->fc4NvmeIoCmpls++;

        nCmd = lpfc_ncmd->nvmeCmd;
        rport = lpfc_ncmd->nrport;

        lpfc_nvmeio_data(phba, "NVME FCP CMPL: xri x%x stat x%x parm x%x\n",
                         lpfc_ncmd->cur_iocbq.sli4_xritag,
                         bf_get(lpfc_wcqe_c_status, wcqe), wcqe->parameter);
        /*
         * Catch race where our node has transitioned, but the
         * transport is still transitioning.
         */
        ndlp = rport->ndlp;
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
                lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
                                 "6061 rport %p, ndlp %p, DID x%06x ndlp "
                                 "not ready.\n",
                                 rport, ndlp, rport->remoteport->port_id);

                ndlp = lpfc_findnode_did(vport, rport->remoteport->port_id);
                if (!ndlp) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
                                         "6062 Ignoring NVME cmpl.  No ndlp\n");
                        goto out_err;
                }
        }

        code = bf_get(lpfc_wcqe_c_code, wcqe);
        if (code == CQE_CODE_NVME_ERSP) {
                /* For this type of CQE, we need to rebuild the rsp */
                ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr;

                /*
                 * Get Command Id from cmd to plug into response. This
                 * code is not needed in the next NVME Transport drop.
                 */
                cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr;
                cid = cp->sqe.common.command_id;

                /*
                 * RSN is in CQE word 2
                 * SQHD is in CQE Word 3 bits 15:0
                 * Cmd Specific info is in CQE Word 1
                 * and in CQE Word 0 bits 15:0
                 */
                sqhd = bf_get(lpfc_wcqe_c_sqhead, wcqe);

                /* Now let's build the NVME ERSP IU */
                ep->iu_len = cpu_to_be16(8);
                ep->rsn = wcqe->parameter;
                ep->xfrd_len = cpu_to_be32(nCmd->payload_length);
                ep->rsvd12 = 0;
                ptr = (uint32_t *)&ep->cqe.result.u64;
                *ptr++ = wcqe->total_data_placed;
                data = bf_get(lpfc_wcqe_c_ersp0, wcqe);
                *ptr = (uint32_t)data;
                ep->cqe.sq_head = sqhd;
                ep->cqe.sq_id = nCmd->sqid;
                ep->cqe.command_id = cid;
                ep->cqe.status = 0;

                lpfc_ncmd->status = IOSTAT_SUCCESS;
                lpfc_ncmd->result = 0;
                nCmd->rcv_rsplen = LPFC_NVME_ERSP_LEN;
                nCmd->transferred_length = nCmd->payload_length;
        } else {
                lpfc_ncmd->status = (bf_get(lpfc_wcqe_c_status, wcqe) &
                            LPFC_IOCB_STATUS_MASK);
                lpfc_ncmd->result = wcqe->parameter;

                /* For NVME, the only failure path that results in an
                 * IO error is when the adapter rejects it.  All other
                 * conditions are a success case and resolved by the
                 * transport.
                 * IOSTAT_FCP_RSP_ERROR means:
                 * 1. Length of data received doesn't match total
                 *    transfer length in WQE
                 * 2. If the RSP payload does NOT match these cases:
                 *    a. RSP length 12/24 bytes and all zeros
                 *    b. NVME ERSP
                 */
                switch (lpfc_ncmd->status) {
                case IOSTAT_SUCCESS:
                        nCmd->transferred_length = wcqe->total_data_placed;
                        nCmd->rcv_rsplen = 0;
                        nCmd->status = 0;
                        break;
                case IOSTAT_FCP_RSP_ERROR:
                        nCmd->transferred_length = wcqe->total_data_placed;
                        nCmd->rcv_rsplen = wcqe->parameter;
                        nCmd->status = 0;
                        /* Sanity check */
                        if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN)
                                break;
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
                                         "6081 NVME Completion Protocol Error: "
                                         "status x%x result x%x placed x%x\n",
                                         lpfc_ncmd->status, lpfc_ncmd->result,
                                         wcqe->total_data_placed);
                        break;
                default:
out_err:
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
                                         "6072 NVME Completion Error: "
                                         "status x%x result x%x placed x%x\n",
                                         lpfc_ncmd->status, lpfc_ncmd->result,
                                         wcqe->total_data_placed);
                        nCmd->transferred_length = 0;
                        nCmd->rcv_rsplen = 0;
                        nCmd->status = NVME_SC_FC_TRANSPORT_ERROR;
                }
        }

        /* pick up SLI4 exchange busy condition */
        if (bf_get(lpfc_wcqe_c_xb, wcqe))
                lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
        else
                lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;

        if (ndlp && NLP_CHK_NODE_ACT(ndlp))
                atomic_dec(&ndlp->cmd_pending);

        /* Update stats and complete the IO.  There is
         * no need for dma unprep because the nvme_transport
         * owns the dma address.
         */
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->ktime_on) {
                lpfc_ncmd->ts_isr_cmpl = pwqeIn->isr_timestamp;
                lpfc_ncmd->ts_data_nvme = ktime_get_ns();
                phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme;
                lpfc_nvme_ktime(phba, lpfc_ncmd);
        }
        if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
                if (lpfc_ncmd->cpu != smp_processor_id())
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
                                         "6701 CPU Check cmpl: "
                                         "cpu %d expect %d\n",
                                         smp_processor_id(), lpfc_ncmd->cpu);
                if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
                        phba->cpucheck_cmpl_io[lpfc_ncmd->cpu]++;
        }
#endif
        nCmd->done(nCmd);

        spin_lock_irqsave(&phba->hbalock, flags);
        lpfc_ncmd->nrport = NULL;
        spin_unlock_irqrestore(&phba->hbalock, flags);

        lpfc_release_nvme_buf(phba, lpfc_ncmd);
}

/**
 * lpfc_nvme_prep_io_cmd - Prepare the WQE for an NVME-over-FCP IO
 * @vport: Pointer to the driver's vport instance data
 * @lpfc_ncmd: Pointer to the driver's IO command buffer
 * @pnode: Pointer to the nodelist entry for the rport
 *
 * This routine initializes the WQE fields in @lpfc_ncmd that do not
 * depend on the DMA mappings, using the IO direction from the nvme_fc
 * request and the node @pnode.
 *
 * Return value :
 *   0 - Success
 *   -EINVAL - The node is not usable.
 **/
static int
lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
                      struct lpfc_nvme_buf *lpfc_ncmd,
                      struct lpfc_nodelist *pnode)
{
        struct lpfc_hba *phba = vport->phba;
        struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
        struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq);
        union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&pwqeq->wqe;
        uint32_t req_len;

        if (!pnode || !NLP_CHK_NODE_ACT(pnode))
                return -EINVAL;

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.
         */
        wqe->fcp_iwrite.initial_xfer_len = 0;
        if (nCmd->sg_cnt) {
                if (nCmd->io_dir == NVMEFC_FCP_WRITE) {
                        /* Word 5 */
                        if ((phba->cfg_nvme_enable_fb) &&
                            (pnode->nlp_flag & NLP_FIRSTBURST)) {
                                req_len = lpfc_ncmd->nvmeCmd->payload_length;
                                if (req_len < pnode->nvme_fb_size)
                                        wqe->fcp_iwrite.initial_xfer_len =
                                                req_len;
                                else
                                        wqe->fcp_iwrite.initial_xfer_len =
                                                pnode->nvme_fb_size;
                        }

                        /* Word 7 */
                        bf_set(wqe_cmnd, &wqe->generic.wqe_com,
                               CMD_FCP_IWRITE64_WQE);
                        bf_set(wqe_pu, &wqe->generic.wqe_com,
                               PARM_READ_CHECK);

                        /* Word 10 */
                        bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
                        bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com,
                               LPFC_WQE_IOD_WRITE);
                        bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
                               LPFC_WQE_LENLOC_WORD4);
                        if (phba->cfg_nvme_oas)
                                bf_set(wqe_oas, &wqe->fcp_iwrite.wqe_com, 1);

                        /* Word 11 */
                        bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
                               NVME_WRITE_CMD);

                        /* Word 16 */
                        wqe->words[16] = LPFC_NVME_EMBED_WRITE;

                        phba->fc4NvmeOutputRequests++;
                } else {
                        /* Word 7 */
                        bf_set(wqe_cmnd, &wqe->generic.wqe_com,
                               CMD_FCP_IREAD64_WQE);
                        bf_set(wqe_pu, &wqe->generic.wqe_com,
                               PARM_READ_CHECK);

                        /* Word 10 */
                        bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
                        bf_set(wqe_iod, &wqe->fcp_iread.wqe_com,
                               LPFC_WQE_IOD_READ);
                        bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
                               LPFC_WQE_LENLOC_WORD4);
                        if (phba->cfg_nvme_oas)
                                bf_set(wqe_oas, &wqe->fcp_iread.wqe_com, 1);

                        /* Word 11 */
                        bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
                               NVME_READ_CMD);

                        /* Word 16 */
                        wqe->words[16] = LPFC_NVME_EMBED_READ;

                        phba->fc4NvmeInputRequests++;
                }
        } else {
                /* Word 4 */
                wqe->fcp_icmd.rsrvd4 = 0;

                /* Word 7 */
                bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_FCP_ICMND64_WQE);
                bf_set(wqe_pu, &wqe->generic.wqe_com, 0);

                /* Word 10 */
                bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
                bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
                bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
                       LPFC_WQE_LENLOC_NONE);
                if (phba->cfg_nvme_oas)
                        bf_set(wqe_oas, &wqe->fcp_icmd.wqe_com, 1);

                /* Word 11 */
                bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);

                /* Word 16 */
                wqe->words[16] = LPFC_NVME_EMBED_CMD;

                phba->fc4NvmeControlRequests++;
        }
        /*
         * Finish initializing those WQE fields that are independent
         * of the nvme_cmnd request_buffer
         */

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe->generic.wqe_com,
               phba->sli4_hba.rpi_ids[pnode->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe->generic.wqe_com, pwqeq->sli4_xritag);

        /* Word 7 */
        /* Preserve Class data in the ndlp. */
        bf_set(wqe_class, &wqe->generic.wqe_com,
               (pnode->nlp_fcp_info & 0x0f));

        /* Word 8 */
        wqe->generic.wqe_com.abort_tag = pwqeq->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe->generic.wqe_com, pwqeq->iotag);

        /* Word 11 */
        bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

        pwqeq->vport = vport;
        return 0;
}

/**
 * lpfc_nvme_prep_io_dma - Prepare the DMA mappings for an NVME-over-FCP IO
 * @vport: Pointer to the driver's vport instance data
 * @lpfc_ncmd: Pointer to the driver's IO command buffer
 *
 * This routine builds the data SGL for the IO request in @lpfc_ncmd
 * from the scatter-gather list supplied by the nvme_fc transport.
 *
 * Return value :
 *   0 - Success
 *   1 - The SGL could not be built.
 **/
static int
lpfc_nvme_prep_io_dma(struct lpfc_vport *vport,
                      struct lpfc_nvme_buf *lpfc_ncmd)
{
        struct lpfc_hba *phba = vport->phba;
        struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd;
        union lpfc_wqe128 *wqe = (union lpfc_wqe128 *)&lpfc_ncmd->cur_iocbq.wqe;
        struct sli4_sge *sgl = lpfc_ncmd->nvme_sgl;
        struct scatterlist *data_sg;
        struct sli4_sge *first_data_sgl;
        dma_addr_t physaddr;
        uint32_t dma_len;
        uint32_t dma_offset = 0;
        int nseg, i;

        /* Fix up the command and response DMA stuff. */
        lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd);

        /*
         * There are three possibilities here - use scatter-gather segment, use
         * the single mapping, or neither.
         */
        if (nCmd->sg_cnt) {
                /*
                 * Jump over the cmd and rsp SGEs.  The fix routine
                 * has already adjusted for this.
                 */
                sgl += 2;

                first_data_sgl = sgl;
                lpfc_ncmd->seg_cnt = nCmd->sg_cnt;
                if (lpfc_ncmd->seg_cnt > phba->cfg_sg_seg_cnt) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                        "6058 Too many sg segments from "
                                        "NVME Transport.  Max %d, "
                                        "nvmeIO sg_cnt %d\n",
                                        phba->cfg_sg_seg_cnt,
                                        lpfc_ncmd->seg_cnt);
                        lpfc_ncmd->seg_cnt = 0;
                        return 1;
                }

                /*
                 * The driver established a maximum scatter-gather segment count
                 * during probe that limits the number of sg elements in any
                 * single nvme command.  Just run through the seg_cnt and format
                 * the sge's.
                 */
                nseg = nCmd->sg_cnt;
                data_sg = nCmd->first_sgl;
                for (i = 0; i < nseg; i++) {
                        if (data_sg == NULL) {
                                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                                "6059 dptr err %d, nseg %d\n",
                                                i, nseg);
                                lpfc_ncmd->seg_cnt = 0;
                                return 1;
                        }
                        physaddr = data_sg->dma_address;
                        dma_len = data_sg->length;
                        sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
                        sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
                        sgl->word2 = le32_to_cpu(sgl->word2);
                        if ((i + 1) == nseg)
                                bf_set(lpfc_sli4_sge_last, sgl, 1);
                        else
                                bf_set(lpfc_sli4_sge_last, sgl, 0);
                        bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
                        bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
                        sgl->word2 = cpu_to_le32(sgl->word2);
                        sgl->sge_len = cpu_to_le32(dma_len);

                        dma_offset += dma_len;
                        data_sg = sg_next(data_sg);
                        sgl++;
                }
        } else {
                /* For this clause to be valid, the payload_length
                 * and sg_cnt must be zero.
                 */
                if (nCmd->payload_length != 0) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                        "6063 NVME DMA Prep Err: sg_cnt %d "
                                        "payload_length x%x\n",
                                        nCmd->sg_cnt, nCmd->payload_length);
                        return 1;
                }
        }

        /*
         * Due to difference in data length between DIF/non-DIF paths,
         * we need to set word 4 of WQE here
         */
        wqe->fcp_iread.total_xfer_len = nCmd->payload_length;
        return 0;
}

/**
 * lpfc_nvme_fcp_io_submit - Issue an NVME-over-FCP IO
 * @pnvme_lport: Pointer to the driver's local port data
 * @pnvme_rport: Pointer to the rport getting the IO request
 * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
 * @pnvme_fcreq: IO request from nvme fc to driver.
 *
 * Driver registers this routine as its io request handler.  This
 * routine issues an fcp WQE with data from the @pnvme_fcreq
 * data structure to the rport indicated in @pnvme_rport.
 *
 * Return value :
 *   0 - Success
 *   -ENODEV - The node is not usable.
 *   -EAGAIN - The pending IO count exceeds the node's queue depth.
 *   -ENOMEM - No command buffer available, or DMA prep failed.
 *   -EINVAL - The WQE could not be issued.
 **/
1194 static int
1195 lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
1196                         struct nvme_fc_remote_port *pnvme_rport,
1197                         void *hw_queue_handle,
1198                         struct nvmefc_fcp_req *pnvme_fcreq)
1199 {
1200         int ret = 0;
1201         struct lpfc_nvme_lport *lport;
1202         struct lpfc_vport *vport;
1203         struct lpfc_hba *phba;
1204         struct lpfc_nodelist *ndlp;
1205         struct lpfc_nvme_buf *lpfc_ncmd;
1206         struct lpfc_nvme_rport *rport;
1207         struct lpfc_nvme_qhandle *lpfc_queue_info;
1208 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1209         uint64_t start = 0;
1210 #endif
1211
1212         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1213         vport = lport->vport;
1214         phba = vport->phba;
1215
1216 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1217         if (phba->ktime_on)
1218                 start = ktime_get_ns();
1219 #endif
1220         rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
1221         lpfc_queue_info = (struct lpfc_nvme_qhandle *)hw_queue_handle;
1222
1223         /*
1224          * Catch race where our node has transitioned, but the
1225          * transport is still transitioning.
1226          */
1227         ndlp = rport->ndlp;
1228         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1229                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
1230                                  "6053 rport %p, ndlp %p, DID x%06x "
1231                                  "ndlp not ready.\n",
1232                                  rport, ndlp, pnvme_rport->port_id);
1233
1234                 ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
1235                 if (!ndlp) {
1236                         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
1237                                          "6066 Missing node for DID %x\n",
1238                                          pnvme_rport->port_id);
1239                         ret = -ENODEV;
1240                         goto out_fail;
1241                 }
1242         }
1243
1244         /* The remote node has to be a mapped target or it's an error. */
1245         if ((ndlp->nlp_type & NLP_NVME_TARGET) &&
1246             (ndlp->nlp_state != NLP_STE_MAPPED_NODE)) {
1247                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR,
1248                                  "6036 rport %p, DID x%06x not ready for "
1249                                  "IO. State x%x, Type x%x\n",
1250                                  rport, pnvme_rport->port_id,
1251                                  ndlp->nlp_state, ndlp->nlp_type);
1252                 ret = -ENODEV;
1253                 goto out_fail;
1254
1255         }
1256
1257         /* The node is shared with FCP IO, make sure the IO pending count does
1258          * not exceed the programmed depth.
1259          */
1260         if (atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth) {
1261                 ret = -EAGAIN;
1262                 goto out_fail;
1263         }
1264
1265         lpfc_ncmd = lpfc_get_nvme_buf(phba, ndlp);
1266         if (lpfc_ncmd == NULL) {
1267                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR,
1268                                  "6065 driver's buffer pool is empty, "
1269                                  "IO failed\n");
1270                 ret = -ENOMEM;
1271                 goto out_fail;
1272         }
1273 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1274         if (phba->ktime_on) {
1275                 lpfc_ncmd->ts_cmd_start = start;
1276                 lpfc_ncmd->ts_last_cmd = phba->ktime_last_cmd;
1277         }
1278 #endif
1279
1280         /*
1281          * Store the data needed by the driver to issue, abort, and complete
1282          * an IO.
1283          * Do not let the IO hang out forever.  There is no midlayer issuing
1284          * an abort so inform the FW of the maximum IO pending time.
1285          */
1286         pnvme_fcreq->private = (void *)lpfc_ncmd;
1287         lpfc_ncmd->nvmeCmd = pnvme_fcreq;
1288         lpfc_ncmd->nrport = rport;
1289         lpfc_ncmd->start_time = jiffies;
1290
1291         lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
1292         ret = lpfc_nvme_prep_io_dma(vport, lpfc_ncmd);
1293         if (ret) {
1294                 ret = -ENOMEM;
1295                 goto out_free_nvme_buf;
1296         }
1297
1298         atomic_inc(&ndlp->cmd_pending);
1299
1300         /*
1301          * Issue the IO on the WQ indicated by index in the hw_queue_handle.
1302          * This identfier was create in our hardware queue create callback
1303          * routine. The driver now is dependent on the IO queue steering from
1304          * the transport.  We are trusting the upper NVME layers know which
1305          * index to use and that they have affinitized a CPU to this hardware
1306          * queue. A hardware queue maps to a driver MSI-X vector/EQ/CQ/WQ.
1307          */
1308         lpfc_ncmd->cur_iocbq.hba_wqidx = lpfc_queue_info->index;
1309
1310         lpfc_nvmeio_data(phba, "NVME FCP XMIT: xri x%x idx %d to %06x\n",
1311                          lpfc_ncmd->cur_iocbq.sli4_xritag,
1312                          lpfc_queue_info->index, ndlp->nlp_DID);
1313
1314         ret = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, &lpfc_ncmd->cur_iocbq);
1315         if (ret) {
1316                 atomic_dec(&ndlp->cmd_pending);
1317                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
1318                                  "6113 FCP could not issue WQE err %x "
1319                                  "sid: x%x did: x%x oxid: x%x\n",
1320                                  ret, vport->fc_myDID, ndlp->nlp_DID,
1321                                  lpfc_ncmd->cur_iocbq.sli4_xritag);
1322                 ret = -EINVAL;
1323                 goto out_free_nvme_buf;
1324         }
1325
1326 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
1327         if (phba->ktime_on)
1328                 lpfc_ncmd->ts_cmd_wqput = ktime_get_ns();
1329
1330         if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) {
1331                 lpfc_ncmd->cpu = smp_processor_id();
1332                 if (lpfc_ncmd->cpu != lpfc_queue_info->index) {
1333                         /* Warn only for IO queues, not the admin queue */
1334                         if (lpfc_queue_info->qidx) {
1335                                 lpfc_printf_vlog(vport,
1336                                                  KERN_ERR, LOG_NVME_IOERR,
1337                                                 "6702 CPU Check cmd: "
1338                                                 "cpu %d wq %d\n",
1339                                                 lpfc_ncmd->cpu,
1340                                                 lpfc_queue_info->index);
1341                         }
1342                         lpfc_ncmd->cpu = lpfc_queue_info->index;
1343                 }
1344                 if (lpfc_ncmd->cpu < LPFC_CHECK_CPU_CNT)
1345                         phba->cpucheck_xmt_io[lpfc_ncmd->cpu]++;
1346         }
1347 #endif
1348         return 0;
1349
1350  out_free_nvme_buf:
1351         lpfc_release_nvme_buf(phba, lpfc_ncmd);
1352  out_fail:
1353         return ret;
1354 }
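
/*
 * Recap of the submit path above: validate the rport's ndlp, enforce the
 * shared FCP/NVME queue depth, take a preallocated lpfc_nvme_buf, build
 * the WQE and DMA mappings, then post to the WQ selected by the
 * transport's hw_queue_handle.  The error contract mirrors that order:
 * -ENODEV for a node that is not ready, -EAGAIN when the queue depth is
 * exhausted, -ENOMEM when buffer or DMA resources are unavailable, and
 * -EINVAL when the WQE cannot be issued to the work queue.
 */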
1355
1356 /**
1357  * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request.
1358  * @phba: Pointer to HBA context object
1359  * @cmdiocb: Pointer to command iocb object.
1360  * @rspiocb: Pointer to response iocb object.
1361  *
1362  * This is the callback function for any NVME FCP IO that was aborted.
1363  *
1364  * Return value:
1365  *   None
1366  **/
1367 void
1368 lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1369                            struct lpfc_wcqe_complete *abts_cmpl)
1370 {
1371         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1372                         "6145 ABORT_XRI_CN completing on rpi x%x "
1373                         "original iotag x%x, abort cmd iotag x%x "
1374                         "req_tag x%x, status x%x, hwstatus x%x\n",
1375                         cmdiocb->iocb.un.acxri.abortContextTag,
1376                         cmdiocb->iocb.un.acxri.abortIoTag,
1377                         cmdiocb->iotag,
1378                         bf_get(lpfc_wcqe_c_request_tag, abts_cmpl),
1379                         bf_get(lpfc_wcqe_c_status, abts_cmpl),
1380                         bf_get(lpfc_wcqe_c_hw_status, abts_cmpl));
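        /*
         * Only the abort iocbq itself is released here; the aborted IO's
         * nvmefc request completes through the original WQE's completion
         * handler once the firmware returns the exchange.
         */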
1381         lpfc_sli_release_iocbq(phba, cmdiocb);
1382 }
1383
1384 /**
1385  * lpfc_nvme_fcp_abort - Issue an NVME-over-FCP ABTS
1386  * @lpfc_pnvme: Pointer to the driver's nvme instance data
1387  * @lpfc_nvme_lport: Pointer to the driver's local port data
1388  * @lpfc_nvme_rport: Pointer to the rport getting the @lpfc_nvme_fcreq
1389  * @lpfc_nvme_fcreq: IO request from nvme fc to driver.
1390  * @hw_queue_handle: Driver-returned handle in lpfc_nvme_create_queue
1391  *
1392  * Driver registers this routine as its nvme request io abort handler.  This
1393  * routine issues an fcp Abort WQE with data from the @lpfc_nvme_fcpreq
1394  * data structure to the rport indicated in @lpfc_nvme_rport.  This routine
1395  * is executed asynchronously - once the target is validated as "MAPPED" and
1396  * ready for IO, the driver issues the abort request and returns.
1397  *
1398  * Return value:
1399  *   None
1400  **/
1401 static void
1402 lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
1403                     struct nvme_fc_remote_port *pnvme_rport,
1404                     void *hw_queue_handle,
1405                     struct nvmefc_fcp_req *pnvme_fcreq)
1406 {
1407         struct lpfc_nvme_lport *lport;
1408         struct lpfc_vport *vport;
1409         struct lpfc_hba *phba;
1410         struct lpfc_nodelist *ndlp;
1411         struct lpfc_nvme_rport *rport;
1412         struct lpfc_nvme_buf *lpfc_nbuf;
1413         struct lpfc_iocbq *abts_buf;
1414         struct lpfc_iocbq *nvmereq_wqe;
1415         union lpfc_wqe *abts_wqe;
1416         unsigned long flags;
1417         int ret_val;
1418
1419         lport = (struct lpfc_nvme_lport *)pnvme_lport->private;
1420         rport = (struct lpfc_nvme_rport *)pnvme_rport->private;
1421         vport = lport->vport;
1422         phba = vport->phba;
1423
1424         /* Announce entry to the FCP abort handler. */
1425         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1426                          "6002 Abort Request to rport DID x%06x "
1427                          "for nvme_fc_req %p\n",
1428                          pnvme_rport->port_id,
1429                          pnvme_fcreq);
1430
1431         /*
1432          * Catch race where our node has transitioned, but the
1433          * transport is still transitioning.
1434          */
1435         ndlp = rport->ndlp;
1436         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1437                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
1438                                  "6054 rport %p, ndlp %p, DID x%06x ndlp "
1439                                  "not ready.\n",
1440                                  rport, ndlp, pnvme_rport->port_id);
1441
1442                 ndlp = lpfc_findnode_did(vport, pnvme_rport->port_id);
1443                 if (!ndlp) {
1444                         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS,
1445                                          "6055 Could not find node for "
1446                                          "DID %x\n",
1447                                          pnvme_rport->port_id);
1448                         return;
1449                 }
1450         }
1451
1452         /* The remote node has to be ready to accept an abort. */
1453         if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) &&
1454             !(ndlp->nlp_type & NLP_NVME_TARGET)) {
1455                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_ABTS,
1456                                  "6048 rport %p, DID x%06x not ready for "
1457                                  "IO. State x%x, Type x%x\n",
1458                                  rport, pnvme_rport->port_id,
1459                                  ndlp->nlp_state, ndlp->nlp_type);
1460                 return;
1461         }
1462
1463         /* If the hba is getting reset, this flag is set.  It is
1464          * cleared when the reset is complete and rings reestablished.
1465          */
1466         spin_lock_irqsave(&phba->hbalock, flags);
1467         /* driver-queued commands are in the process of being flushed */
1468         if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
1469                 spin_unlock_irqrestore(&phba->hbalock, flags);
1470                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
1471                                  "6139 Driver in reset cleanup - flushing "
1472                                  "NVME Req now.  hba_flag x%x\n",
1473                                  phba->hba_flag);
1474                 return;
1475         }
1476
1477         lpfc_nbuf = (struct lpfc_nvme_buf *)pnvme_fcreq->private;
1478         if (!lpfc_nbuf) {
1479                 spin_unlock_irqrestore(&phba->hbalock, flags);
1480                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
1481                                  "6140 NVME IO req has no matching lpfc nvme "
1482                                  "io buffer.  Skipping abort req.\n");
1483                 return;
1484         } else if (!lpfc_nbuf->nvmeCmd) {
1485                 spin_unlock_irqrestore(&phba->hbalock, flags);
1486                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
1487                                  "6141 lpfc NVME IO req has no nvme_fcreq "
1488                                  "io buffer.  Skipping abort req.\n");
1489                 return;
1490         }
1491
1492         /*
1493          * The lpfc_nbuf and the mapped nvme_fcreq in the driver's
1494          * state must match the nvme_fcreq passed by the nvme
1495          * transport.  If they don't match, it is likely the driver
1496          * has already completed the NVME IO and the nvme transport
1497          * has not seen it yet.
1498          */
1499         if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) {
1500                 spin_unlock_irqrestore(&phba->hbalock, flags);
1501                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
1502                                  "6143 NVME req mismatch: "
1503                                  "lpfc_nbuf %p nvmeCmd %p, "
1504                                  "pnvme_fcreq %p.  Skipping Abort\n",
1505                                  lpfc_nbuf, lpfc_nbuf->nvmeCmd,
1506                                  pnvme_fcreq);
1507                 return;
1508         }
1509
1510         /* Don't abort IOs no longer on the pending queue. */
1511         nvmereq_wqe = &lpfc_nbuf->cur_iocbq;
1512         if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) {
1513                 spin_unlock_irqrestore(&phba->hbalock, flags);
1514                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
1515                                  "6142 NVME IO req %p not queued - skipping "
1516                                  "abort req\n",
1517                                  pnvme_fcreq);
1518                 return;
1519         }
1520
1521         lpfc_nvmeio_data(phba, "NVME FCP ABORT: xri x%x idx %d to %06x\n",
1522                          nvmereq_wqe->sli4_xritag,
1523                          nvmereq_wqe->hba_wqidx, ndlp->nlp_DID);
1524
1525         /* Outstanding abort is in progress */
1526         if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) {
1527                 spin_unlock_irqrestore(&phba->hbalock, flags);
1528                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
1529                                  "6144 Outstanding NVME I/O Abort Request "
1530                                  "still pending on nvme_fcreq %p, "
1531                                  "lpfc_ncmd %p\n",
1532                                  pnvme_fcreq, lpfc_nbuf);
1533                 return;
1534         }
1535
1536         abts_buf = __lpfc_sli_get_iocbq(phba);
1537         if (!abts_buf) {
1538                 spin_unlock_irqrestore(&phba->hbalock, flags);
1539                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
1540                                  "6136 No available abort wqes. Skipping "
1541                                  "Abts req for nvme_fcreq %p.\n",
1542                                  pnvme_fcreq);
1543                 return;
1544         }
1545
1546         /* Ready - mark outstanding as aborted by driver. */
1547         nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
1548
1549         /* Complete prepping the abort wqe and issue to the FW. */
1550         abts_wqe = &abts_buf->wqe;
1551
1552         /* WQEs are reused.  Clear stale data and zero key fields such
1553          * as ia, iaab, iaar, xri_tag, and ctxt_tag.
1554          */
1555         memset(abts_wqe, 0, sizeof(union lpfc_wqe));
1556         bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
1557
1558         /* word 7 */
1559         bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
1560         bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
1561         bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com,
1562                nvmereq_wqe->iocb.ulpClass);
1563
1564         /* word 8 - tell the FW to abort the IO associated with this
1565          * outstanding exchange ID.
1566          */
1567         abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag;
1568
1569         /* word 9 - this is the iotag for the abts_wqe completion. */
1570         bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
1571                abts_buf->iotag);
1572
1573         /* word 10 */
1574         bf_set(wqe_wqid, &abts_wqe->abort_cmd.wqe_com, nvmereq_wqe->hba_wqidx);
1575         bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
1576         bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
1577
1578         /* word 11 */
1579         bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
1580         bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
1581         bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
1582
1583         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
1584         abts_buf->iocb_flag |= LPFC_IO_NVME;
1585         abts_buf->hba_wqidx = nvmereq_wqe->hba_wqidx;
1586         abts_buf->vport = vport;
1587         abts_buf->wqe_cmpl = lpfc_nvme_abort_fcreq_cmpl;
1588         ret_val = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_buf);
1589         spin_unlock_irqrestore(&phba->hbalock, flags);
1590         if (ret_val == IOCB_ERROR) {
1591                 lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
1592                                  "6137 Failed abts issue_wqe with status x%x "
1593                                  "for nvme_fcreq %p.\n",
1594                                  ret_val, pnvme_fcreq);
1595                 lpfc_sli_release_iocbq(phba, abts_buf);
1596                 return;
1597         }
1598
1599         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME,
1600                          "6138 Transport Abort NVME Request Issued for "
1601                          "ox_id x%x on reqtag x%x\n",
1602                          nvmereq_wqe->sli4_xritag,
1603                          abts_buf->iotag);
1604 }
1605
1606 /* Declare and initialize an instance of the FC NVME template. */
1607 static struct nvme_fc_port_template lpfc_nvme_template = {
1608         /* initiator-based functions */
1609         .localport_delete  = lpfc_nvme_localport_delete,
1610         .remoteport_delete = lpfc_nvme_remoteport_delete,
1611         .create_queue = lpfc_nvme_create_queue,
1612         .delete_queue = lpfc_nvme_delete_queue,
1613         .ls_req       = lpfc_nvme_ls_req,
1614         .fcp_io       = lpfc_nvme_fcp_io_submit,
1615         .ls_abort     = lpfc_nvme_ls_abort,
1616         .fcp_abort    = lpfc_nvme_fcp_abort,
1617
1618         .max_hw_queues = 1,
1619         .max_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1620         .max_dif_sgl_segments = LPFC_NVME_DEFAULT_SEGS,
1621         .dma_boundary = 0xFFFFFFFF,
1622
1623         /* Sizes of additional private data for data structures.
1624          * No use for the last two sizes at this time.
1625          */
1626         .local_priv_sz = sizeof(struct lpfc_nvme_lport),
1627         .remote_priv_sz = sizeof(struct lpfc_nvme_rport),
1628         .lsrqst_priv_sz = 0,
1629         .fcprqst_priv_sz = 0,
1630 };
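
/*
 * Note: max_hw_queues and max_sgl_segments in the template above are
 * placeholders.  Both are overwritten with the configured values
 * (cfg_nvme_io_channel and cfg_sg_seg_cnt + 1) in
 * lpfc_nvme_create_localport() before the template is passed to
 * nvme_fc_register_localport().
 */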
1631
1632 /**
1633  * lpfc_sli4_post_nvme_sgl_block - post a block of nvme sgl list to firmware
1634  * @phba: pointer to lpfc hba data structure.
1635  * @nblist: pointer to nvme buffer list.
1636  * @count: number of nvme buffers on the list.
1637  *
1638  * This routine is invoked to post a block of @count nvme sgl pages from an
1639  * NVME buffer list @nblist to the HBA using a non-embedded mailbox command.
1640  * No Lock is held.
1641  *
1642  **/
1643 static int
1644 lpfc_sli4_post_nvme_sgl_block(struct lpfc_hba *phba,
1645                               struct list_head *nblist,
1646                               int count)
1647 {
1648         struct lpfc_nvme_buf *lpfc_ncmd;
1649         struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
1650         struct sgl_page_pairs *sgl_pg_pairs;
1651         void *viraddr;
1652         LPFC_MBOXQ_t *mbox;
1653         uint32_t reqlen, alloclen, pg_pairs;
1654         uint32_t mbox_tmo;
1655         uint16_t xritag_start = 0;
1656         int rc = 0;
1657         uint32_t shdr_status, shdr_add_status;
1658         dma_addr_t pdma_phys_bpl1;
1659         union lpfc_sli4_cfg_shdr *shdr;
1660
1661         /* Calculate the requested length of the dma memory */
1662         reqlen = count * sizeof(struct sgl_page_pairs) +
1663                  sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
1664         if (reqlen > SLI4_PAGE_SIZE) {
1665                 lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
1666                                 "6118 Block sgl registration required DMA "
1667                                 "size (%d) greater than a page\n", reqlen);
1668                 return -ENOMEM;
1669         }
1670         mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1671         if (!mbox) {
1672                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1673                                 "6119 Failed to allocate mbox cmd memory\n");
1674                 return -ENOMEM;
1675         }
1676
1677         /* Allocate DMA memory and set up the non-embedded mailbox command */
1678         alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
1679                                 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
1680                                 LPFC_SLI4_MBX_NEMBED);
1681
1682         if (alloclen < reqlen) {
1683                 lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1684                                 "6120 Allocated DMA memory size (%d) is "
1685                                 "less than the requested DMA memory "
1686                                 "size (%d)\n", alloclen, reqlen);
1687                 lpfc_sli4_mbox_cmd_free(phba, mbox);
1688                 return -ENOMEM;
1689         }
1690
1691         /* Get the first SGE entry from the non-embedded DMA memory */
1692         viraddr = mbox->sge_array->addr[0];
1693
1694         /* Set up the SGL pages in the non-embedded DMA pages */
1695         sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
1696         sgl_pg_pairs = &sgl->sgl_pg_pairs;
1697
1698         pg_pairs = 0;
1699         list_for_each_entry(lpfc_ncmd, nblist, list) {
1700                 /* Set up the sge entry */
1701                 sgl_pg_pairs->sgl_pg0_addr_lo =
1702                         cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
1703                 sgl_pg_pairs->sgl_pg0_addr_hi =
1704                         cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
1705                 if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
1706                         pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
1707                                                 SGL_PAGE_SIZE;
1708                 else
1709                         pdma_phys_bpl1 = 0;
1710                 sgl_pg_pairs->sgl_pg1_addr_lo =
1711                         cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
1712                 sgl_pg_pairs->sgl_pg1_addr_hi =
1713                         cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
1714                 /* Keep the first xritag on the list */
1715                 if (pg_pairs == 0)
1716                         xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
1717                 sgl_pg_pairs++;
1718                 pg_pairs++;
1719         }
1720         bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
1721         bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
1722         /* Perform endian conversion if necessary */
1723         sgl->word0 = cpu_to_le32(sgl->word0);
1724
1725         if (!phba->sli4_hba.intr_enable)
1726                 rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
1727         else {
1728                 mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
1729                 rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
1730         }
1731         shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
1732         shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
1733         shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
1734         if (rc != MBX_TIMEOUT)
1735                 lpfc_sli4_mbox_cmd_free(phba, mbox);
1736         if (shdr_status || shdr_add_status || rc) {
1737                 lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1738                                 "6125 POST_SGL_BLOCK mailbox command failed "
1739                                 "status x%x add_status x%x mbx status x%x\n",
1740                                 shdr_status, shdr_add_status, rc);
1741                 rc = -ENXIO;
1742         }
1743         return rc;
1744 }
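
/*
 * Sizing sketch for the non-embedded mailbox used above (illustrative
 * only, assuming sizeof(struct sgl_page_pairs) == 16 bytes and a 4KB
 * SLI4_PAGE_SIZE): reqlen = count * 16 + header, so a single page bounds
 * count to roughly (4096 - header) / 16, on the order of 250 SGL page
 * pairs per mailbox command.  In practice LPFC_NEMBED_MBOX_SGL_CNT caps
 * the size of each posting block in lpfc_post_nvme_sgl_list() below.
 */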
1745
1746 /**
1747  * lpfc_post_nvme_sgl_list - Post blocks of nvme buffer sgls from a list
1748  * @phba: pointer to lpfc hba data structure.
1749  * @post_nblist: pointer to the nvme buffer list.
1750  *
1751  * This routine walks a list of nvme buffers that was passed in. It attempts
1752  * to construct blocks of nvme buffer sgls which contain contiguous xris and
1753  * uses the non-embedded SGL block post mailbox commands to post to the port.
1754  * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses the
1755  * embedded SGL post mailbox command for posting. The @post_nblist passed in
1756  * must be a local list, thus no lock is needed when manipulating the list.
1757  *
1758  * Returns: 0 = failure, non-zero number of successfully posted buffers.
1759  **/
1760 static int
1761 lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
1762                              struct list_head *post_nblist, int sb_count)
1763 {
1764         struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
1765         int status, sgl_size;
1766         int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
1767         dma_addr_t pdma_phys_sgl1;
1768         int last_xritag = NO_XRI;
1769         int cur_xritag;
1770         LIST_HEAD(prep_nblist);
1771         LIST_HEAD(blck_nblist);
1772         LIST_HEAD(nvme_nblist);
1773
1774         /* sanity check */
1775         if (sb_count <= 0)
1776                 return -EINVAL;
1777
1778         sgl_size = phba->cfg_sg_dma_buf_size;
1779
1780         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
1781                 list_del_init(&lpfc_ncmd->list);
1782                 block_cnt++;
1783                 if ((last_xritag != NO_XRI) &&
1784                     (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
1785                         /* a hole in xri block, form a sgl posting block */
1786                         list_splice_init(&prep_nblist, &blck_nblist);
1787                         post_cnt = block_cnt - 1;
1788                         /* prepare list for next posting block */
1789                         list_add_tail(&lpfc_ncmd->list, &prep_nblist);
1790                         block_cnt = 1;
1791                 } else {
1792                         /* prepare list for next posting block */
1793                         list_add_tail(&lpfc_ncmd->list, &prep_nblist);
1794                         /* enough sgls for non-embed sgl mbox command */
1795                         if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
1796                                 list_splice_init(&prep_nblist, &blck_nblist);
1797                                 post_cnt = block_cnt;
1798                                 block_cnt = 0;
1799                         }
1800                 }
1801                 num_posting++;
1802                 last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
1803
1804                 /* end of repost sgl list condition for NVME buffers */
1805                 if (num_posting == sb_count) {
1806                         if (post_cnt == 0) {
1807                                 /* last sgl posting block */
1808                                 list_splice_init(&prep_nblist, &blck_nblist);
1809                                 post_cnt = block_cnt;
1810                         } else if (block_cnt == 1) {
1811                                 /* last single sgl with non-contiguous xri */
1812                                 if (sgl_size > SGL_PAGE_SIZE)
1813                                         pdma_phys_sgl1 =
1814                                                 lpfc_ncmd->dma_phys_sgl +
1815                                                 SGL_PAGE_SIZE;
1816                                 else
1817                                         pdma_phys_sgl1 = 0;
1818                                 cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
1819                                 status = lpfc_sli4_post_sgl(phba,
1820                                                 lpfc_ncmd->dma_phys_sgl,
1821                                                 pdma_phys_sgl1, cur_xritag);
1822                                 if (status) {
1823                                         /* failure, put on abort nvme list */
1824                                         lpfc_ncmd->exch_busy = 1;
1825                                 } else {
1826                                         /* success, put on NVME buffer list */
1827                                         lpfc_ncmd->exch_busy = 0;
1828                                         lpfc_ncmd->status = IOSTAT_SUCCESS;
1829                                         num_posted++;
1830                                 }
1831                                 /* success or not, put on NVME buffer sgl list */
1832                                 list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
1833                         }
1834                 }
1835
1836                 /* continue until a nembed page worth of sgls */
1837                 if (post_cnt == 0)
1838                         continue;
1839
1840                 /* post block of NVME buffer list sgls */
1841                 status = lpfc_sli4_post_nvme_sgl_block(phba, &blck_nblist,
1842                                                        post_cnt);
1843
1844                 /* don't reset xritag due to hole in xri block */
1845                 if (block_cnt == 0)
1846                         last_xritag = NO_XRI;
1847
1848                 /* reset NVME buffer post count for next round of posting */
1849                 post_cnt = 0;
1850
1851                 /* put NVME buffers with posted sgls on the NVME buffer sgl list */
1852                 while (!list_empty(&blck_nblist)) {
1853                         list_remove_head(&blck_nblist, lpfc_ncmd,
1854                                          struct lpfc_nvme_buf, list);
1855                         if (status) {
1856                                 /* failure, put on abort nvme list */
1857                                 lpfc_ncmd->exch_busy = 1;
1858                         } else {
1859                                 /* success, put on NVME buffer list */
1860                                 lpfc_ncmd->exch_busy = 0;
1861                                 lpfc_ncmd->status = IOSTAT_SUCCESS;
1862                                 num_posted++;
1863                         }
1864                         list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
1865                 }
1866         }
1867         /* Push NVME buffers with sgl posted to the available list */
1868         while (!list_empty(&nvme_nblist)) {
1869                 list_remove_head(&nvme_nblist, lpfc_ncmd,
1870                                  struct lpfc_nvme_buf, list);
1871                 lpfc_release_nvme_buf(phba, lpfc_ncmd);
1872         }
1873         return num_posted;
1874 }
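
/*
 * The batching logic above reduces to: walk the buffers in XRI order,
 * accumulate while each XRI is exactly last + 1, and flush a block when
 * a hole appears or LPFC_NEMBED_MBOX_SGL_CNT entries are gathered.  A
 * minimal stand-alone sketch of that run-splitting, with hypothetical
 * helper names (not driver code):
 *
 *	int last = NO_XRI, run = 0;
 *	for (i = 0; i < n; i++) {
 *		if (last != NO_XRI && xri[i] != last + 1) {
 *			flush_block(run);		// hole: flush the run
 *			run = 0;
 *		}
 *		run++;
 *		last = xri[i];
 *		if (run == LPFC_NEMBED_MBOX_SGL_CNT) {
 *			flush_block(run);		// page-worth of sgls
 *			run = 0;
 *		}
 *	}
 *	if (run)
 *		flush_block(run);			// trailing block
 */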
1875
1876 /**
1877  * lpfc_repost_nvme_sgl_list - Repost all the allocated nvme buffer sgls
1878  * @phba: pointer to lpfc hba data structure.
1879  *
1880  * This routine walks the list of nvme buffers that have been allocated and
1881  * reposts them to the port by using SGL block post. This is needed after a
1882  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
1883  * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
1884  * to the lpfc_nvme_buf_list. If the repost fails, reject all nvme buffers.
1885  *
1886  * Returns: 0 = success, non-zero failure.
1887  **/
1888 int
1889 lpfc_repost_nvme_sgl_list(struct lpfc_hba *phba)
1890 {
1891         LIST_HEAD(post_nblist);
1892         int num_posted, rc = 0;
1893
1894         /* get all NVME buffers that need to be reposted onto a local list */
1895         spin_lock_irq(&phba->nvme_buf_list_get_lock);
1896         spin_lock(&phba->nvme_buf_list_put_lock);
1897         list_splice_init(&phba->lpfc_nvme_buf_list_get, &post_nblist);
1898         list_splice(&phba->lpfc_nvme_buf_list_put, &post_nblist);
1899         spin_unlock(&phba->nvme_buf_list_put_lock);
1900         spin_unlock_irq(&phba->nvme_buf_list_get_lock);
1901
1902         /* post the list of nvme buffer sgls to port if available */
1903         if (!list_empty(&post_nblist)) {
1904                 num_posted = lpfc_post_nvme_sgl_list(phba, &post_nblist,
1905                                                 phba->sli4_hba.nvme_xri_cnt);
1906                 /* failed to post any nvme buffer, return error */
1907                 if (num_posted == 0)
1908                         rc = -EIO;
1909         }
1910         return rc;
1911 }
1912
1913 /**
1914  * lpfc_new_nvme_buf - NVME buffer allocator for HBA with SLI4 IF spec
1915  * @vport: The virtual port for which this call is being executed.
1916  * @num_to_alloc: The requested number of buffers to allocate.
1917  *
1918  * This routine allocates nvme buffers for a device with the SLI-4 interface
1919  * spec. Each nvme buffer contains all the information needed to initiate
1920  * an NVME I/O. After allocating up to @num_to_alloc NVME buffers and putting
1921  * them on a list, it posts them to the port by using SGL block post.
1922  *
1923  * Return codes:
1924  *   int - number of nvme buffers that were allocated and posted.
1925  *   0 = failure, less than num_to_alloc is a partial failure.
1926  **/
1927 static int
1928 lpfc_new_nvme_buf(struct lpfc_vport *vport, int num_to_alloc)
1929 {
1930         struct lpfc_hba *phba = vport->phba;
1931         struct lpfc_nvme_buf *lpfc_ncmd;
1932         struct lpfc_iocbq *pwqeq;
1933         union lpfc_wqe128 *wqe;
1934         struct sli4_sge *sgl;
1935         dma_addr_t pdma_phys_sgl;
1936         uint16_t iotag, lxri = 0;
1937         int bcnt, num_posted, sgl_size;
1938         LIST_HEAD(prep_nblist);
1939         LIST_HEAD(post_nblist);
1940         LIST_HEAD(nvme_nblist);
1941
1942         sgl_size = phba->cfg_sg_dma_buf_size;
1943
1944         for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
1945                 lpfc_ncmd = kzalloc(sizeof(struct lpfc_nvme_buf), GFP_KERNEL);
1946                 if (!lpfc_ncmd)
1947                         break;
1948                 /*
1949                  * Get memory from the pci pool to map the virt space to
1950                  * pci bus space for an I/O. The DMA buffer includes the
1951                  * number of SGE's necessary to support the sg_tablesize.
1952                  */
1953                 lpfc_ncmd->data = pci_pool_alloc(phba->lpfc_sg_dma_buf_pool,
1954                                                  GFP_KERNEL,
1955                                                  &lpfc_ncmd->dma_handle);
1956                 if (!lpfc_ncmd->data) {
1957                         kfree(lpfc_ncmd);
1958                         break;
1959                 }
1960                 memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
1961
1962                 lxri = lpfc_sli4_next_xritag(phba);
1963                 if (lxri == NO_XRI) {
1964                         pci_pool_free(phba->lpfc_sg_dma_buf_pool,
1965                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
1966                         kfree(lpfc_ncmd);
1967                         break;
1968                 }
1969                 pwqeq = &(lpfc_ncmd->cur_iocbq);
1970                 wqe = (union lpfc_wqe128 *)&pwqeq->wqe;
1971
1972                 /* Allocate iotag for lpfc_ncmd->cur_iocbq. */
1973                 iotag = lpfc_sli_next_iotag(phba, pwqeq);
1974                 if (iotag == 0) {
1975                         pci_pool_free(phba->lpfc_sg_dma_buf_pool,
1976                                       lpfc_ncmd->data, lpfc_ncmd->dma_handle);
1977                         kfree(lpfc_ncmd);
1978                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1979                                         "6121 Failed to allocate IOTAG for"
1980                                         " XRI:0x%x\n", lxri);
1981                         lpfc_sli4_free_xri(phba, lxri);
1982                         break;
1983                 }
1984                 pwqeq->sli4_lxritag = lxri;
1985                 pwqeq->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
1986                 pwqeq->iocb_flag |= LPFC_IO_NVME;
1987                 pwqeq->context1 = lpfc_ncmd;
1988                 pwqeq->wqe_cmpl = lpfc_nvme_io_cmd_wqe_cmpl;
1989
1990                 /* Initialize local short-hand pointers. */
1991                 lpfc_ncmd->nvme_sgl = lpfc_ncmd->data;
1992                 sgl = lpfc_ncmd->nvme_sgl;
1993                 pdma_phys_sgl = lpfc_ncmd->dma_handle;
1994                 lpfc_ncmd->dma_phys_sgl = pdma_phys_sgl;
1995
1996                 /* Rsp SGE will be filled in when we receive an IO
1997                  * from the NVME Layer to be sent.
1998                  * The cmd is going to be embedded so we need a SKIP SGE.
1999                  */
2000                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
2001                 bf_set(lpfc_sli4_sge_last, sgl, 0);
2002                 sgl->word2 = cpu_to_le32(sgl->word2);
2003                 /* Fill in word 3 / sgl_len during cmd submission */
2004
2005                 lpfc_ncmd->cur_iocbq.context1 = lpfc_ncmd;
2006
2007                 /* Word 7 */
2008                 bf_set(wqe_erp, &wqe->generic.wqe_com, 0);
2009                 /* NVME upper layers will time things out, if needed */
2010                 bf_set(wqe_tmo, &wqe->generic.wqe_com, 0);
2011
2012                 /* Word 10 */
2013                 bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
2014                 bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
2015
2016                 /* add the nvme buffer to a post list */
2017                 list_add_tail(&lpfc_ncmd->list, &post_nblist);
2018                 spin_lock_irq(&phba->nvme_buf_list_get_lock);
2019                 phba->sli4_hba.nvme_xri_cnt++;
2020                 spin_unlock_irq(&phba->nvme_buf_list_get_lock);
2021         }
2022         lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
2023                         "6114 Allocated %d out of %d requested new NVME "
2024                         "buffers\n", bcnt, num_to_alloc);
2025
2026         /* post the list of nvme buffer sgls to port if available */
2027         if (!list_empty(&post_nblist))
2028                 num_posted = lpfc_post_nvme_sgl_list(phba,
2029                                                      &post_nblist, bcnt);
2030         else
2031                 num_posted = 0;
2032
2033         return num_posted;
2034 }
2035
2036 /**
2037  * lpfc_get_nvme_buf - Get an nvme buffer from lpfc_nvme_buf_list of the HBA
2038  * @phba: The HBA for which this call is being executed.
2039  * @ndlp: The node the IO is destined for (used for the RRQ-active check).
2040  * This routine removes an nvme buffer from the head of the @phba
2041  * lpfc_nvme_buf_list and returns it to the caller.
2042  *
2043  * Return codes:
2044  *   NULL - Error
2045  *   Pointer to lpfc_nvme_buf - Success
2046  **/
2047 static struct lpfc_nvme_buf *
2048 lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
2049 {
2050         struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
2051         unsigned long iflag = 0;
2052         int found = 0;
2053
2054         spin_lock_irqsave(&phba->nvme_buf_list_get_lock, iflag);
2055         list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
2056                                  &phba->lpfc_nvme_buf_list_get, list) {
2057                 if (lpfc_test_rrq_active(phba, ndlp,
2058                                          lpfc_ncmd->cur_iocbq.sli4_lxritag))
2059                         continue;
2060                 list_del(&lpfc_ncmd->list);
2061                 found = 1;
2062                 break;
2063         }
2064         if (!found) {
2065                 spin_lock(&phba->nvme_buf_list_put_lock);
2066                 list_splice(&phba->lpfc_nvme_buf_list_put,
2067                             &phba->lpfc_nvme_buf_list_get);
2068                 INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
2069                 spin_unlock(&phba->nvme_buf_list_put_lock);
2070                 list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
2071                                          &phba->lpfc_nvme_buf_list_get, list) {
2072                         if (lpfc_test_rrq_active(
2073                                 phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
2074                                 continue;
2075                         list_del(&lpfc_ncmd->list);
2076                         found = 1;
2077                         break;
2078                 }
2079         }
2080         spin_unlock_irqrestore(&phba->nvme_buf_list_get_lock, iflag);
2081         if (!found)
2082                 return NULL;
2083         return lpfc_ncmd;
2084 }
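
/*
 * The get/put pair above and below implements a two-list lock-relief
 * pool: consumers drain lpfc_nvme_buf_list_get under the get lock,
 * producers append to lpfc_nvme_buf_list_put under the put lock, and
 * only when the get list runs dry are both locks taken so the put list
 * can be spliced over.  Steady-state get and put therefore do not
 * contend on the same spinlock.
 */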
2085
2086 /**
2087  * lpfc_release_nvme_buf - Return an nvme buffer back to the hba nvme buf list.
2088  * @phba: The HBA for which this call is being executed.
2089  * @lpfc_ncmd: The nvme buffer which is being released.
2090  *
2091  * This routine releases the @lpfc_ncmd nvme buffer by adding it to the tail
2092  * of the @phba lpfc_nvme_buf_list. For SLI4, XRIs are tied to the nvme
2093  * buffer and cannot be reused for at least RA_TOV if the exchange was
2094  * aborted.
2095  **/
2096 static void
2097 lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
2098 {
2099         unsigned long iflag = 0;
2100
2101         lpfc_ncmd->nonsg_phys = 0;
2102         if (lpfc_ncmd->exch_busy) {
2103                 spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
2104                                         iflag);
2105                 lpfc_ncmd->nvmeCmd = NULL;
2106                 list_add_tail(&lpfc_ncmd->list,
2107                         &phba->sli4_hba.lpfc_abts_nvme_buf_list);
2108                 spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
2109                                         iflag);
2110         } else {
2111                 lpfc_ncmd->nvmeCmd = NULL;
2112                 lpfc_ncmd->cur_iocbq.iocb_flag = LPFC_IO_NVME;
2113                 spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
2114                 list_add_tail(&lpfc_ncmd->list, &phba->lpfc_nvme_buf_list_put);
2115                 spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
2116         }
2117 }
2118
2119 /**
2120  * lpfc_nvme_create_localport - Create/Bind an nvme localport instance.
2121  * @vport: the lpfc_vport instance requesting a localport.
2122  *
2123  * This routine is invoked to create an nvme localport instance to bind
2124  * to the nvme_fc_transport.  It is called once during driver load
2125  * like lpfc_create_shost after all other services are initialized.
2126  * It requires a vport, vpi, and wwns at call time.  Other localport
2127  * parameters are modified as the driver's FCID and the Fabric WWN
2128  * are established.
2129  *
2130  * Return codes
2131  *      0 - successful
2132  *      -ENOMEM - no heap memory available
2133  *      other values - from nvme registration upcall
2134  **/
2135 int
2136 lpfc_nvme_create_localport(struct lpfc_vport *vport)
2137 {
2138         struct lpfc_hba  *phba = vport->phba;
2139         struct nvme_fc_port_info nfcp_info;
2140         struct nvme_fc_local_port *localport;
2141         struct lpfc_nvme_lport *lport;
2142         int len, ret = 0;
2143
2144         /* Initialize this localport instance.  The vport wwn usage ensures
2145          * that NPIV is accounted for.
2146          */
2147         memset(&nfcp_info, 0, sizeof(struct nvme_fc_port_info));
2148         nfcp_info.port_role = FC_PORT_ROLE_NVME_INITIATOR;
2149         nfcp_info.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
2150         nfcp_info.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
2151
2152         /* For now need + 1 to get around NVME transport logic */
2153         lpfc_nvme_template.max_sgl_segments = phba->cfg_sg_seg_cnt + 1;
2154         lpfc_nvme_template.max_hw_queues = phba->cfg_nvme_io_channel;
2155
2156         /* localport is allocated from the stack, but the registration
2157          * call allocates heap memory as well as the private area.
2158          */
2159         ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
2160                                          &vport->phba->pcidev->dev, &localport);
2161         if (!ret) {
2162                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
2163                                  "6005 Successfully registered local "
2164                                  "NVME port num %d, localP %p, private %p, "
2165                                  "sg_seg %d\n",
2166                                  localport->port_num, localport,
2167                                  localport->private,
2168                                  lpfc_nvme_template.max_sgl_segments);
2169
2170                 /* Private is our lport size declared in the template. */
2171                 lport = (struct lpfc_nvme_lport *)localport->private;
2172                 vport->localport = localport;
2173                 lport->vport = vport;
2174                 INIT_LIST_HEAD(&lport->rport_list);
2175                 vport->nvmei_support = 1;
2176         }
2177
2178         len  = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
2179         vport->phba->total_nvme_bufs += len;
2180         return ret;
2181 }
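
/*
 * Note that lpfc_new_nvme_buf() runs whether or not the localport
 * registration succeeded; the NVME buffer pool is sized to nvme_xri_max
 * either way, and only the registration status is returned to the
 * caller.
 */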
2182
2183 /**
2184  * lpfc_nvme_destroy_localport - Destroy lpfc_nvme bound to nvme transport.
2185  * @vport: pointer to the lpfc vport whose localport is being destroyed.
2186  *
2187  * This routine is invoked to destroy all lports bound to the phba.
2188  * The lport memory was allocated by the nvme fc transport and is
2189  * released there.  This routine ensures all rports bound to the
2190  * lport have been disconnected.
2191  *
2192  **/
2193 void
2194 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
2195 {
2196         struct nvme_fc_local_port *localport;
2197         struct lpfc_nvme_lport *lport;
2198         struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
2199         int ret;
2200
2201         if (vport->nvmei_support == 0)
2202                 return;
2203
2204         localport = vport->localport;
2205         vport->localport = NULL;
2206         lport = (struct lpfc_nvme_lport *)localport->private;
2207
2208         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2209                          "6011 Destroying NVME localport %p\n",
2210                          localport);
2211
2212         list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
2213                 /* The last node ref has to get released now before the rport
2214                  * private memory area is released by the transport.
2215                  */
2216                 list_del(&rport->list);
2217
2218                 init_completion(&rport->rport_unreg_done);
2219                 ret = nvme_fc_unregister_remoteport(rport->remoteport);
2220                 if (ret)
2221                         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2222                                          "6008 rport fail destroy %x\n", ret);
2223                 wait_for_completion_timeout(&rport->rport_unreg_done, msecs_to_jiffies(5000));
2224         }
2225         /* lport's rport list is clear.  Unregister
2226          * lport and release resources.
2227          */
2228         init_completion(&lport->lport_unreg_done);
2229         ret = nvme_fc_unregister_localport(localport);
2230         wait_for_completion_timeout(&lport->lport_unreg_done, msecs_to_jiffies(5000));
2231
2232         /* Regardless of the unregister upcall response, clear
2233          * nvmei_support.  All rports are unregistered and the
2234          * driver will clean up.
2235          */
2236         vport->nvmei_support = 0;
2237         if (ret == 0) {
2238                 lpfc_printf_vlog(vport,
2239                                  KERN_INFO, LOG_NVME_DISC,
2240                                  "6009 Unregistered lport Success\n");
2241         } else {
2242                 lpfc_printf_vlog(vport,
2243                                  KERN_INFO, LOG_NVME_DISC,
2244                                  "6010 Unregistered lport "
2245                                  "Failed, status x%x\n",
2246                                  ret);
2247         }
2248 }
2249
2250 void
2251 lpfc_nvme_update_localport(struct lpfc_vport *vport)
2252 {
2253         struct nvme_fc_local_port *localport;
2254         struct lpfc_nvme_lport *lport;
2255
2256         localport = vport->localport;
2257         lport = (struct lpfc_nvme_lport *)localport->private;
2258
2259         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
2260                          "6012 Update NVME lport %p did x%x\n",
2261                          localport, vport->fc_myDID);
2262
2263         localport->port_id = vport->fc_myDID;
2264         if (localport->port_id == 0)
2265                 localport->port_role = FC_PORT_ROLE_NVME_DISCOVERY;
2266         else
2267                 localport->port_role = FC_PORT_ROLE_NVME_INITIATOR;
2268
2269         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2270                          "6030 bound lport %p to DID x%06x\n",
2271                          lport, localport->port_id);
2272
2273 }
2274
2275 int
2276 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2277 {
2278         int ret = 0;
2279         struct nvme_fc_local_port *localport;
2280         struct lpfc_nvme_lport *lport;
2281         struct lpfc_nvme_rport *rport;
2282         struct nvme_fc_remote_port *remote_port;
2283         struct nvme_fc_port_info rpinfo;
2284
2285         lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2286                          "6006 Register NVME PORT. DID x%06x nlptype x%x\n",
2287                          ndlp->nlp_DID, ndlp->nlp_type);
2288
2289         localport = vport->localport;
2290         lport = (struct lpfc_nvme_lport *)localport->private;
2291
2292         if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
2293
2294                 /* The driver isn't expecting the rport wwn to change
2295                  * but it might get a different DID on a different
2296                  * fabric.
2297                  */
2298                 list_for_each_entry(rport, &lport->rport_list, list) {
2299                         if (rport->remoteport->port_name !=
2300                             wwn_to_u64(ndlp->nlp_portname.u.wwn))
2301                                 continue;
2302                         lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC,
2303                                          "6035 lport %p, found matching rport "
2304                                          "at wwpn 0x%llx, Data: x%x x%x x%x "
2305                                          "x%06x\n",
2306                                          lport,
2307                                          rport->remoteport->port_name,
2308                                          rport->remoteport->port_id,
2309                                          rport->remoteport->port_role,
2310                                          ndlp->nlp_type,
2311                                          ndlp->nlp_DID);
2312                         remote_port = rport->remoteport;
2313                         if ((remote_port->port_id == 0) &&
2314                             (remote_port->port_role ==
2315                              FC_PORT_ROLE_NVME_DISCOVERY)) {
2316                                 remote_port->port_id = ndlp->nlp_DID;
2317                                 remote_port->port_role &=
2318                                         ~FC_PORT_ROLE_NVME_DISCOVERY;
2319                                 if (ndlp->nlp_type & NLP_NVME_TARGET)
2320                                         remote_port->port_role |=
2321                                                 FC_PORT_ROLE_NVME_TARGET;
2322                                 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2323                                         remote_port->port_role |=
2324                                                 FC_PORT_ROLE_NVME_INITIATOR;
2325
2326                                 lpfc_printf_vlog(ndlp->vport, KERN_INFO,
2327                                                  LOG_NVME_DISC,
2328                                                  "6014 Rebinding lport to "
2329                                                  "rport wwpn 0x%llx, "
2330                                                  "Data: x%x x%x x%x x%06x\n",
2331                                                  remote_port->port_name,
2332                                                  remote_port->port_id,
2333                                                  remote_port->port_role,
2334                                                  ndlp->nlp_type,
2335                                                  ndlp->nlp_DID);
2336                         }
2337                         return 0;
2338                 }
2339
2340                 /* NVME rports are not preserved across devloss.
2341                  * Just register this instance.
2342                  */
2343                 rpinfo.port_id = ndlp->nlp_DID;
2344                 rpinfo.port_role = 0;
2345                 if (ndlp->nlp_type & NLP_NVME_TARGET)
2346                         rpinfo.port_role |= FC_PORT_ROLE_NVME_TARGET;
2347                 if (ndlp->nlp_type & NLP_NVME_INITIATOR)
2348                         rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
2349                 rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
2350                 rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
2351
2352                 ret = nvme_fc_register_remoteport(localport, &rpinfo,
2353                                                   &remote_port);
2354                 if (!ret) {
2355                         rport = remote_port->private;
2356                         rport->remoteport = remote_port;
2357                         rport->lport = lport;
2358                         rport->ndlp = lpfc_nlp_get(ndlp);
2359                         if (!rport->ndlp)
2360                                 return -1;
2361                         ndlp->nrport = rport;
2362                         INIT_LIST_HEAD(&rport->list);
2363                         list_add_tail(&rport->list, &lport->rport_list);
2364                         lpfc_printf_vlog(vport, KERN_INFO,
2365                                          LOG_NVME_DISC | LOG_NODE,
2366                                          "6022 Binding new rport to lport %p "
2367                                          "Rport WWNN 0x%llx, Rport WWPN 0x%llx "
2368                                          "DID x%06x Role x%x\n",
2369                                          lport,
2370                                          rpinfo.node_name, rpinfo.port_name,
2371                                          rpinfo.port_id, rpinfo.port_role);
2372                 } else {
2373                         lpfc_printf_vlog(vport, KERN_ERR,
2374                                          LOG_NVME_DISC | LOG_NODE,
2375                                          "6031 RemotePort Registration failed "
2376                                          "err: %d, DID x%06x\n",
2377                                          ret, ndlp->nlp_DID);
2378                 }
2379         } else {
2380                 ret = -EINVAL;
2381                 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2382                                  "6027 Unknown nlp_type x%x on DID x%06x "
2383                                  "ndlp %p.  Not Registering nvme rport\n",
2384                                  ndlp->nlp_type, ndlp->nlp_DID, ndlp);
2385         }
2386         return ret;
2387 }
2388
2389 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
2390  *
2391  * There is no notion of Devloss or rport recovery from the current
2392  * nvme_transport perspective.  Loss of an rport just means IO cannot
2393  * be sent and recovery is completely up to the initiator.
2394  * For now, the driver just unbinds the DID and port_role so that
2395  * no further IO can be issued.  Changes are planned for later.
2396  *
2397  * Notes - the ndlp reference count is not decremented here since
2398  * there is no nvme_transport api for devloss.  Node ref count
2399  * is only adjusted in driver unload.
2400  */
2401 void
2402 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2403 {
2404         int ret;
2405         struct nvme_fc_local_port *localport;
2406         struct lpfc_nvme_lport *lport;
2407         struct lpfc_nvme_rport *rport;
2408         struct nvme_fc_remote_port *remoteport;
2409
2410         localport = vport->localport;
2411
2412         /* This is a fundamental error.  The localport is always
2413          * available until driver unload.  Just exit.
2414          */
2415         if (!localport)
2416                 return;
2417
2418         lport = (struct lpfc_nvme_lport *)localport->private;
2419         if (!lport)
2420                 goto input_err;
2421
2422         rport = ndlp->nrport;
2423         if (!rport)
2424                 goto input_err;
2425
2426         remoteport = rport->remoteport;
2427         lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC,
2428                          "6033 Unreg nvme remoteport %p, portname x%llx, "
2429                          "port_id x%06x, portstate x%x port type x%x\n",
2430                          remoteport, remoteport->port_name,
2431                          remoteport->port_id, remoteport->port_state,
2432                          ndlp->nlp_type);
2433
2434         /* Sanity check ndlp type.  Only call for NVME ports. Don't
2435          * clear any rport state until the transport calls back.
2436          */
2437         if (ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_INITIATOR)) {
2438                 init_completion(&rport->rport_unreg_done);
2439                 ret = nvme_fc_unregister_remoteport(remoteport);
2440                 if (ret != 0) {
2441                         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2442                                          "6167 NVME unregister failed %d "
2443                                          "port_state x%x\n",
2444                                          ret, remoteport->port_state);
2445                 }
2446
2447                 /* Wait for the driver's delete completion routine to finish
2448                  * before proceeding.  This guarantees the transport and driver
2449                  * have completed the unreg process.
2450                  */
2451                 ret = wait_for_completion_timeout(&rport->rport_unreg_done, msecs_to_jiffies(5000));
2452                 if (ret == 0) {
2453                         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2454                                          "6169 Unreg nvme wait failed %d\n",
2455                                          ret);
2456                 }
2457         }
2458         return;
2459
2460  input_err:
2461         lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
2462                          "6168 State error: lport %p, rport %p FCID x%06x\n",
2463                          vport->localport, ndlp->rport, ndlp->nlp_DID);
2464 }