/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"

static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
                                                 struct lpfc_nvmet_rcv_ctx *,
                                                 dma_addr_t rspbuf,
                                                 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
                                                  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
                                          struct lpfc_nvmet_rcv_ctx *,
                                          uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
                                            struct lpfc_nvmet_rcv_ctx *,
                                            uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
                                           struct lpfc_nvmet_rcv_ctx *,
                                           uint32_t, uint16_t);

/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for NVME LS commands and
 * frees the memory resources used for the command.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_ls_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
        if (!phba->targetport)
                goto out;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

        if (status)
                atomic_inc(&tgtp->xmt_ls_rsp_error);
        else
                atomic_inc(&tgtp->xmt_ls_rsp_cmpl);

out:
        ctxp = cmdwqe->context2;
        rsp = &ctxp->ctx.ls_req;

        lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
                         ctxp->oxid, status, result);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
                        ctxp, status, result);

        lpfc_nlp_put(cmdwqe->context1);
        cmdwqe->context2 = NULL;
        cmdwqe->context3 = NULL;
        lpfc_sli_release_iocbq(phba, cmdwqe);
        rsp->done(rsp);
        kfree(ctxp);
}

/**
 * lpfc_nvmet_rq_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctxp: context to clean up
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer by reposting it to its
 * associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock.  Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
                   struct lpfc_dmabuf *mp)
{
        if (ctxp) {
                if (ctxp->txrdy) {
                        pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
                                      ctxp->txrdy_phys);
                        ctxp->txrdy = NULL;
                        ctxp->txrdy_phys = 0;
                }
                ctxp->state = LPFC_NVMET_STE_FREE;
        }
        lpfc_rq_buf_free(phba, mp);
}

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
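/**
 * lpfc_nvmet_ktime - Record IO latency statistics for a completed NVMET IO
 * @phba: Pointer to HBA context object.
 * @ctxp: Pointer to the receive context of the completed IO.
 *
 * Validates the ktime timestamps captured at each stage of the IO and
 * folds the per-segment deltas into the HBA-wide min/max/total counters
 * exported through debugfs. IOs with missing or out-of-order timestamps
 * are silently discarded.
 **/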
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
                 struct lpfc_nvmet_rcv_ctx *ctxp)
{
        uint64_t seg1, seg2, seg3, seg4, seg5;
        uint64_t seg6, seg7, seg8, seg9, seg10;

        if (!phba->ktime_on)
                return;

        if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
            !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
            !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
            !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
            !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
                return;

        if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
                return;
        if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
                return;
        if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
                return;
        if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
                return;
        if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
                return;
        if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
                return;
        if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
                return;
        if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
                return;
        if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
                return;
        /*
         * Segment 1 - Time from FCP command received by MSI-X ISR
         * to FCP command is passed to NVME Layer.
         * Segment 2 - Time from FCP command payload handed
         * off to NVME Layer to Driver receives a Command op
         * from NVME Layer.
         * Segment 3 - Time from Driver receives a Command op
         * from NVME Layer to Command is put on WQ.
         * Segment 4 - Time from Driver WQ put is done
         * to MSI-X ISR for Command cmpl.
         * Segment 5 - Time from MSI-X ISR for Command cmpl to
         * Command cmpl is passed to NVME Layer.
         * Segment 6 - Time from Command cmpl is passed to NVME
         * Layer to Driver receives a RSP op from NVME Layer.
         * Segment 7 - Time from Driver receives a RSP op from
         * NVME Layer to WQ put is done on TRSP FCP Status.
         * Segment 8 - Time from Driver WQ put is done on TRSP
         * FCP Status to MSI-X ISR for TRSP cmpl.
         * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
         * TRSP cmpl is passed to NVME Layer.
         * Segment 10 - Time from FCP command received by
         * MSI-X ISR to command is completed on wire.
         * (Segments 1 thru 8) for READDATA / WRITEDATA
         * (Segments 1 thru 4) for READDATA_RSP
         */
        seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
        seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
        seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
                seg1 - seg2;
        seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
                seg1 - seg2 - seg3;
        seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
                seg1 - seg2 - seg3 - seg4;

        /* For auto rsp commands seg6 thru seg10 will be 0 */
        if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
                seg6 = (ctxp->ts_nvme_status -
                        ctxp->ts_isr_cmd) -
                        seg1 - seg2 - seg3 - seg4 - seg5;
                seg7 = (ctxp->ts_status_wqput -
                        ctxp->ts_isr_cmd) -
                        seg1 - seg2 - seg3 -
                        seg4 - seg5 - seg6;
                seg8 = (ctxp->ts_isr_status -
                        ctxp->ts_isr_cmd) -
                        seg1 - seg2 - seg3 - seg4 -
                        seg5 - seg6 - seg7;
                seg9 = (ctxp->ts_status_nvme -
                        ctxp->ts_isr_cmd) -
                        seg1 - seg2 - seg3 - seg4 -
                        seg5 - seg6 - seg7 - seg8;
                seg10 = (ctxp->ts_isr_status -
                         ctxp->ts_isr_cmd);
        } else {
                seg6 = 0;
                seg7 = 0;
                seg8 = 0;
                seg9 = 0;
                seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
        }

        phba->ktime_seg1_total += seg1;
        if (seg1 < phba->ktime_seg1_min)
                phba->ktime_seg1_min = seg1;
        else if (seg1 > phba->ktime_seg1_max)
                phba->ktime_seg1_max = seg1;

        phba->ktime_seg2_total += seg2;
        if (seg2 < phba->ktime_seg2_min)
                phba->ktime_seg2_min = seg2;
        else if (seg2 > phba->ktime_seg2_max)
                phba->ktime_seg2_max = seg2;

        phba->ktime_seg3_total += seg3;
        if (seg3 < phba->ktime_seg3_min)
                phba->ktime_seg3_min = seg3;
        else if (seg3 > phba->ktime_seg3_max)
                phba->ktime_seg3_max = seg3;

        phba->ktime_seg4_total += seg4;
        if (seg4 < phba->ktime_seg4_min)
                phba->ktime_seg4_min = seg4;
        else if (seg4 > phba->ktime_seg4_max)
                phba->ktime_seg4_max = seg4;

        phba->ktime_seg5_total += seg5;
        if (seg5 < phba->ktime_seg5_min)
                phba->ktime_seg5_min = seg5;
        else if (seg5 > phba->ktime_seg5_max)
                phba->ktime_seg5_max = seg5;

        phba->ktime_data_samples++;
        if (!seg6)
                goto out;

        phba->ktime_seg6_total += seg6;
        if (seg6 < phba->ktime_seg6_min)
                phba->ktime_seg6_min = seg6;
        else if (seg6 > phba->ktime_seg6_max)
                phba->ktime_seg6_max = seg6;

        phba->ktime_seg7_total += seg7;
        if (seg7 < phba->ktime_seg7_min)
                phba->ktime_seg7_min = seg7;
        else if (seg7 > phba->ktime_seg7_max)
                phba->ktime_seg7_max = seg7;

        phba->ktime_seg8_total += seg8;
        if (seg8 < phba->ktime_seg8_min)
                phba->ktime_seg8_min = seg8;
        else if (seg8 > phba->ktime_seg8_max)
                phba->ktime_seg8_max = seg8;

        phba->ktime_seg9_total += seg9;
        if (seg9 < phba->ktime_seg9_min)
                phba->ktime_seg9_min = seg9;
        else if (seg9 > phba->ktime_seg9_max)
                phba->ktime_seg9_max = seg9;
out:
        phba->ktime_seg10_total += seg10;
        if (seg10 < phba->ktime_seg10_min)
                phba->ktime_seg10_min = seg10;
        else if (seg10 > phba->ktime_seg10_max)
                phba->ktime_seg10_max = seg10;
        phba->ktime_status_samples++;
}
#endif

/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. It is the completion handler for NVME FCP commands and
 * frees the memory resources used for the command.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_fcp_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result, op, start_clean;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
#endif

        ctxp = cmdwqe->context2;
        rsp = &ctxp->ctx.fcp_req;
        op = rsp->op;
        ctxp->flag &= ~LPFC_NVMET_IO_INP;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;

        if (!phba->targetport)
                goto out;

        lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
                         ctxp->oxid, op, status);

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        if (status) {
                rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
                rsp->transferred_length = 0;
                atomic_inc(&tgtp->xmt_fcp_rsp_error);
        } else {
                rsp->fcp_error = NVME_SC_SUCCESS;
                if (op == NVMET_FCOP_RSP)
                        rsp->transferred_length = rsp->rsplen;
                else
                        rsp->transferred_length = rsp->transfer_length;
                atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
        }

out:
        if ((op == NVMET_FCOP_READDATA_RSP) ||
            (op == NVMET_FCOP_RSP)) {
                /* Sanity check */
                ctxp->state = LPFC_NVMET_STE_DONE;
                ctxp->entry_cnt++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (phba->ktime_on) {
                        if (rsp->op == NVMET_FCOP_READDATA_RSP) {
                                ctxp->ts_isr_data =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_data_nvme =
                                        ktime_get_ns();
                                ctxp->ts_nvme_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_wqput =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_isr_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_nvme =
                                        ctxp->ts_data_nvme;
                        } else {
                                ctxp->ts_isr_status =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_status_nvme =
                                        ktime_get_ns();
                        }
                }
                if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                        id = smp_processor_id();
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                                "6703 CPU Check cmpl: "
                                                "cpu %d expect %d\n",
                                                id, ctxp->cpu);
                        if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
                                phba->cpucheck_cmpl_io[id]++;
                }
#endif
                rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (phba->ktime_on)
                        lpfc_nvmet_ktime(phba, ctxp);
#endif
                /* Let Abort cmpl repost the context */
                if (!(ctxp->flag & LPFC_NVMET_ABORT_OP))
                        lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
        } else {
                ctxp->entry_cnt++;
                start_clean = offsetof(struct lpfc_iocbq, wqe);
                memset(((char *)cmdwqe) + start_clean, 0,
                       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (phba->ktime_on) {
                        ctxp->ts_isr_data = cmdwqe->isr_timestamp;
                        ctxp->ts_data_nvme = ktime_get_ns();
                }
                if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                        id = smp_processor_id();
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                                "6704 CPU Check cmdcmpl: "
                                                "cpu %d expect %d\n",
                                                id, ctxp->cpu);
                        if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
                                phba->cpucheck_ccmpl_io[id]++;
                }
#endif
                rsp->done(rsp);
        }
}

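/**
 * lpfc_nvmet_xmt_ls_rsp - Transport callback to transmit an LS response
 * @tgtport: Pointer to the NVME targetport the response is sent on.
 * @rsp: Pointer to the transport-level LS response to transmit.
 *
 * Called by the NVME target transport to send the response for a
 * previously received LS request. Builds an XMIT_SEQUENCE WQE around
 * the response DMA buffer and posts it to the ELS work queue. On
 * failure the receive buffer is freed and an ABTS is issued for the
 * exchange.
 *
 * Returns 0 on success, -ENOMEM if no WQE could be prepared, or
 * -ENXIO if the WQE could not be issued.
 **/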
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_ls_req *rsp)
{
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct hbq_dmabuf *nvmebuf =
                (struct hbq_dmabuf *)ctxp->rqb_buffer;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
        struct lpfc_dmabuf dmabuf;
        struct ulp_bde64 bpl;
        int rc;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6023 %s: Entrypoint ctx %p %p\n", __func__,
                        ctxp, tgtport);

        nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
                                          rsp->rsplen);
        if (nvmewqeq == NULL) {
                atomic_inc(&nvmep->xmt_ls_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6150 LS Drop IO x%x: Prep\n",
                                ctxp->oxid);
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
                                                ctxp->sid, ctxp->oxid);
                return -ENOMEM;
        }

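        /*
         * The response payload is already described by the single BDE in
         * the WQE; mirror it in an on-stack BPL so the issue path can
         * convert it to an SGL. The stack objects are safe to use here
         * because lpfc_sli4_issue_wqe() consumes them before this
         * routine returns.
         */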
        /* Save numBdes for bpl2sgl */
        nvmewqeq->rsvd2 = 1;
        nvmewqeq->hba_wqidx = 0;
        nvmewqeq->context3 = &dmabuf;
        dmabuf.virt = &bpl;
        bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
        bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
        bpl.tus.f.bdeSize = rsp->rsplen;
        bpl.tus.f.bdeFlags = 0;
        bpl.tus.w = le32_to_cpu(bpl.tus.w);

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;

        lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
                         ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

        rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
        if (rc == WQE_SUCCESS) {
                /*
                 * Okay to repost buffer here, but wait till cmpl
                 * before freeing ctxp and iocbq.
                 */
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                ctxp->rqb_buffer = NULL;
                atomic_inc(&nvmep->xmt_ls_rsp);
                return 0;
        }
        /* Give back resources */
        atomic_inc(&nvmep->xmt_ls_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6151 LS Drop IO x%x: Issue %d\n",
                        ctxp->oxid, rc);

        lpfc_nlp_put(nvmewqeq->context1);

        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
        return -ENXIO;
}

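/**
 * lpfc_nvmet_xmt_fcp_op - Transport callback to perform an FCP operation
 * @tgtport: Pointer to the NVME targetport that owns the IO.
 * @rsp: Pointer to the transport-level FCP operation to perform.
 *
 * Called by the NVME target transport to move data or send a response
 * for a previously received FCP command. ABORT ops are turned into
 * ABTS requests; all other ops are encoded into an FCP WQE and posted
 * to a hardware work queue.
 *
 * Returns 0 on success or a negative error code on failure.
 **/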
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_iocbq *nvmewqeq;
        unsigned long iflags;
        int rc, id;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->ktime_on) {
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_nvme_status = ktime_get_ns();
                else
                        ctxp->ts_nvme_data = ktime_get_ns();
        }
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                id = smp_processor_id();
                ctxp->cpu = id;
                if (id < LPFC_CHECK_CPU_CNT)
                        phba->cpucheck_xmt_io[id]++;
                if (rsp->hwqid != id) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                        "6705 CPU Check OP: "
                                        "cpu %d expect %d\n",
                                        id, rsp->hwqid);
                        ctxp->cpu = rsp->hwqid;
                }
        }
#endif

        if (rsp->op == NVMET_FCOP_ABORT) {
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                                "6103 Abort op: oxri x%x %d cnt %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);

                lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
                                 "xri x%x state x%x cnt x%x\n",
                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);

                atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
                ctxp->entry_cnt++;
                ctxp->flag |= LPFC_NVMET_ABORT_OP;
                if (ctxp->flag & LPFC_NVMET_IO_INP)
                        lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                       ctxp->oxid);
                else
                        lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                         ctxp->oxid);
                return 0;
        }

        /* Sanity check */
        if (ctxp->state == LPFC_NVMET_STE_ABORT) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6102 Bad state IO x%x aborted\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
        if (nvmewqeq == NULL) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6152 FCP Drop IO x%x: Prep\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;
        nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
        ctxp->wqeq->hba_wqidx = rsp->hwqid;

        lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
                         ctxp->oxid, rsp->op, rsp->rsplen);

        /* For now we take hbalock */
        spin_lock_irqsave(&phba->hbalock, iflags);
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if (rc == WQE_SUCCESS) {
                ctxp->flag |= LPFC_NVMET_IO_INP;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (!phba->ktime_on)
                        return 0;
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_status_wqput = ktime_get_ns();
                else
                        ctxp->ts_data_wqput = ktime_get_ns();
#endif
                return 0;
        }

        /* Give back resources */
        atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6153 FCP Drop IO x%x: Issue: %d\n",
                        ctxp->oxid, rc);

        ctxp->wqeq->hba_wqidx = 0;
        nvmewqeq->context2 = NULL;
        nvmewqeq->context3 = NULL;
        rc = -EBUSY;
aerr:
        return rc;
}

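/**
 * lpfc_nvmet_targetport_delete - Cleanup callback for a deleted targetport
 * @targetport: Pointer to the targetport being deleted.
 *
 * Called by the NVME target transport once the last reference on the
 * targetport is dropped after nvmet_fc_unregister_targetport(). Wakes
 * the thread waiting in lpfc_nvmet_destroy_targetport().
 **/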
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct lpfc_nvmet_tgtport *tport = targetport->private;

        /* release any threads waiting for the unreg to complete */
        complete(&tport->tport_unreg_done);
}

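/* Template handed to the NVME target transport at targetport
 * registration; the queue count, segment limits and feature flags are
 * finalized from the driver configuration in
 * lpfc_nvmet_create_targetport().
 */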
static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,

        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .dma_boundary = 0xFFFFFFFF,

        /* optional features */
        .target_features = 0,
        /* sizes of additional private data for data structures */
        .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};

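/**
 * lpfc_nvmet_create_targetport - Register the HBA as an NVME targetport
 * @phba: pointer to lpfc hba data structure.
 *
 * Registers the physical port with the NVME target transport, using the
 * WWNN, WWPN and DID of the physical port, and initializes the per-port
 * statistics counters. Does nothing if a targetport is already
 * registered.
 *
 * Returns 0 on success or a negative error code on failure.
 **/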
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmet_fc_port_info pinfo;
        int error = 0;

        if (phba->targetport)
                return 0;

        memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
        pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
        pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
        pinfo.port_id = vport->fc_myDID;

        lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
        lpfc_tgttemplate.max_sgl_segments = phba->cfg_sg_seg_cnt;
        lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
                                           NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;

#ifdef CONFIG_LPFC_NVME_TARGET
        error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
                                             &phba->pcidev->dev,
                                             &phba->targetport);
#else
        error = -ENOMEM;
#endif
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6025 Cannot register NVME targetport "
                                "x%x\n", error);
                phba->targetport = NULL;
        } else {
                tgtp = (struct lpfc_nvmet_tgtport *)
                        phba->targetport->private;
                tgtp->phba = phba;

                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                                "6026 Registered NVME "
                                "targetport: %p, private %p "
                                "portnm %llx nodenm %llx\n",
                                phba->targetport, tgtp,
                                pinfo.port_name, pinfo.node_name);

                atomic_set(&tgtp->rcv_ls_req_in, 0);
                atomic_set(&tgtp->rcv_ls_req_out, 0);
                atomic_set(&tgtp->rcv_ls_req_drop, 0);
                atomic_set(&tgtp->xmt_ls_abort, 0);
                atomic_set(&tgtp->xmt_ls_rsp, 0);
                atomic_set(&tgtp->xmt_ls_drop, 0);
                atomic_set(&tgtp->xmt_ls_rsp_error, 0);
                atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
                atomic_set(&tgtp->xmt_fcp_abort, 0);
                atomic_set(&tgtp->xmt_fcp_drop, 0);
                atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
                atomic_set(&tgtp->xmt_fcp_read, 0);
                atomic_set(&tgtp->xmt_fcp_write, 0);
                atomic_set(&tgtp->xmt_fcp_rsp, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
                atomic_set(&tgtp->xmt_abort_rsp, 0);
                atomic_set(&tgtp->xmt_abort_rsp_error, 0);
                atomic_set(&tgtp->xmt_abort_cmpl, 0);
        }
        return error;
}

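/**
 * lpfc_nvmet_update_targetport - Push a new port address to the transport
 * @phba: pointer to lpfc hba data structure.
 *
 * Updates the registered targetport with the current DID of the
 * physical port, e.g. after a link bounce or fabric address change.
 *
 * Always returns 0.
 **/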
int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;

        if (!phba->targetport)
                return 0;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                         "6007 Update NVMET port %p did x%x\n",
                         phba->targetport, vport->fc_myDID);

        phba->targetport->port_id = vport->fc_myDID;
        return 0;
}

/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                            struct sli4_wcqe_xri_aborted *axri)
{
        /* TODO: work in progress */
}

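/**
 * lpfc_nvmet_destroy_targetport - Unregister the HBA NVME targetport
 * @phba: pointer to lpfc hba data structure.
 *
 * Unregisters the targetport from the NVME target transport and waits,
 * with a timeout, for lpfc_nvmet_targetport_delete() to signal that the
 * unregistration has completed before clearing the targetport pointer.
 **/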
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#ifdef CONFIG_LPFC_NVME_TARGET
        struct lpfc_nvmet_tgtport *tgtp;

        if (phba->nvmet_support == 0)
                return;
        if (phba->targetport) {
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
                init_completion(&tgtp->tport_unreg_done);
                nvmet_fc_unregister_targetport(phba->targetport);
                wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
        }
        phba->targetport = NULL;
#endif
}

/**
 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine processes an unsolicited NVME LS request received on an
 * RQ. It allocates a receive context for the exchange and hands the LS
 * payload to the NVME target transport. If the transport rejects the
 * request, or if no context can be allocated, the buffer is freed and
 * an ABTS is issued for the exchange.
 **/
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                           struct hbq_dmabuf *nvmebuf)
{
#ifdef CONFIG_LPFC_NVME_TARGET
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t *payload;
        uint32_t size, oxid, sid, rc;

        if (!nvmebuf || !phba->targetport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6154 LS Drop IO\n");
                oxid = 0;
                size = 0;
                sid = 0;
                goto dropit;
        }

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        payload = (uint32_t *)(nvmebuf->dbuf.virt);
        fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
        size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
        sid = sli4_sid_from_fc_hdr(fc_hdr);

        ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
        if (ctxp == NULL) {
                atomic_inc(&tgtp->rcv_ls_req_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6155 LS Drop IO x%x: Alloc\n",
                                oxid);
dropit:
                lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
                                 "xri x%x sz %d from %06x\n",
                                 oxid, size, sid);
                if (nvmebuf)
                        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                return;
        }
        ctxp->phba = phba;
        ctxp->size = size;
        ctxp->oxid = oxid;
        ctxp->sid = sid;
        ctxp->wqeq = NULL;
        ctxp->state = LPFC_NVMET_STE_RCV;
        ctxp->rqb_buffer = (void *)nvmebuf;

        lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
                         oxid, size, sid);
        /*
         * The calling sequence should be:
         * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
         * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
         */
        atomic_inc(&tgtp->rcv_ls_req_in);
        rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
                                 payload, size);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
                        "%08x %08x %08x\n", __func__, ctxp, size, rc,
                        *payload, *(payload+1), *(payload+2),
                        *(payload+3), *(payload+4), *(payload+5));

        if (rc == 0) {
                atomic_inc(&tgtp->rcv_ls_req_out);
                return;
        }

        lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
                         oxid, size, sid);

        atomic_inc(&tgtp->rcv_ls_req_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
                        ctxp->oxid, rc);

        /* We assume a rcv'ed cmd always fits into 1 buffer */
        if (nvmebuf)
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);

        atomic_inc(&tgtp->xmt_ls_abort);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
}

/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command RQ data structure.
 * @isr_timestamp: timestamp taken in the MSI-X ISR when the command arrived.
 *
 * This routine processes an unsolicited NVME FCP command received on an
 * RQ. It initializes the receive context attached to the buffer and hands
 * the command payload to the NVME target transport. If the transport
 * rejects the command, the buffer is reposted and an ABTS is issued for
 * the exchange.
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                            struct lpfc_sli_ring *pring,
                            struct rqb_dmabuf *nvmebuf,
                            uint64_t isr_timestamp)
{
#ifdef CONFIG_LPFC_NVME_TARGET
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        uint32_t *payload;
        uint32_t size, oxid, sid, rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
#endif

        if (!nvmebuf || !phba->targetport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6157 FCP Drop IO\n");
                oxid = 0;
                size = 0;
                sid = 0;
                goto dropit;
        }

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        payload = (uint32_t *)(nvmebuf->dbuf.virt);
        fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
        size = nvmebuf->bytes_recv;
        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
        sid = sli4_sid_from_fc_hdr(fc_hdr);

        ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
        if (ctxp == NULL) {
                atomic_inc(&tgtp->rcv_fcp_cmd_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6158 FCP Drop IO x%x: Alloc\n",
                                oxid);
                lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
                /* Cannot send ABTS without context */
                return;
        }
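        /* Zero only the embedded transport request union; the driver
         * fields of the context are re-initialized explicitly below.
         */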
        memset(ctxp, 0, sizeof(ctxp->ctx));
        ctxp->wqeq = NULL;
        ctxp->txrdy = NULL;
        ctxp->offset = 0;
        ctxp->phba = phba;
        ctxp->size = size;
        ctxp->oxid = oxid;
        ctxp->sid = sid;
        ctxp->state = LPFC_NVMET_STE_RCV;
        ctxp->rqb_buffer = nvmebuf;
        ctxp->entry_cnt = 1;
        ctxp->flag = 0;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->ktime_on) {
                ctxp->ts_isr_cmd = isr_timestamp;
                ctxp->ts_cmd_nvme = ktime_get_ns();
                ctxp->ts_nvme_data = 0;
                ctxp->ts_data_wqput = 0;
                ctxp->ts_isr_data = 0;
                ctxp->ts_data_nvme = 0;
                ctxp->ts_nvme_status = 0;
                ctxp->ts_status_wqput = 0;
                ctxp->ts_isr_status = 0;
                ctxp->ts_status_nvme = 0;
        }

        if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
                id = smp_processor_id();
                if (id < LPFC_CHECK_CPU_CNT)
                        phba->cpucheck_rcv_io[id]++;
        }
#endif

        lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d from %06x\n",
                         oxid, size, sid);

        atomic_inc(&tgtp->rcv_fcp_cmd_in);
        /*
         * The calling sequence should be:
         * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
         * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
         */
        rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
                                  payload, size);

        /* Process FCP command */
        if (rc == 0) {
                atomic_inc(&tgtp->rcv_fcp_cmd_out);
                return;
        }

        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6159 FCP Drop IO x%x: err x%x\n",
                        ctxp->oxid, rc);
dropit:
        lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
                         oxid, size, sid);
        if (oxid) {
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
                return;
        }

        if (nvmebuf) {
                nvmebuf->iocbq->hba_wqidx = 0;
                /* We assume a rcv'ed cmd always fits into 1 buffer */
                lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
        }
#endif
}

/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @piocb: pointer to the iocbq carrying the received nvme data buffer.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                          struct lpfc_iocbq *piocb)
{
        struct lpfc_dmabuf *d_buf;
        struct hbq_dmabuf *nvmebuf;

        d_buf = piocb->context2;
        nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

        if (phba->nvmet_support == 0) {
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                return;
        }
        lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}

/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: timestamp taken in the MSI-X ISR when the command arrived.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
                           struct lpfc_sli_ring *pring,
                           struct rqb_dmabuf *nvmebuf,
                           uint64_t isr_timestamp)
{
        if (phba->nvmet_support == 0) {
                lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
                return;
        }
        lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
                                    isr_timestamp);
}

/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine allocates a lpfc-WQE data structure from the driver
 * lpfc-WQE free-list and prepares an XMIT_SEQUENCE64 WQE that transmits
 * the LS response described by @rspbuf and @rspsize on the exchange
 * recorded in @ctxp. The reference count on the ndlp is incremented by 1
 * and the reference to the ndlp is put into context1 of the WQE data
 * structure for this WQE to hold the ndlp reference for the command's
 * callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared nvme wqe data structure
 *   NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
                       struct lpfc_nvmet_rcv_ctx *ctxp,
                       dma_addr_t rspbuf, uint16_t rspsize)
{
        struct lpfc_nodelist *ndlp;
        struct lpfc_iocbq *nvmewqe;
        union lpfc_wqe *wqe;

        if (!lpfc_is_link_up(phba)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6104 lpfc_nvmet_prep_ls_wqe: link err: "
                                "NPORT x%x oxid:x%x\n",
                                ctxp->sid, ctxp->oxid);
                return NULL;
        }

        /* Allocate buffer for command wqe */
        nvmewqe = lpfc_sli_get_iocbq(phba);
        if (nvmewqe == NULL) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
                                "NPORT x%x oxid:x%x\n",
                                ctxp->sid, ctxp->oxid);
                return NULL;
        }

        ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
            (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
                                "NPORT x%x oxid:x%x\n",
                                ctxp->sid, ctxp->oxid);
                goto nvme_wqe_free_wqeq_exit;
        }
        ctxp->wqeq = nvmewqe;

        /* prevent preparing wqe with NULL ndlp reference */
        nvmewqe->context1 = lpfc_nlp_get(ndlp);
        if (nvmewqe->context1 == NULL)
                goto nvme_wqe_free_wqeq_exit;
        nvmewqe->context2 = ctxp;

        wqe = &nvmewqe->wqe;
        memset(wqe, 0, sizeof(union lpfc_wqe));

        /* Words 0 - 2 */
        wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
        wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
        wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
        wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

        /* Word 3 */

        /* Word 4 */

        /* Word 5 */
        bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
        bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
        bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
        bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
        bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
               CMD_XMIT_SEQUENCE64_WQE);
        bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
        bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

        /* Word 8 */
        wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
        /* Needs to be set by caller */
        bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

        /* Word 10 */
        bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
        bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
               LPFC_WQE_LENLOC_WORD12);
        bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

        /* Word 11 */
        bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
               LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
               OTHER_COMMAND);

        /* Word 12 */
        wqe->xmit_sequence.xmit_len = rspsize;

        nvmewqe->retry = 1;
        nvmewqe->vport = phba->pport;
        nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
        nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

        /* Xmit NVME response to remote NPORT <did> */
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6039 Xmit NVME LS response to remote "
                        "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
                        ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
                        rspsize);
        return nvmewqe;

nvme_wqe_free_wqeq_exit:
        nvmewqe->context2 = NULL;
        nvmewqe->context3 = NULL;
        lpfc_sli_release_iocbq(phba, nvmewqe);
        return NULL;
}
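
/**
 * lpfc_nvmet_prep_fcp_wqe - Prepare an FCP WQE for an NVMET operation
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for the NVME FCP command.
 *
 * Encodes the transport FCP op requested in @ctxp into the 128-byte WQE
 * embedded in the receive buffer's iocbq and builds the SGL from the
 * transport scatterlist.
 *
 * Return code
 *   Pointer to the prepared nvme wqe data structure
 *   NULL - when validation fails or no WQE is available
 **/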
1205 static struct lpfc_iocbq *
1206 lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
1207                         struct lpfc_nvmet_rcv_ctx *ctxp)
1208 {
1209         struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
1210         struct lpfc_nvmet_tgtport *tgtp;
1211         struct sli4_sge *sgl;
1212         struct lpfc_nodelist *ndlp;
1213         struct lpfc_iocbq *nvmewqe;
1214         struct scatterlist *sgel;
1215         union lpfc_wqe128 *wqe;
1216         uint32_t *txrdy;
1217         dma_addr_t physaddr;
1218         int i, cnt;
1219         int xc = 1;
1220
1221         if (!lpfc_is_link_up(phba)) {
1222                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1223                                 "6107 lpfc_nvmet_prep_fcp_wqe: link err:"
1224                                 "NPORT x%x oxid:x%x\n", ctxp->sid,
1225                                 ctxp->oxid);
1226                 return NULL;
1227         }
1228
1229         ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
1230         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1231             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1232              (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1233                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1234                                 "6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
1235                                 "NPORT x%x oxid:x%x\n",
1236                                 ctxp->sid, ctxp->oxid);
1237                 return NULL;
1238         }
1239
1240         if (rsp->sg_cnt > phba->cfg_sg_seg_cnt) {
1241                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1242                                 "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
1243                                 "NPORT x%x oxid:x%x\n",
1244                                 ctxp->sid, ctxp->oxid);
1245                 return NULL;
1246         }
1247
1248         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1249         nvmewqe = ctxp->wqeq;
1250         if (nvmewqe == NULL) {
1251                 /* Allocate buffer for  command wqe */
1252                 nvmewqe = ctxp->rqb_buffer->iocbq;
1253                 if (nvmewqe == NULL) {
1254                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1255                                         "6110 lpfc_nvmet_prep_fcp_wqe: No "
1256                                         "WQE: NPORT x%x oxid:x%x\n",
1257                                         ctxp->sid, ctxp->oxid);
1258                         return NULL;
1259                 }
1260                 ctxp->wqeq = nvmewqe;
1261                 xc = 0; /* create new XRI */
1262                 nvmewqe->sli4_lxritag = NO_XRI;
1263                 nvmewqe->sli4_xritag = NO_XRI;
1264         }
1265
1266         /* Sanity check */
1267         if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
1268             (ctxp->entry_cnt == 1)) ||
1269             ((ctxp->state == LPFC_NVMET_STE_DATA) &&
1270             (ctxp->entry_cnt > 1))) {
1271                 wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
1272         } else {
1273                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1274                                 "6111 Wrong state %s: %d  cnt %d\n",
1275                                 __func__, ctxp->state, ctxp->entry_cnt);
1276                 return NULL;
1277         }
1278
1279         sgl  = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
1280         switch (rsp->op) {
1281         case NVMET_FCOP_READDATA:
1282         case NVMET_FCOP_READDATA_RSP:
1283                 /* Words 0 - 2 : The first sg segment */
1284                 sgel = &rsp->sg[0];
1285                 physaddr = sg_dma_address(sgel);
1286                 wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1287                 wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
1288                 wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
1289                 wqe->fcp_tsend.bde.addrHigh =
1290                         cpu_to_le32(putPaddrHigh(physaddr));
1291
1292                 /* Word 3 */
1293                 wqe->fcp_tsend.payload_offset_len = 0;
1294
1295                 /* Word 4 */
1296                 wqe->fcp_tsend.relative_offset = ctxp->offset;
1297
1298                 /* Word 5 */
1299
1300                 /* Word 6 */
1301                 bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
1302                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1303                 bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
1304                        nvmewqe->sli4_xritag);
1305
1306                 /* Word 7 */
1307                 bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);
1308
1309                 /* Word 8 */
1310                 wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;
1311
1312                 /* Word 9 */
1313                 bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
1314                 bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);
1315
1316                 /* Word 10 */
1317                 bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
1318                 bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
1319                 bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
1320                 bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
1321                        LPFC_WQE_LENLOC_WORD12);
1322                 bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
1323                 bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
1325                 if (phba->cfg_nvme_oas)
1326                         bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);
1327
1328                 /* Word 11 */
1329                 bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
1330                        LPFC_WQE_CQ_ID_DEFAULT);
1331                 bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
1332                        FCP_COMMAND_TSEND);
1333
1334                 /* Word 12 */
1335                 wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
1336
1337                 /* Set up 2 SKIP SGEs ahead of the data SGEs added below */
1338                 sgl->addr_hi = 0;
1339                 sgl->addr_lo = 0;
1340                 sgl->word2 = 0;
1341                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1342                 sgl->word2 = cpu_to_le32(sgl->word2);
1343                 sgl->sge_len = 0;
1344                 sgl++;
1345                 sgl->addr_hi = 0;
1346                 sgl->addr_lo = 0;
1347                 sgl->word2 = 0;
1348                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1349                 sgl->word2 = cpu_to_le32(sgl->word2);
1350                 sgl->sge_len = 0;
1351                 sgl++;
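                /*
                 * When the response rides along with the read data, it is
                 * either suppressed (SUP, if the initiator supports it and
                 * this is the good-status response) or embedded in WQE
                 * words 16 and up (WQES/IRSP/IRSPLEN).
                 */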
1352                 if (rsp->op == NVMET_FCOP_READDATA_RSP) {
1353                         atomic_inc(&tgtp->xmt_fcp_read_rsp);
1354                         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
1355                         if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
1356                             (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN)) {
1357                                 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
1358                                 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
1359                                 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
1360                                 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
1361                         } else {
1362                                 bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
1363                                 bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
1364                                 bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
1365                                 bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
1366                                        ((rsp->rsplen >> 2) - 1));
1367                                 memcpy(&wqe->words[16], rsp->rspaddr,
1368                                        rsp->rsplen);
1369                         }
1370                 } else {
1371                         atomic_inc(&tgtp->xmt_fcp_read);
1372
1373                         bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
1374                         bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
1375                         bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
1376                         bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
1377                         bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
1378                 }
1379                 ctxp->state = LPFC_NVMET_STE_DATA;
1380                 break;
1381
1382         case NVMET_FCOP_WRITEDATA:
1383                 /* Words 0 - 2 : The first sg segment */
1384                 txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
1385                                        GFP_KERNEL, &physaddr);
1386                 if (!txrdy) {
1387                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
1388                                         "6041 Bad txrdy buffer: oxid x%x\n",
1389                                         ctxp->oxid);
1390                         return NULL;
1391                 }
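                /*
                 * Stash the transfer-ready buffer and its DMA address in
                 * the context so they can be released later.
                 */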
1392                 ctxp->txrdy = txrdy;
1393                 ctxp->txrdy_phys = physaddr;
1394                 wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1395                 wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
1396                 wqe->fcp_treceive.bde.addrLow =
1397                         cpu_to_le32(putPaddrLow(physaddr));
1398                 wqe->fcp_treceive.bde.addrHigh =
1399                         cpu_to_le32(putPaddrHigh(physaddr));
1400
1401                 /* Word 3 */
1402                 wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;
1403
1404                 /* Word 4 */
1405                 wqe->fcp_treceive.relative_offset = ctxp->offset;
1406
1407                 /* Word 5 */
1408
1409                 /* Word 6 */
1410                 bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
1411                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1412                 bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
1413                        nvmewqe->sli4_xritag);
1414
1415                 /* Word 7 */
1416                 bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
1417                 bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
1418                        CMD_FCP_TRECEIVE64_WQE);
1419
1420                 /* Word 8 */
1421                 wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;
1422
1423                 /* Word 9 */
1424                 bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
1425                 bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);
1426
1427                 /* Word 10 */
1428                 bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
1429                 bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
1430                 bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
1431                 bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
1432                        LPFC_WQE_LENLOC_WORD12);
1433                 bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
1434                 bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
1435                 bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
1436                 bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
1438                 if (phba->cfg_nvme_oas)
1439                         bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);
1440
1441                 /* Word 11 */
1442                 bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
1443                        LPFC_WQE_CQ_ID_DEFAULT);
1444                 bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
1445                        FCP_COMMAND_TRECEIVE);
1446                 bf_set(wqe_sup, &wqe->fcp_treceive.wqe_com, 0);
1447
1448                 /* Word 12 */
1449                 wqe->fcp_treceive.fcp_data_len = rsp->transfer_length;
1450
1451                 /* Fill the transfer-ready payload; set up 1 TXRDY and 1 SKIP SGE */
1452                 txrdy[0] = 0;
1453                 txrdy[1] = cpu_to_be32(rsp->transfer_length);
1454                 txrdy[2] = 0;
1455
1456                 sgl->addr_hi = putPaddrHigh(physaddr);
1457                 sgl->addr_lo = putPaddrLow(physaddr);
1458                 sgl->word2 = 0;
1459                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
1460                 sgl->word2 = cpu_to_le32(sgl->word2);
1461                 sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
1462                 sgl++;
1463                 sgl->addr_hi = 0;
1464                 sgl->addr_lo = 0;
1465                 sgl->word2 = 0;
1466                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
1467                 sgl->word2 = cpu_to_le32(sgl->word2);
1468                 sgl->sge_len = 0;
1469                 sgl++;
1470                 ctxp->state = LPFC_NVMET_STE_DATA;
1471                 atomic_inc(&tgtp->xmt_fcp_write);
1472                 break;
1473
1474         case NVMET_FCOP_RSP:
1475                 /* Words 0 - 2 */
1476                 physaddr = rsp->rspdma;
1477                 wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1478                 wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
1479                 wqe->fcp_trsp.bde.addrLow =
1480                         cpu_to_le32(putPaddrLow(physaddr));
1481                 wqe->fcp_trsp.bde.addrHigh =
1482                         cpu_to_le32(putPaddrHigh(physaddr));
1483
1484                 /* Word 3 */
1485                 wqe->fcp_trsp.response_len = rsp->rsplen;
1486
1487                 /* Word 4 */
1488                 wqe->fcp_trsp.rsvd_4_5[0] = 0;
1489
1491                 /* Word 5 */
1492
1493                 /* Word 6 */
1494                 bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
1495                        phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1496                 bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
1497                        nvmewqe->sli4_xritag);
1498
1499                 /* Word 7 */
1500                 bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
1501                 bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);
1502
1503                 /* Word 8 */
1504                 wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;
1505
1506                 /* Word 9 */
1507                 bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
1508                 bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);
1509
1510                 /* Word 10 */
1511                 bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
1512                 bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
1513                 bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
1514                 bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
1515                        LPFC_WQE_LENLOC_WORD3);
1516                 bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
1518                 if (phba->cfg_nvme_oas)
1519                         bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);
1520
1521                 /* Word 11 */
1522                 bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
1523                        LPFC_WQE_CQ_ID_DEFAULT);
1524                 bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
1525                        FCP_COMMAND_TRSP);
1526                 bf_set(wqe_sup, &wqe->fcp_trsp.wqe_com, 0);
1527                 ctxp->state = LPFC_NVMET_STE_RSP;
1528
1529                 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
1530                         /* Good response - all zeros on the wire */
1531                         bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
1532                         bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
1533                         bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
1534                 } else {
1535                         bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
1536                         bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
1537                         bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
1538                                ((rsp->rsplen >> 2) - 1));
1539                         memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
1540                 }
1541
1542                 /* Use rspbuf, NOT sg list */
1543                 rsp->sg_cnt = 0;
1544                 sgl->word2 = 0;
1545                 atomic_inc(&tgtp->xmt_fcp_rsp);
1546                 break;
1547
1548         default:
1549                 lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
1550                                 "6064 Unknown Rsp Op %d\n",
1551                                 rsp->op);
1552                 return NULL;
1553         }
1554
1555         nvmewqe->retry = 1;
1556         nvmewqe->vport = phba->pport;
1557         nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
1558         nvmewqe->context1 = ndlp;
1559
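        /*
         * Convert the request's scatter list into SLI4 data SGEs, marking
         * the last entry and advancing ctxp->offset so the next burst for
         * this exchange continues at the correct relative offset.
         */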
1560         for (i = 0; i < rsp->sg_cnt; i++) {
1561                 sgel = &rsp->sg[i];
1562                 physaddr = sg_dma_address(sgel);
1563                 cnt = sg_dma_len(sgel);
1564                 sgl->addr_hi = putPaddrHigh(physaddr);
1565                 sgl->addr_lo = putPaddrLow(physaddr);
1566                 sgl->word2 = 0;
1567                 bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
1568                 bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
1569                 if ((i + 1) == rsp->sg_cnt)
1570                         bf_set(lpfc_sli4_sge_last, sgl, 1);
1571                 sgl->word2 = cpu_to_le32(sgl->word2);
1572                 sgl->sge_len = cpu_to_le32(cnt);
1573                 sgl++;
1574                 ctxp->offset += cnt;
1575         }
1576         return nvmewqe;
1577 }
1578
1579 /**
1580  * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
1581  * @phba: Pointer to HBA context object.
1582  * @cmdwqe: Pointer to driver command WQE object.
1583  * @wcqe: Pointer to driver response CQE object.
1584  *
1585  * This function is called from the SLI ring event handler with no lock
1586  * held. It is the completion handler for an NVME ABTS on an FCP command
1587  * and frees the memory resources used for the command.
1588  **/
1589 static void
1590 lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1591                              struct lpfc_wcqe_complete *wcqe)
1592 {
1593         struct lpfc_nvmet_rcv_ctx *ctxp;
1594         struct lpfc_nvmet_tgtport *tgtp;
1595         uint32_t status, result;
1596
1597         ctxp = cmdwqe->context2;
1598         status = bf_get(lpfc_wcqe_c_status, wcqe);
1599         result = wcqe->parameter;
1600
1601         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1602         atomic_inc(&tgtp->xmt_abort_cmpl);
1603
1604         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
1605                         "6165 Abort cmpl: xri x%x WCQE: %08x %08x %08x %08x\n",
1606                         ctxp->oxid, wcqe->word0, wcqe->total_data_placed,
1607                         result, wcqe->word3);
1608
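        /* The exchange is complete; recycle the receive buffer to the RQ. */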
1609         ctxp->state = LPFC_NVMET_STE_DONE;
1610         lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
1611
1612         cmdwqe->context2 = NULL;
1613         cmdwqe->context3 = NULL;
1614         lpfc_sli_release_iocbq(phba, cmdwqe);
1615 }
1616
1617 /**
1618  * lpfc_nvmet_xmt_fcp_abort_cmp - Completion handler for ABTS
1619  * @phba: Pointer to HBA context object.
1620  * @cmdwqe: Pointer to driver command WQE object.
1621  * @wcqe: Pointer to driver response CQE object.
1622  *
1623  * This function is called from the SLI ring event handler with no lock
1624  * held. It is the completion handler for an NVME ABTS on an FCP command
1625  * and frees the memory resources used for the command.
1626  **/
1627 static void
1628 lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1629                              struct lpfc_wcqe_complete *wcqe)
1630 {
1631         struct lpfc_nvmet_rcv_ctx *ctxp;
1632         struct lpfc_nvmet_tgtport *tgtp;
1633         uint32_t status, result;
1634
1635         ctxp = cmdwqe->context2;
1636         status = bf_get(lpfc_wcqe_c_status, wcqe);
1637         result = wcqe->parameter;
1638
1639         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1640         atomic_inc(&tgtp->xmt_abort_cmpl);
1641
1642         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1643                         "6070 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
1644                         ctxp, wcqe->word0, wcqe->total_data_placed,
1645                         result, wcqe->word3);
1646
1647         if (ctxp) {
1648                 /* Sanity check */
1649                 if (ctxp->state != LPFC_NVMET_STE_ABORT) {
1650                         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
1651                                         "6112 ABORT Wrong state:%d oxid x%x\n",
1652                                         ctxp->state, ctxp->oxid);
1653                 }
1654                 ctxp->state = LPFC_NVMET_STE_DONE;
1655                 lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
1656                 cmdwqe->context2 = NULL;
1657                 cmdwqe->context3 = NULL;
1658         }
1659 }
1660
1661 /**
1662  * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
1663  * @phba: Pointer to HBA context object.
1664  * @cmdwqe: Pointer to driver command WQE object.
1665  * @wcqe: Pointer to driver response CQE object.
1666  *
1667  * This function is called from the SLI ring event handler with no lock
1668  * held. It is the completion handler for an NVME ABTS on an LS command
1669  * and frees the memory resources used for the command.
1670  **/
1671 static void
1672 lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
1673                             struct lpfc_wcqe_complete *wcqe)
1674 {
1675         struct lpfc_nvmet_rcv_ctx *ctxp;
1676         struct lpfc_nvmet_tgtport *tgtp;
1677         uint32_t status, result;
1678
1679         ctxp = cmdwqe->context2;
1680         status = bf_get(lpfc_wcqe_c_status, wcqe);
1681         result = wcqe->parameter;
1682
1683         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1684         atomic_inc(&tgtp->xmt_abort_cmpl);
1685
1686         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1687                         "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
1688                         ctxp, wcqe->word0, wcqe->total_data_placed,
1689                         result, wcqe->word3);
1690
1691         if (ctxp) {
1692                 cmdwqe->context2 = NULL;
1693                 cmdwqe->context3 = NULL;
1694                 lpfc_sli_release_iocbq(phba, cmdwqe);
1695                 kfree(ctxp);
1696         } else
1697                 lpfc_sli_release_iocbq(phba, cmdwqe);
1698 }
1699
1700 static int
1701 lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
1702                              struct lpfc_nvmet_rcv_ctx *ctxp,
1703                              uint32_t sid, uint16_t xri)
1704 {
1705         struct lpfc_nvmet_tgtport *tgtp;
1706         struct lpfc_iocbq *abts_wqeq;
1707         union lpfc_wqe *wqe_abts;
1708         struct lpfc_nodelist *ndlp;
1709
1710         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1711                         "6067 Abort: sid x%x xri x%x/x%x\n",
1712                         sid, xri, ctxp->wqeq->sli4_xritag);
1713
1714         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1715
1716         ndlp = lpfc_findnode_did(phba->pport, sid);
1717         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1718             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1719             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1720                 atomic_inc(&tgtp->xmt_abort_rsp_error);
1721                 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1722                                 "6134 Drop ABTS - wrong NDLP state x%x.\n",
1723                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
1724
1725                 /* An ABTS request is never failed back to the caller. */
1726                 return 0;
1727         }
1728
1729         abts_wqeq = ctxp->wqeq;
1730         wqe_abts = &abts_wqeq->wqe;
1731         ctxp->state = LPFC_NVMET_STE_ABORT;
1732
1733         /*
1734          * Since we zero the whole WQE, we need to ensure we set the WQE fields
1735          * that were initialized in lpfc_sli4_nvmet_alloc.
1736          */
1737         memset(wqe_abts, 0, sizeof(union lpfc_wqe));
1738
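        /*
         * The abort of an unsolicited frame is sent as a BLS ABTS carried
         * by an XMIT_SEQUENCE64 WQE; words 5 - 11 below build that
         * request.
         */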
1739         /* Word 5 */
1740         bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
1741         bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
1742         bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
1743         bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
1744         bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);
1745
1746         /* Word 6 */
1747         bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
1748                phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
1749         bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
1750                abts_wqeq->sli4_xritag);
1751
1752         /* Word 7 */
1753         bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
1754                CMD_XMIT_SEQUENCE64_WQE);
1755         bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
1756         bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
1757         bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);
1758
1759         /* Word 8 */
1760         wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;
1761
1762         /* Word 9 */
1763         bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
1764         /* Needs to be set by caller */
1765         bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);
1766
1767         /* Word 10 */
1768         bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
1769         bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
1770         bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
1771                LPFC_WQE_LENLOC_WORD12);
1772         bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
1773         bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);
1774
1775         /* Word 11 */
1776         bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
1777                LPFC_WQE_CQ_ID_DEFAULT);
1778         bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
1779                OTHER_COMMAND);
1780
1781         abts_wqeq->vport = phba->pport;
1782         abts_wqeq->context1 = ndlp;
1783         abts_wqeq->context2 = ctxp;
1784         abts_wqeq->context3 = NULL;
1785         abts_wqeq->rsvd2 = 0;
1786         /* hba_wqidx should already be set up from the command being aborted */
1787         abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
1788         abts_wqeq->iocb.ulpLe = 1;
1789
1790         lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
1791                         "6069 Issue ABTS to xri x%x reqtag x%x\n",
1792                         xri, abts_wqeq->iotag);
1793         return 1;
1794 }
1795
1796 static int
1797 lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
1798                                struct lpfc_nvmet_rcv_ctx *ctxp,
1799                                uint32_t sid, uint16_t xri)
1800 {
1801         struct lpfc_nvmet_tgtport *tgtp;
1802         struct lpfc_iocbq *abts_wqeq;
1803         union lpfc_wqe *abts_wqe;
1804         struct lpfc_nodelist *ndlp;
1805         unsigned long flags;
1806         int rc;
1807
1808         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1809         if (!ctxp->wqeq) {
1810                 ctxp->wqeq = ctxp->rqb_buffer->iocbq;
1811                 ctxp->wqeq->hba_wqidx = 0;
1812         }
1813
1814         ndlp = lpfc_findnode_did(phba->pport, sid);
1815         if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
1816             ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
1817             (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
1818                 atomic_inc(&tgtp->xmt_abort_rsp_error);
1819                 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1820                                 "6160 Drop ABTS - wrong NDLP state x%x.\n",
1821                                 (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
1822
1823                 /* An ABTS request is never failed back to the caller. */
1824                 return 0;
1825         }
1826
1827         /* Issue ABTS for this WQE based on iotag */
1828         ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
1829         if (!ctxp->abort_wqeq) {
1830                 lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1831                                 "6161 Abort failed: No wqeqs: "
1832                                 "xri: x%x\n", ctxp->oxid);
1833                 /* An ABTS request is never failed back to the caller. */
1834                 return 0;
1835         }
1836         abts_wqeq = ctxp->abort_wqeq;
1837         abts_wqe = &abts_wqeq->wqe;
1838         ctxp->state = LPFC_NVMET_STE_ABORT;
1839
1840         /* Announce the abort request before issuing it. */
1841         lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
1842                         "6162 Abort Request to rport DID x%06x "
1843                         "for oxid x%x xri x%x\n",
1844                         ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);
1845
1846         /* If the hba is getting reset, this flag is set.  It is
1847          * cleared when the reset is complete and rings reestablished.
1848          */
1849         spin_lock_irqsave(&phba->hbalock, flags);
1850         /* Driver-queued commands are in the process of being flushed */
1851         if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
1852                 spin_unlock_irqrestore(&phba->hbalock, flags);
1853                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1854                                 "6163 Driver in reset cleanup - flushing "
1855                                 "NVME Req now. hba_flag x%x oxid x%x\n",
1856                                 phba->hba_flag, ctxp->oxid);
1857                 lpfc_sli_release_iocbq(phba, abts_wqeq);
1858                 return 0;
1859         }
1860
1861         /* Outstanding abort is in progress */
1862         if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
1863                 spin_unlock_irqrestore(&phba->hbalock, flags);
1864                 lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1865                                 "6164 Outstanding NVME I/O Abort Request "
1866                                 "still pending on oxid x%x\n",
1867                                 ctxp->oxid);
1868                 lpfc_sli_release_iocbq(phba, abts_wqeq);
1869                 return 0;
1870         }
1871
1872         /* Ready - mark outstanding as aborted by driver. */
1873         abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;
1874
1875         /* WQEs are reused.  Clear stale data and set key fields to
1876          * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
1877          */
1878         memset(abts_wqe, 0, sizeof(union lpfc_wqe));
1879
1880         /* word 3 */
1881         bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);
1882
1883         /* word 7 */
1884         bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
1885         bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
1886
1887         /* word 8 - tell the FW to abort the IO associated with this
1888          * outstanding exchange ID.
1889          */
1890         abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;
1891
1892         /* word 9 - this is the iotag for the abts_wqe completion. */
1893         bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
1894                abts_wqeq->iotag);
1895
1896         /* word 10 */
1897         bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
1898         bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
1899
1900         /* word 11 */
1901         bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
1902         bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
1903         bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
1904
1905         /* ABTS WQE must go to the same WQ as the WQE to be aborted */
1906         abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
1907         abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
1908         abts_wqeq->iocb_cmpl = NULL;
1909         abts_wqeq->iocb_flag |= LPFC_IO_NVME;
1910         abts_wqeq->context2 = ctxp;
1911         rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
1912         spin_unlock_irqrestore(&phba->hbalock, flags);
1913         if (rc == WQE_SUCCESS)
1914                 return 0;
1915
1916         lpfc_sli_release_iocbq(phba, abts_wqeq);
1917         lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
1918                         "6166 Failed abts issue_wqe with status x%x "
1919                         "for oxid x%x.\n",
1920                         rc, ctxp->oxid);
1921         return 1;
1922 }
1923
1924
1925 static int
1926 lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
1927                                  struct lpfc_nvmet_rcv_ctx *ctxp,
1928                                  uint32_t sid, uint16_t xri)
1929 {
1930         struct lpfc_nvmet_tgtport *tgtp;
1931         struct lpfc_iocbq *abts_wqeq;
1932         unsigned long flags;
1933         int rc;
1934
1935         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1936         if (!ctxp->wqeq) {
1937                 ctxp->wqeq = ctxp->rqb_buffer->iocbq;
1938                 ctxp->wqeq->hba_wqidx = 0;
1939         }
1940
1941         rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
1942         if (rc == 0)
1943                 goto aerr;
1944
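        /*
         * The ABTS WQE was built in the exchange's iocbq by
         * lpfc_nvmet_unsol_issue_abort() above; hook the FCP abort
         * completion handler and post it to the FCP ring under hbalock.
         */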
1945         spin_lock_irqsave(&phba->hbalock, flags);
1946         abts_wqeq = ctxp->wqeq;
1947         abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_abort_cmp;
1948         abts_wqeq->iocb_cmpl = NULL;
1949         abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
1950         rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
1951         spin_unlock_irqrestore(&phba->hbalock, flags);
1952         if (rc == WQE_SUCCESS) {
1953                 atomic_inc(&tgtp->xmt_abort_rsp);
1954                 return 0;
1955         }
1956
1957 aerr:
1958         lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
1959         atomic_inc(&tgtp->xmt_abort_rsp_error);
1960         lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1961                         "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
1962                         ctxp->oxid, rc);
1963         return 1;
1964 }
1965
1966 static int
1967 lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
1968                                 struct lpfc_nvmet_rcv_ctx *ctxp,
1969                                 uint32_t sid, uint16_t xri)
1970 {
1971         struct lpfc_nvmet_tgtport *tgtp;
1972         struct lpfc_iocbq *abts_wqeq;
1973         union lpfc_wqe *wqe_abts;
1974         unsigned long flags;
1975         int rc;
1976
1977         tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1978         if (!ctxp->wqeq) {
1979                 /* Issue ABTS for this WQE based on iotag */
1980                 ctxp->wqeq = lpfc_sli_get_iocbq(phba);
1981                 if (!ctxp->wqeq) {
1982                         lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
1983                                         "6068 Abort failed: No wqeqs: "
1984                                         "xri: x%x\n", xri);
1985                         /* An ABTS request is never failed back to the caller. */
1986                         kfree(ctxp);
1987                         return 0;
1988                 }
1989         }
1990         abts_wqeq = ctxp->wqeq;
1991         wqe_abts = &abts_wqeq->wqe;
1992         lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
1993
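        /*
         * LS aborts complete through lpfc_nvmet_xmt_ls_abort_cmp(), which
         * also frees the context, and are posted on the ELS ring.
         */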
1994         spin_lock_irqsave(&phba->hbalock, flags);
1995         abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
1996         abts_wqeq->iocb_cmpl = NULL;
1997         abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
1998         rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
1999         spin_unlock_irqrestore(&phba->hbalock, flags);
2000         if (rc == WQE_SUCCESS) {
2001                 atomic_inc(&tgtp->xmt_abort_rsp);
2002                 return 0;
2003         }
2004
2005         atomic_inc(&tgtp->xmt_abort_rsp_error);
2006         abts_wqeq->context2 = NULL;
2007         abts_wqeq->context3 = NULL;
2008         lpfc_sli_release_iocbq(phba, abts_wqeq);
2009         kfree(ctxp);
2010         lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
2011                         "6056 Failed to Issue ABTS. Status x%x\n", rc);
2012         return 0;
2013 }