/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * “Broadcom” refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>

#include "lpfc_version.h"
#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
                                                 struct lpfc_nvmet_rcv_ctx *,
                                                 dma_addr_t rspbuf,
                                                 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
                                                  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
                                          struct lpfc_nvmet_rcv_ctx *,
                                          uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
                                            struct lpfc_nvmet_rcv_ctx *,
                                            uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
                                           struct lpfc_nvmet_rcv_ctx *,
                                           uint32_t, uint16_t);
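/*
 * The helpers above fall into two groups: WQE preparation for LS and FCP
 * responses, and the three ABTS paths (solicited FCP, unsolicited FCP, and
 * unsolicited LS) used when an exchange must be torn down.
 */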
/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_ls_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;
        if (!phba->targetport)
                goto out;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

        if (status)
                atomic_inc(&tgtp->xmt_ls_rsp_error);
        else
                atomic_inc(&tgtp->xmt_ls_rsp_cmpl);

out:
        ctxp = cmdwqe->context2;
        rsp = &ctxp->ctx.ls_req;

        lpfc_nvmeio_data(phba, "NVMET LS  CMPL: xri x%x stat x%x result x%x\n",
                         ctxp->oxid, status, result);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
                        ctxp, status, result);

        lpfc_nlp_put(cmdwqe->context1);
        cmdwqe->context2 = NULL;
        cmdwqe->context3 = NULL;
        lpfc_sli_release_iocbq(phba, cmdwqe);
        rsp->done(rsp);
        kfree(ctxp);
}
/**
 * lpfc_nvmet_rq_post - Repost an NVMET RQ DMA buffer and clean up context
 * @phba: HBA buffer is associated with
 * @ctxp: context to clean up
 * @mp: Buffer to free
 *
 * Description: Frees the given DMA buffer in the appropriate way given by
 * reposting it to its associated RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
void
lpfc_nvmet_rq_post(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp,
                   struct lpfc_dmabuf *mp)
{
        if (ctxp) {
                if (ctxp->txrdy) {
                        pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
                                      ctxp->txrdy_phys);
                        ctxp->txrdy = NULL;
                        ctxp->txrdy_phys = 0;
                }
                ctxp->state = LPFC_NVMET_STE_FREE;
        }
        lpfc_rq_buf_free(phba, mp);
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
                 struct lpfc_nvmet_rcv_ctx *ctxp)
{
        uint64_t seg1, seg2, seg3, seg4, seg5;
        uint64_t seg6, seg7, seg8, seg9, seg10;

        if (!phba->ktime_on)
                return;

        if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
            !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
            !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
            !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
            !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
                return;
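        /*
         * The segment math below assumes the timestamps were captured in
         * pipeline order; any sample that is not monotonically increasing
         * is discarded rather than skewing the statistics.
         */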
        if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
                return;
        if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
                return;
        if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
                return;
        if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
                return;
        if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
                return;
        if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
                return;
        if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
                return;
        if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
                return;
        if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
                return;
        /*
         * Segment 1 - Time from FCP command received by MSI-X ISR
         * to FCP command is passed to NVME Layer.
         * Segment 2 - Time from FCP command payload handed
         * off to NVME Layer to Driver receives a Command op
         * from NVME Layer.
         * Segment 3 - Time from Driver receives a Command op
         * from NVME Layer to Command is put on WQ.
         * Segment 4 - Time from Driver WQ put is done
         * to MSI-X ISR for Command cmpl.
         * Segment 5 - Time from MSI-X ISR for Command cmpl to
         * Command cmpl is passed to NVME Layer.
         * Segment 6 - Time from Command cmpl is passed to NVME
         * Layer to Driver receives a RSP op from NVME Layer.
         * Segment 7 - Time from Driver receives a RSP op from
         * NVME Layer to WQ put is done on TRSP FCP Status.
         * Segment 8 - Time from Driver WQ put is done on TRSP
         * FCP Status to MSI-X ISR for TRSP cmpl.
         * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
         * TRSP cmpl is passed to NVME Layer.
         * Segment 10 - Time from FCP command received by
         * MSI-X ISR to command is completed on wire.
         * (Segments 1 thru 8) for READDATA / WRITEDATA
         * (Segments 1 thru 4) for READDATA_RSP
         */
        seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
        seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
        seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
                seg1 - seg2;
        seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
                seg1 - seg2 - seg3;
        seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
                seg1 - seg2 - seg3 - seg4;

        /* For auto rsp commands seg6 thru seg10 will be 0 */
        if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
                seg6 = (ctxp->ts_nvme_status -
                        ctxp->ts_isr_cmd) -
                        seg1 - seg2 - seg3 - seg4 - seg5;
                seg7 = (ctxp->ts_status_wqput -
                        ctxp->ts_isr_cmd) -
                        seg1 - seg2 - seg3 -
                        seg4 - seg5 - seg6;
                seg8 = (ctxp->ts_isr_status -
                        ctxp->ts_isr_cmd) -
                        seg1 - seg2 - seg3 - seg4 -
                        seg5 - seg6 - seg7;
                seg9 = (ctxp->ts_status_nvme -
                        ctxp->ts_isr_cmd) -
                        seg1 - seg2 - seg3 - seg4 -
                        seg5 - seg6 - seg7 - seg8;
                seg10 = (ctxp->ts_isr_status -
                         ctxp->ts_isr_cmd);
        } else {
                seg6 = 0;
                seg7 = 0;
                seg8 = 0;
                seg9 = 0;
                seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
        }
        phba->ktime_seg1_total += seg1;
        if (seg1 < phba->ktime_seg1_min)
                phba->ktime_seg1_min = seg1;
        else if (seg1 > phba->ktime_seg1_max)
                phba->ktime_seg1_max = seg1;

        phba->ktime_seg2_total += seg2;
        if (seg2 < phba->ktime_seg2_min)
                phba->ktime_seg2_min = seg2;
        else if (seg2 > phba->ktime_seg2_max)
                phba->ktime_seg2_max = seg2;

        phba->ktime_seg3_total += seg3;
        if (seg3 < phba->ktime_seg3_min)
                phba->ktime_seg3_min = seg3;
        else if (seg3 > phba->ktime_seg3_max)
                phba->ktime_seg3_max = seg3;

        phba->ktime_seg4_total += seg4;
        if (seg4 < phba->ktime_seg4_min)
                phba->ktime_seg4_min = seg4;
        else if (seg4 > phba->ktime_seg4_max)
                phba->ktime_seg4_max = seg4;

        phba->ktime_seg5_total += seg5;
        if (seg5 < phba->ktime_seg5_min)
                phba->ktime_seg5_min = seg5;
        else if (seg5 > phba->ktime_seg5_max)
                phba->ktime_seg5_max = seg5;

        phba->ktime_data_samples++;
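        /*
         * Status-phase segments only exist when a separate TRSP round
         * trip occurred; auto-response completions leave seg6 zero and
         * contribute to the data-sample counters alone.
         */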
        if (!seg6)
                return;

        phba->ktime_seg6_total += seg6;
        if (seg6 < phba->ktime_seg6_min)
                phba->ktime_seg6_min = seg6;
        else if (seg6 > phba->ktime_seg6_max)
                phba->ktime_seg6_max = seg6;

        phba->ktime_seg7_total += seg7;
        if (seg7 < phba->ktime_seg7_min)
                phba->ktime_seg7_min = seg7;
        else if (seg7 > phba->ktime_seg7_max)
                phba->ktime_seg7_max = seg7;

        phba->ktime_seg8_total += seg8;
        if (seg8 < phba->ktime_seg8_min)
                phba->ktime_seg8_min = seg8;
        else if (seg8 > phba->ktime_seg8_max)
                phba->ktime_seg8_max = seg8;

        phba->ktime_seg9_total += seg9;
        if (seg9 < phba->ktime_seg9_min)
                phba->ktime_seg9_min = seg9;
        else if (seg9 > phba->ktime_seg9_max)
                phba->ktime_seg9_max = seg9;

        phba->ktime_seg10_total += seg10;
        if (seg10 < phba->ktime_seg10_min)
                phba->ktime_seg10_min = seg10;
        else if (seg10 > phba->ktime_seg10_max)
                phba->ktime_seg10_max = seg10;
        phba->ktime_status_samples++;
}
#endif
/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                          struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmefc_tgt_fcp_req *rsp;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t status, result, op, start_clean;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
#endif

        ctxp = cmdwqe->context2;
        rsp = &ctxp->ctx.fcp_req;
        op = rsp->op;
        ctxp->flag &= ~LPFC_NVMET_IO_INP;

        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;

        if (!phba->targetport)
                goto out;

        lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
                         ctxp->oxid, op, status);

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        if (status) {
                rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
                rsp->transferred_length = 0;
                atomic_inc(&tgtp->xmt_fcp_rsp_error);
        } else {
                rsp->fcp_error = NVME_SC_SUCCESS;
                if (op == NVMET_FCOP_RSP)
                        rsp->transferred_length = rsp->rsplen;
                else
                        rsp->transferred_length = rsp->transfer_length;
                atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
        }
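        /*
         * A READDATA_RSP or RSP completion finishes the exchange: report
         * the result upward and let the context be reposted. Any other op
         * is an intermediate data burst, so only the WQE is scrubbed for
         * reuse before completing it to the transport.
         */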
out:
        if ((op == NVMET_FCOP_READDATA_RSP) ||
            (op == NVMET_FCOP_RSP)) {
                ctxp->state = LPFC_NVMET_STE_DONE;
                ctxp->entry_cnt++;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (phba->ktime_on) {
                        if (rsp->op == NVMET_FCOP_READDATA_RSP) {
                                ctxp->ts_isr_data =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_data_nvme =
                                        ktime_get_ns();
                                ctxp->ts_nvme_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_wqput =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_isr_status =
                                        ctxp->ts_data_nvme;
                                ctxp->ts_status_nvme =
                                        ctxp->ts_data_nvme;
                        } else {
                                ctxp->ts_isr_status =
                                        cmdwqe->isr_timestamp;
                                ctxp->ts_status_nvme =
                                        ktime_get_ns();
                        }
                }
                if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                        id = smp_processor_id();
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                                "6703 CPU Check cmpl: "
                                                "cpu %d expect %d\n",
                                                id, ctxp->cpu);
                        if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
                                phba->cpucheck_cmpl_io[id]++;
                }
#endif
                rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (phba->ktime_on)
                        lpfc_nvmet_ktime(phba, ctxp);
#endif
                /* Let Abort cmpl repost the context */
                if (!(ctxp->flag & LPFC_NVMET_ABORT_OP))
                        lpfc_nvmet_rq_post(phba, ctxp,
                                           &ctxp->rqb_buffer->hbuf);
        } else {
                ctxp->entry_cnt++;
                start_clean = offsetof(struct lpfc_iocbq, wqe);
                memset(((char *)cmdwqe) + start_clean, 0,
                       (sizeof(struct lpfc_iocbq) - start_clean));
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (phba->ktime_on) {
                        ctxp->ts_isr_data = cmdwqe->isr_timestamp;
                        ctxp->ts_data_nvme = ktime_get_ns();
                }
                if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                        id = smp_processor_id();
                        if (ctxp->cpu != id)
                                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                                "6704 CPU Check cmdcmpl: "
                                                "cpu %d expect %d\n",
                                                id, ctxp->cpu);
                        if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
                                phba->cpucheck_ccmpl_io[id]++;
                }
#endif
                rsp->done(rsp);
        }
}
static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_ls_req *rsp)
{
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct hbq_dmabuf *nvmebuf =
                (struct hbq_dmabuf *)ctxp->rqb_buffer;
        struct lpfc_iocbq *nvmewqeq;
        struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
        struct lpfc_dmabuf dmabuf;
        struct ulp_bde64 bpl;
        int rc;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6023 %s: Entrypoint ctx %p %p\n", __func__,
                        ctxp, tgtport);

        nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
                                          rsp->rsplen);
        if (nvmewqeq == NULL) {
                atomic_inc(&nvmep->xmt_ls_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6150 LS Drop IO x%x: Prep\n",
                                ctxp->oxid);
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
                                                ctxp->sid, ctxp->oxid);
                return -ENOMEM;
        }
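        /*
         * The BPL describing the response payload lives on the stack and
         * is handed to the WQE through a temporary lpfc_dmabuf in context3;
         * this is only safe because lpfc_sli4_issue_wqe() consumes it
         * before this routine returns.
         */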
        /* Save numBdes for bpl2sgl */
        nvmewqeq->rsvd2 = 1;
        nvmewqeq->hba_wqidx = 0;
        nvmewqeq->context3 = &dmabuf;
        dmabuf.virt = &bpl;
        bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
        bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
        bpl.tus.f.bdeSize = rsp->rsplen;
        bpl.tus.f.bdeFlags = 0;
        bpl.tus.w = le32_to_cpu(bpl.tus.w);

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;

        lpfc_nvmeio_data(phba, "NVMET LS  RESP: xri x%x wqidx x%x len x%x\n",
                         ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);

        rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
        if (rc == WQE_SUCCESS) {
                /*
                 * Okay to repost buffer here, but wait till cmpl
                 * before freeing ctxp and iocbq.
                 */
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                ctxp->rqb_buffer = 0;
                atomic_inc(&nvmep->xmt_ls_rsp);
                return 0;
        }

        /* Give back resources */
        atomic_inc(&nvmep->xmt_ls_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6151 LS Drop IO x%x: Issue %d\n",
                        ctxp->oxid, rc);

        lpfc_nlp_put(nvmewqeq->context1);

        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
        return -ENXIO;
}
static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
                      struct nvmefc_tgt_fcp_req *rsp)
{
        struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
        struct lpfc_nvmet_rcv_ctx *ctxp =
                container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
        struct lpfc_hba *phba = ctxp->phba;
        struct lpfc_iocbq *nvmewqeq;
        unsigned long iflags;
        int rc;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->ktime_on) {
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_nvme_status = ktime_get_ns();
                else
                        ctxp->ts_nvme_data = ktime_get_ns();
        }
        if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
                int id = smp_processor_id();

                ctxp->cpu = id;
                if (id < LPFC_CHECK_CPU_CNT)
                        phba->cpucheck_xmt_io[id]++;
                if (rsp->hwqid != id) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                        "6705 CPU Check OP: "
                                        "cpu %d expect %d\n",
                                        id, rsp->hwqid);
                        ctxp->cpu = rsp->hwqid;
                }
        }
#endif

        if (rsp->op == NVMET_FCOP_ABORT) {
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                                "6103 Abort op: oxri x%x %d cnt %d\n",
                                ctxp->oxid, ctxp->state, ctxp->entry_cnt);

                lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
                                 "xri x%x state x%x cnt x%x\n",
                                 ctxp->oxid, ctxp->state, ctxp->entry_cnt);

                atomic_inc(&lpfc_nvmep->xmt_fcp_abort);
                ctxp->entry_cnt++;
                ctxp->flag |= LPFC_NVMET_ABORT_OP;
                if (ctxp->flag & LPFC_NVMET_IO_INP)
                        lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                       ctxp->oxid);
                else
                        lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
                                                         ctxp->oxid);
                return 0;
        }

        /* Sanity check */
        if (ctxp->state == LPFC_NVMET_STE_ABORT) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6102 Bad state IO x%x aborted\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
        if (nvmewqeq == NULL) {
                atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6152 FCP Drop IO x%x: Prep\n",
                                ctxp->oxid);
                rc = -ENXIO;
                goto aerr;
        }

        nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
        nvmewqeq->iocb_cmpl = NULL;
        nvmewqeq->context2 = ctxp;
        nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
        ctxp->wqeq->hba_wqidx = rsp->hwqid;

        lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
                         ctxp->oxid, rsp->op, rsp->rsplen);

        /* For now we take hbalock */
        spin_lock_irqsave(&phba->hbalock, iflags);
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
        spin_unlock_irqrestore(&phba->hbalock, iflags);
        if (rc == WQE_SUCCESS) {
                ctxp->flag |= LPFC_NVMET_IO_INP;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
                if (!phba->ktime_on)
                        return 0;
                if (rsp->op == NVMET_FCOP_RSP)
                        ctxp->ts_status_wqput = ktime_get_ns();
                else
                        ctxp->ts_data_wqput = ktime_get_ns();
#endif
                return 0;
        }

        /* Give back resources */
        atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6153 FCP Drop IO x%x: Issue: %d\n",
                        ctxp->oxid, rc);

        ctxp->wqeq->hba_wqidx = 0;
        nvmewqeq->context2 = NULL;
        nvmewqeq->context3 = NULL;
        rc = -EBUSY;
aerr:
        return rc;
}
static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct lpfc_nvmet_tgtport *tport = targetport->private;

        /* release any threads waiting for the unreg to complete */
        complete(&tport->tport_unreg_done);
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
        .targetport_delete = lpfc_nvmet_targetport_delete,
        .xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
        .fcp_op         = lpfc_nvmet_xmt_fcp_op,

        .max_hw_queues  = 1,
        .max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
        .dma_boundary = 0xFFFFFFFF,

        /* optional features */
        .target_features = 0,
        /* sizes of additional private data for data structures */
        .target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};
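/*
 * The queue count, segment count, and feature flags in the template are
 * placeholders; lpfc_nvmet_create_targetport() overwrites them from the
 * module configuration before registering with the nvmet-fc transport.
 */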
int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;
        struct lpfc_nvmet_tgtport *tgtp;
        struct nvmet_fc_port_info pinfo;
        int error = 0;

        if (phba->targetport)
                return 0;

        memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
        pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
        pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
        pinfo.port_id = vport->fc_myDID;

        lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
        lpfc_tgttemplate.max_sgl_segments = phba->cfg_sg_seg_cnt;
        lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
                                           NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
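        /*
         * READDATA_RSP lets the transport collapse the final read data and
         * the FCP response into a single operation (handled here as
         * NVMET_FCOP_READDATA_RSP); NEEDS_CMD_CPUSCHED asks nvmet-fc to
         * schedule command work on the CPU the command arrived on.
         */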
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
                                             &phba->pcidev->dev,
                                             &phba->targetport);
#else
        error = -ENOMEM;
#endif
        if (error) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6025 Cannot register NVME targetport "
                                "x%x\n", error);
                phba->targetport = NULL;
        } else {
                tgtp = (struct lpfc_nvmet_tgtport *)
                        phba->targetport->private;
                tgtp->phba = phba;

                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                                "6026 Registered NVME "
                                "targetport: %p, private %p "
                                "portnm %llx nodenm %llx\n",
                                phba->targetport, tgtp,
                                pinfo.port_name, pinfo.node_name);

                atomic_set(&tgtp->rcv_ls_req_in, 0);
                atomic_set(&tgtp->rcv_ls_req_out, 0);
                atomic_set(&tgtp->rcv_ls_req_drop, 0);
                atomic_set(&tgtp->xmt_ls_abort, 0);
                atomic_set(&tgtp->xmt_ls_rsp, 0);
                atomic_set(&tgtp->xmt_ls_drop, 0);
                atomic_set(&tgtp->xmt_ls_rsp_error, 0);
                atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
                atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
                atomic_set(&tgtp->xmt_fcp_abort, 0);
                atomic_set(&tgtp->xmt_fcp_drop, 0);
                atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
                atomic_set(&tgtp->xmt_fcp_read, 0);
                atomic_set(&tgtp->xmt_fcp_write, 0);
                atomic_set(&tgtp->xmt_fcp_rsp, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
                atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
                atomic_set(&tgtp->xmt_abort_rsp, 0);
                atomic_set(&tgtp->xmt_abort_rsp_error, 0);
                atomic_set(&tgtp->xmt_abort_cmpl, 0);
        }
        return error;
}
int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
        struct lpfc_vport *vport = phba->pport;

        if (!phba->targetport)
                return 0;

        lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
                         "6007 Update NVMET port %p did x%x\n",
                         phba->targetport, vport->fc_myDID);

        phba->targetport->port_id = vport->fc_myDID;
        return 0;
}
/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
                            struct sli4_wcqe_xri_aborted *axri)
{
        /* TODO: work in progress */
}
void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_tgtport *tgtp;

        if (phba->nvmet_support == 0)
                return;
        if (phba->targetport) {
                tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
                init_completion(&tgtp->tport_unreg_done);
                nvmet_fc_unregister_targetport(phba->targetport);
                wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
        }
        phba->targetport = NULL;
#endif
}
/**
 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                           struct hbq_dmabuf *nvmebuf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        struct lpfc_nvmet_rcv_ctx *ctxp;
        uint32_t *payload;
        uint32_t size, oxid, sid, rc;

        if (!nvmebuf || !phba->targetport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6154 LS Drop IO\n");
                oxid = 0;
                size = 0;
                sid = 0;
                ctxp = NULL;
                goto dropit;
        }

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        payload = (uint32_t *)(nvmebuf->dbuf.virt);
        fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
        size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
        sid = sli4_sid_from_fc_hdr(fc_hdr);

        ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
        if (ctxp == NULL) {
                atomic_inc(&tgtp->rcv_ls_req_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6155 LS Drop IO x%x: Alloc\n",
                                oxid);
dropit:
                lpfc_nvmeio_data(phba, "NVMET LS  DROP: "
                                 "xri x%x sz %d from %06x\n",
                                 oxid, size, sid);
                if (nvmebuf)
                        lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                return;
        }
        ctxp->phba = phba;
        ctxp->size = size;
        ctxp->oxid = oxid;
        ctxp->sid = sid;
        ctxp->wqeq = NULL;
        ctxp->state = LPFC_NVMET_STE_RCV;
        ctxp->rqb_buffer = (void *)nvmebuf;

        lpfc_nvmeio_data(phba, "NVMET LS   RCV: xri x%x sz %d from %06x\n",
                         oxid, size, sid);
        /*
         * The calling sequence should be:
         * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp ->_req->done
         * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
         */
        atomic_inc(&tgtp->rcv_ls_req_in);
        rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
                                 payload, size);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
                        "%08x %08x %08x\n", __func__, ctxp, size, rc,
                        *payload, *(payload+1), *(payload+2),
                        *(payload+3), *(payload+4), *(payload+5));

        if (rc == 0) {
                atomic_inc(&tgtp->rcv_ls_req_out);
                return;
        }

        lpfc_nvmeio_data(phba, "NVMET LS  DROP: xri x%x sz %d from %06x\n",
                         oxid, size, sid);

        atomic_inc(&tgtp->rcv_ls_req_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
                        ctxp->oxid, rc);

        /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
        lpfc_in_buf_free(phba, &nvmebuf->dbuf);

        atomic_inc(&tgtp->xmt_ls_abort);
        lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
}
/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 * @isr_timestamp: timestamp taken in the interrupt handler.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
                            struct lpfc_sli_ring *pring,
                            struct rqb_dmabuf *nvmebuf,
                            uint64_t isr_timestamp)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        struct fc_frame_header *fc_hdr;
        uint32_t *payload;
        uint32_t size, oxid, sid, rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        uint32_t id;
#endif

        if (!nvmebuf || !phba->targetport) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6157 FCP Drop IO\n");
                oxid = 0;
                size = 0;
                sid = 0;
                ctxp = NULL;
                goto dropit;
        }

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        payload = (uint32_t *)(nvmebuf->dbuf.virt);
        fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
        size = nvmebuf->bytes_recv;
        oxid = be16_to_cpu(fc_hdr->fh_ox_id);
        sid = sli4_sid_from_fc_hdr(fc_hdr);
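        /*
         * Unlike the LS path, the receive context is embedded in the RQ
         * buffer (nvmebuf->context) rather than allocated here, so failure
         * paths must repost the buffer instead of freeing a context.
         */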
        ctxp = (struct lpfc_nvmet_rcv_ctx *)nvmebuf->context;
        if (ctxp == NULL) {
                atomic_inc(&tgtp->rcv_fcp_cmd_drop);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6158 FCP Drop IO x%x: Alloc\n",
                                oxid);
                lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
                /* Cannot send ABTS without context */
                return;
        }
        memset(ctxp, 0, sizeof(ctxp->ctx));
        ctxp->wqeq = NULL;
        ctxp->txrdy = NULL;
        ctxp->offset = 0;
        ctxp->phba = phba;
        ctxp->size = size;
        ctxp->oxid = oxid;
        ctxp->sid = sid;
        ctxp->state = LPFC_NVMET_STE_RCV;
        ctxp->rqb_buffer = nvmebuf;
        ctxp->entry_cnt = 1;
        ctxp->flag = 0;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
        if (phba->ktime_on) {
                ctxp->ts_isr_cmd = isr_timestamp;
                ctxp->ts_cmd_nvme = ktime_get_ns();
                ctxp->ts_nvme_data = 0;
                ctxp->ts_data_wqput = 0;
                ctxp->ts_isr_data = 0;
                ctxp->ts_data_nvme = 0;
                ctxp->ts_nvme_status = 0;
                ctxp->ts_status_wqput = 0;
                ctxp->ts_isr_status = 0;
                ctxp->ts_status_nvme = 0;
        }

        if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
                id = smp_processor_id();
                if (id < LPFC_CHECK_CPU_CNT)
                        phba->cpucheck_rcv_io[id]++;
        }
#endif

        lpfc_nvmeio_data(phba, "NVMET FCP  RCV: xri x%x sz %d from %06x\n",
                         oxid, size, sid);

        atomic_inc(&tgtp->rcv_fcp_cmd_in);
        /*
         * The calling sequence should be:
         * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
         * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
         */
        rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
                                  payload, size);

        /* Process FCP command */
        if (rc == 0) {
                atomic_inc(&tgtp->rcv_fcp_cmd_out);
                return;
        }

        atomic_inc(&tgtp->rcv_fcp_cmd_drop);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                        "6159 FCP Drop IO x%x: err x%x\n",
                        ctxp->oxid, rc);
dropit:
        lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
                         oxid, size, sid);
        if (oxid) {
                lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
                return;
        }

        if (nvmebuf) {
                nvmebuf->iocbq->hba_wqidx = 0;
                /* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
                lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
        }
#endif
}
/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @piocb: pointer to the iocbq carrying the received nvme LS buffer.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                          struct lpfc_iocbq *piocb)
{
        struct lpfc_dmabuf *d_buf;
        struct hbq_dmabuf *nvmebuf;

        d_buf = piocb->context2;
        nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

        if (phba->nvmet_support == 0) {
                lpfc_in_buf_free(phba, &nvmebuf->dbuf);
                return;
        }
        lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}
/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 * @isr_timestamp: timestamp taken in the interrupt handler.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
                           struct lpfc_sli_ring *pring,
                           struct rqb_dmabuf *nvmebuf,
                           uint64_t isr_timestamp)
{
        if (phba->nvmet_support == 0) {
                lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
                return;
        }
        lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
                                    isr_timestamp);
}
/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine is used for allocating an lpfc-WQE data structure from
 * the driver lpfc-WQE free-list and preparing the WQE with the parameters
 * passed into the routine for the discovery state machine to issue an
 * Extended Link Service (NVME) command. It is a generic lpfc-WQE allocation
 * and preparation routine that is used by all the discovery state machine
 * routines; the NVME command-specific fields are set up later by the
 * individual discovery machine routines after calling this routine, which
 * allocates and prepares a generic WQE data structure. It fills in the
 * Buffer Descriptor Entries (BDEs) and allocates buffers for both command
 * payload and response payload (if expected). The reference count on the
 * ndlp is incremented by 1, and the reference to the ndlp is put into
 * context1 of the WQE data structure for this WQE to hold the ndlp
 * reference for the command's callback function to access later.
 *
 * Return code
 *   Pointer to the newly allocated/prepared nvme wqe data structure
 *   NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
                       struct lpfc_nvmet_rcv_ctx *ctxp,
                       dma_addr_t rspbuf, uint16_t rspsize)
{
        struct lpfc_nodelist *ndlp;
        struct lpfc_iocbq *nvmewqe;
        union lpfc_wqe *wqe;

        if (!lpfc_is_link_up(phba)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6104 lpfc_nvmet_prep_ls_wqe: link err: "
                                "NPORT x%x oxid:x%x\n",
                                ctxp->sid, ctxp->oxid);
                return NULL;
        }

        /* Allocate buffer for command wqe */
        nvmewqe = lpfc_sli_get_iocbq(phba);
        if (nvmewqe == NULL) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
                                "NPORT x%x oxid:x%x\n",
                                ctxp->sid, ctxp->oxid);
                return NULL;
        }

        ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
            (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
                                "6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
                                "NPORT x%x oxid:x%x\n",
                                ctxp->sid, ctxp->oxid);
                goto nvme_wqe_free_wqeq_exit;
        }
        ctxp->wqeq = nvmewqe;

        /* prevent preparing wqe with NULL ndlp reference */
        nvmewqe->context1 = lpfc_nlp_get(ndlp);
        if (nvmewqe->context1 == NULL)
                goto nvme_wqe_free_wqeq_exit;
        nvmewqe->context2 = ctxp;

        wqe = &nvmewqe->wqe;
        memset(wqe, 0, sizeof(union lpfc_wqe));
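        /*
         * Build an XMIT_SEQUENCE64 WQE by hand, word by word, per the SLI-4
         * layout: words 0-2 carry the response BDE, words 5-11 the sequence
         * and queue control fields, and word 12 the transmit length.
         */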
        /* Words 0 - 2 */
        wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
        wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
        wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
        wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));

        /* Word 5 */
        bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
        bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
        bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
        bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
        bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
               CMD_XMIT_SEQUENCE64_WQE);
        bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
        bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);

        /* Word 8 */
        wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
        /* Needs to be set by caller */
        bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

        /* Word 10 */
        bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
        bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
               LPFC_WQE_LENLOC_WORD12);
        bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

        /* Word 11 */
        bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
               LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
               OTHER_COMMAND);

        /* Word 12 */
        wqe->xmit_sequence.xmit_len = rspsize;

        nvmewqe->retry = 1;
        nvmewqe->vport = phba->pport;
        nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
        nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

        /* Xmit NVME response to remote NPORT <did> */
        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
                        "6039 Xmit NVME LS response to remote "
                        "NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
                        ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
                        rspsize);
        return nvmewqe;

nvme_wqe_free_wqeq_exit:
        nvmewqe->context2 = NULL;
        nvmewqe->context3 = NULL;
        lpfc_sli_release_iocbq(phba, nvmewqe);
        return NULL;
}
static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
                        struct lpfc_nvmet_rcv_ctx *ctxp)
{
        struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
        struct lpfc_nvmet_tgtport *tgtp;
        struct sli4_sge *sgl;
        struct lpfc_nodelist *ndlp;
        struct lpfc_iocbq *nvmewqe;
        struct scatterlist *sgel;
        union lpfc_wqe128 *wqe;
        uint32_t *txrdy;
        dma_addr_t physaddr;
        int i, cnt;
        int xc = 1;

        if (!lpfc_is_link_up(phba)) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6107 lpfc_nvmet_prep_fcp_wqe: link err:"
                                "NPORT x%x oxid:x%x\n", ctxp->sid,
                                ctxp->oxid);
                return NULL;
        }

        ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
            (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
                                "NPORT x%x oxid:x%x\n",
                                ctxp->sid, ctxp->oxid);
                return NULL;
        }

        if (rsp->sg_cnt > phba->cfg_sg_seg_cnt) {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
                                "NPORT x%x oxid:x%x\n",
                                ctxp->sid, ctxp->oxid);
                return NULL;
        }

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        nvmewqe = ctxp->wqeq;
        if (nvmewqe == NULL) {
                /* Allocate buffer for command wqe */
                nvmewqe = ctxp->rqb_buffer->iocbq;
                if (nvmewqe == NULL) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                        "6110 lpfc_nvmet_prep_fcp_wqe: No "
                                        "WQE: NPORT x%x oxid:x%x\n",
                                        ctxp->sid, ctxp->oxid);
                        return NULL;
                }
                ctxp->wqeq = nvmewqe;
                xc = 0; /* create new XRI */
                nvmewqe->sli4_lxritag = NO_XRI;
                nvmewqe->sli4_xritag = NO_XRI;
        }

        /* Sanity check */
        if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
            (ctxp->entry_cnt == 1)) ||
            ((ctxp->state == LPFC_NVMET_STE_DATA) &&
            (ctxp->entry_cnt > 1))) {
                wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
        } else {
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                "6111 Wrong state %s: %d cnt %d\n",
                                __func__, ctxp->state, ctxp->entry_cnt);
                return NULL;
        }

        sgl = (struct sli4_sge *)ctxp->rqb_buffer->sglq->sgl;
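        /*
         * For TSEND and TRECEIVE the first two SGEs are placeholders
         * (SKIP entries, or a TXRDY buffer plus a SKIP for writes); the
         * data scatterlist is translated into trailing SGEs by the common
         * loop at the bottom of this routine.
         */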
        switch (rsp->op) {
        case NVMET_FCOP_READDATA:
        case NVMET_FCOP_READDATA_RSP:
                /* Words 0 - 2 : The first sg segment */
                sgel = &rsp->sg[0];
                physaddr = sg_dma_address(sgel);
                wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
                wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
                wqe->fcp_tsend.bde.addrHigh =
                        cpu_to_le32(putPaddrHigh(physaddr));

                /* Word 3 */
                wqe->fcp_tsend.payload_offset_len = 0;

                /* Word 4 */
                wqe->fcp_tsend.relative_offset = ctxp->offset;

                /* Word 5 */

                /* Word 6 */
                bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
                bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
                       nvmewqe->sli4_xritag);

                /* Word 7 */
                bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);

                /* Word 8 */
                wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

                /* Word 9 */
                bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
                bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

                /* Word 10 */
                bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
                bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
                bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
                bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
                       LPFC_WQE_LENLOC_WORD12);
                bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
                bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
                bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
                if (phba->cfg_nvme_oas)
                        bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);

                /* Word 11 */
                bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
                       LPFC_WQE_CQ_ID_DEFAULT);
                bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
                       FCP_COMMAND_TSEND);

                /* Word 12 */
                wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

                /* Setup 2 SKIP SGEs */
                sgl->addr_hi = 0;
                sgl->addr_lo = 0;
                sgl->word2 = 0;
                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
                sgl->word2 = cpu_to_le32(sgl->word2);
                sgl->sge_len = 0;
                sgl++;
                sgl->addr_hi = 0;
                sgl->addr_lo = 0;
                sgl->word2 = 0;
                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
                sgl->word2 = cpu_to_le32(sgl->word2);
                sgl->sge_len = 0;
                sgl++;
                if (rsp->op == NVMET_FCOP_READDATA_RSP) {
                        atomic_inc(&tgtp->xmt_fcp_read_rsp);
                        bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
                        if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
                            (rsp->rsplen == 12)) {
                                bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
                                bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
                                bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
                                bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
                        } else {
                                bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
                                bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
                                bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
                                bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
                                       ((rsp->rsplen >> 2) - 1));
                                memcpy(&wqe->words[16], rsp->rspaddr,
                                       rsp->rsplen);
                        }
                } else {
                        atomic_inc(&tgtp->xmt_fcp_read);

                        bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
                        bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
                        bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
                        bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
                        bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
                }
                ctxp->state = LPFC_NVMET_STE_DATA;
                break;
        case NVMET_FCOP_WRITEDATA:
                /* Words 0 - 2 : The first sg segment */
                txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
                                       GFP_KERNEL, &physaddr);
                if (!txrdy) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
                                        "6041 Bad txrdy buffer: oxid x%x\n",
                                        ctxp->oxid);
                        return NULL;
                }
                ctxp->txrdy = txrdy;
                ctxp->txrdy_phys = physaddr;
                wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
                wqe->fcp_treceive.bde.addrLow =
                        cpu_to_le32(putPaddrLow(physaddr));
                wqe->fcp_treceive.bde.addrHigh =
                        cpu_to_le32(putPaddrHigh(physaddr));

                /* Word 3 */
                wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

                /* Word 4 */
                wqe->fcp_treceive.relative_offset = ctxp->offset;

                /* Word 5 */

                /* Word 6 */
                bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
                bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
                       nvmewqe->sli4_xritag);

                /* Word 7 */
                bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
                bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
                       CMD_FCP_TRECEIVE64_WQE);

                /* Word 8 */
                wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

                /* Word 9 */
                bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
                bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

                /* Word 10 */
                bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
                bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
                bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
                bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
                       LPFC_WQE_LENLOC_WORD12);
                bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
                bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
                bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
                bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
                bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
                if (phba->cfg_nvme_oas)
                        bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);

                /* Word 11 */
                bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
                       LPFC_WQE_CQ_ID_DEFAULT);
                bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
                       FCP_COMMAND_TRECEIVE);
                bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);

                /* Word 12 */
                wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;

                /* Setup 1 TXRDY and 1 SKIP SGE */
                txrdy[0] = 0;
                txrdy[1] = cpu_to_be32(rsp->transfer_length);
                txrdy[2] = 0;

                sgl->addr_hi = putPaddrHigh(physaddr);
                sgl->addr_lo = putPaddrLow(physaddr);
                sgl->word2 = 0;
                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
                sgl->word2 = cpu_to_le32(sgl->word2);
                sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
                sgl++;
                sgl->addr_hi = 0;
                sgl->addr_lo = 0;
                sgl->word2 = 0;
                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
                sgl->word2 = cpu_to_le32(sgl->word2);
                sgl->sge_len = 0;
                sgl++;
                ctxp->state = LPFC_NVMET_STE_DATA;
                atomic_inc(&tgtp->xmt_fcp_write);
                break;
        case NVMET_FCOP_RSP:
                /* Words 0 - 2 */
                physaddr = rsp->rspdma;
                wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
                wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
                wqe->fcp_trsp.bde.addrLow =
                        cpu_to_le32(putPaddrLow(physaddr));
                wqe->fcp_trsp.bde.addrHigh =
                        cpu_to_le32(putPaddrHigh(physaddr));

                /* Word 3 */
                wqe->fcp_trsp.response_len = rsp->rsplen;

                /* Word 4 */
                wqe->fcp_trsp.rsvd_4_5[0] = 0;

                /* Word 5 */

                /* Word 6 */
                bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
                       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
                bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
                       nvmewqe->sli4_xritag);

                /* Word 7 */
                bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
                bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);

                /* Word 8 */
                wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

                /* Word 9 */
                bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
                bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

                /* Word 10 */
                bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
                bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
                bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
                bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
                       LPFC_WQE_LENLOC_WORD3);
                bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
                bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
                if (phba->cfg_nvme_oas)
                        bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);

                /* Word 11 */
                bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
                       LPFC_WQE_CQ_ID_DEFAULT);
                bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
                       FCP_COMMAND_TRSP);
                bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
                ctxp->state = LPFC_NVMET_STE_RSP;

                if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
                        /* Good response - all zero's on wire */
                        bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
                        bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
                        bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
                } else {
                        bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
                        bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
                        bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
                               ((rsp->rsplen >> 2) - 1));
                        memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
                }

                /* Use rspbuf, NOT sg list */
                rsp->sg_cnt = 0;
                sgl->word2 = 0;
                atomic_inc(&tgtp->xmt_fcp_rsp);
                break;

        default:
                lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
                                "6064 Unknown Rsp Op %d\n",
                                rsp->op);
                return NULL;
        }

        nvmewqe->retry = 1;
        nvmewqe->vport = phba->pport;
        nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
        nvmewqe->context1 = ndlp;

        for (i = 0; i < rsp->sg_cnt; i++) {
                sgel = &rsp->sg[i];
                physaddr = sg_dma_address(sgel);
                cnt = sg_dma_len(sgel);
                sgl->addr_hi = putPaddrHigh(physaddr);
                sgl->addr_lo = putPaddrLow(physaddr);
                sgl->word2 = 0;
                bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
                bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
                if ((i + 1) == rsp->sg_cnt)
                        bf_set(lpfc_sli4_sge_last, sgl, 1);
                sgl->word2 = cpu_to_le32(sgl->word2);
                sgl->sge_len = cpu_to_le32(cnt);
                sgl++;
                ctxp->offset += cnt;
        }
        return nvmewqe;
}
/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP
 * commands. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                             struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        uint32_t status, result;

        ctxp = cmdwqe->context2;
        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        atomic_inc(&tgtp->xmt_abort_cmpl);

        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                        "6165 Abort cmpl: xri x%x WCQE: %08x %08x %08x %08x\n",
                        ctxp->oxid, wcqe->word0, wcqe->total_data_placed,
                        result, wcqe->word3);

        ctxp->state = LPFC_NVMET_STE_DONE;
        lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);

        cmdwqe->context2 = NULL;
        cmdwqe->context3 = NULL;
        lpfc_sli_release_iocbq(phba, cmdwqe);
}
/**
 * lpfc_nvmet_xmt_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for FCP
 * commands. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                             struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        uint32_t status, result;

        ctxp = cmdwqe->context2;
        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        atomic_inc(&tgtp->xmt_abort_cmpl);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6070 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
                        ctxp, wcqe->word0, wcqe->total_data_placed,
                        result, wcqe->word3);

        if (ctxp) {
                /* Sanity check */
                if (ctxp->state != LPFC_NVMET_STE_ABORT) {
                        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                                        "6112 ABORT Wrong state:%d oxid x%x\n",
                                        ctxp->state, ctxp->oxid);
                }
                ctxp->state = LPFC_NVMET_STE_DONE;
                lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
                cmdwqe->context2 = NULL;
                cmdwqe->context3 = NULL;
        }
}
/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from the SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for LS
 * commands. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
                            struct lpfc_wcqe_complete *wcqe)
{
        struct lpfc_nvmet_rcv_ctx *ctxp;
        struct lpfc_nvmet_tgtport *tgtp;
        uint32_t status, result;

        ctxp = cmdwqe->context2;
        status = bf_get(lpfc_wcqe_c_status, wcqe);
        result = wcqe->parameter;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        atomic_inc(&tgtp->xmt_abort_cmpl);

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
                        ctxp, wcqe->word0, wcqe->total_data_placed,
                        result, wcqe->word3);

        if (ctxp) {
                cmdwqe->context2 = NULL;
                cmdwqe->context3 = NULL;
                lpfc_sli_release_iocbq(phba, cmdwqe);
                kfree(ctxp);
        } else
                lpfc_sli_release_iocbq(phba, cmdwqe);
}
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
                             struct lpfc_nvmet_rcv_ctx *ctxp,
                             uint32_t sid, uint16_t xri)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_iocbq *abts_wqeq;
        union lpfc_wqe *wqe_abts;
        struct lpfc_nodelist *ndlp;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6067 Abort: sid %x xri x%x/x%x\n",
                        sid, xri, ctxp->wqeq->sli4_xritag);

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

        ndlp = lpfc_findnode_did(phba->pport, sid);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
            (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
                atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
                                "6134 Drop ABTS - wrong NDLP state x%x.\n",
                                (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

                /* No failure to an ABTS request. */
                return 0;
        }

        abts_wqeq = ctxp->wqeq;
        wqe_abts = &abts_wqeq->wqe;
        ctxp->state = LPFC_NVMET_STE_ABORT;

        /*
         * Since we zero the whole WQE, we need to ensure we set the WQE fields
         * that were initialized in lpfc_sli4_nvmet_alloc.
         */
        memset(wqe_abts, 0, sizeof(union lpfc_wqe));

        /* Word 5 */
        bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
        bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
        bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
        bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
        bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

        /* Word 6 */
        bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
               phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
        bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
               abts_wqeq->sli4_xritag);

        /* Word 7 */
        bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
               CMD_XMIT_SEQUENCE64_WQE);
        bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
        bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
        bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

        /* Word 8 */
        wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

        /* Word 9 */
        bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
        /* Needs to be set by caller */
        bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

        /* Word 10 */
        bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
        bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
        bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
               LPFC_WQE_LENLOC_WORD12);
        bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
        bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

        /* Word 11 */
        bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
               LPFC_WQE_CQ_ID_DEFAULT);
        bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
               OTHER_COMMAND);

        abts_wqeq->vport = phba->pport;
        abts_wqeq->context1 = ndlp;
        abts_wqeq->context2 = ctxp;
        abts_wqeq->context3 = NULL;
        abts_wqeq->rsvd2 = 0;
        /* hba_wqidx should already be setup from command we are aborting */
        abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
        abts_wqeq->iocb.ulpLe = 1;

        lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
                        "6069 Issue ABTS to xri x%x reqtag x%x\n",
                        xri, abts_wqeq->iotag);
        return 1;
}
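/*
 * Three abort flavors follow. lpfc_nvmet_sol_fcp_issue_abort() aborts an
 * exchange the HBA already owns by issuing ABORT_XRI_CX against its XRI;
 * the unsolicited variants reuse lpfc_nvmet_unsol_issue_abort() above to
 * send a BLS ABTS sequence for exchanges that never made it onto a WQ.
 */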
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
                               struct lpfc_nvmet_rcv_ctx *ctxp,
                               uint32_t sid, uint16_t xri)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_iocbq *abts_wqeq;
        union lpfc_wqe *abts_wqe;
        struct lpfc_nodelist *ndlp;
        unsigned long flags;
        int rc;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        if (!ctxp->wqeq) {
                ctxp->wqeq = ctxp->rqb_buffer->iocbq;
                ctxp->wqeq->hba_wqidx = 0;
        }

        ndlp = lpfc_findnode_did(phba->pport, sid);
        if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
            ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
            (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
                atomic_inc(&tgtp->xmt_abort_rsp_error);
                lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
                                "6160 Drop ABTS - wrong NDLP state x%x.\n",
                                (ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

                /* No failure to an ABTS request. */
                return 0;
        }

        /* Issue ABTS for this WQE based on iotag */
        ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
        if (!ctxp->abort_wqeq) {
                lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
                                "6161 Abort failed: No wqeqs: "
                                "xri: x%x\n", ctxp->oxid);
                /* No failure to an ABTS request. */
                return 0;
        }
        abts_wqeq = ctxp->abort_wqeq;
        abts_wqe = &abts_wqeq->wqe;
        ctxp->state = LPFC_NVMET_STE_ABORT;

        /* Announce entry to new IO submit field. */
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
                        "6162 Abort Request to rport DID x%06x "
                        "for xri x%x x%x\n",
                        ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

        /* If the hba is getting reset, this flag is set.  It is
         * cleared when the reset is complete and rings reestablished.
         */
        spin_lock_irqsave(&phba->hbalock, flags);
        /* driver queued commands are in process of being flushed */
        if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                "6163 Driver in reset cleanup - flushing "
                                "NVME Req now. hba_flag x%x oxid x%x\n",
                                phba->hba_flag, ctxp->oxid);
                lpfc_sli_release_iocbq(phba, abts_wqeq);
                return 0;
        }

        /* Outstanding abort is in progress */
        if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
                spin_unlock_irqrestore(&phba->hbalock, flags);
                lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                                "6164 Outstanding NVME I/O Abort Request "
                                "still pending on oxid x%x\n",
                                ctxp->oxid);
                lpfc_sli_release_iocbq(phba, abts_wqeq);
                return 0;
        }

        /* Ready - mark outstanding as aborted by driver. */
        abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

        /* WQEs are reused.  Clear stale data and set key fields to
         * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
         */
        memset(abts_wqe, 0, sizeof(union lpfc_wqe));

        /* word 3 */
        bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

        /* word 7 */
        bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
        bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

        /* word 8 - tell the FW to abort the IO associated with this
         * outstanding exchange ID.
         */
        abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;

        /* word 9 - this is the iotag for the abts_wqe completion. */
        bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
               abts_wqeq->iotag);

        /* word 10 */
        bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
        bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

        /* word 11 */
        bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
        bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
        bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

        /* ABTS WQE must go to the same WQ as the WQE to be aborted */
        abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
        abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
        abts_wqeq->iocb_cmpl = 0;
        abts_wqeq->iocb_flag |= LPFC_IO_NVME;
        abts_wqeq->context2 = ctxp;
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
        spin_unlock_irqrestore(&phba->hbalock, flags);
        if (rc == WQE_SUCCESS)
                return 0;

        lpfc_sli_release_iocbq(phba, abts_wqeq);
        lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
                        "6166 Failed abts issue_wqe with status x%x "
                        "for oxid x%x.\n",
                        rc, ctxp->oxid);
        return 1;
}
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
                                 struct lpfc_nvmet_rcv_ctx *ctxp,
                                 uint32_t sid, uint16_t xri)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_iocbq *abts_wqeq;
        unsigned long flags;
        int rc;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        if (!ctxp->wqeq) {
                ctxp->wqeq = ctxp->rqb_buffer->iocbq;
                ctxp->wqeq->hba_wqidx = 0;
        }

        rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
        if (rc == 0)
                goto aerr;

        spin_lock_irqsave(&phba->hbalock, flags);
        abts_wqeq = ctxp->wqeq;
        abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_abort_cmp;
        abts_wqeq->iocb_cmpl = 0;
        abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
        rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
        spin_unlock_irqrestore(&phba->hbalock, flags);
        if (rc == WQE_SUCCESS) {
                atomic_inc(&tgtp->xmt_abort_rsp);
                return 0;
        }

aerr:
        lpfc_nvmet_rq_post(phba, ctxp, &ctxp->rqb_buffer->hbuf);
        atomic_inc(&tgtp->xmt_abort_rsp_error);
        lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
                        "6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
                        ctxp->oxid, rc);
        return 1;
}
static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
                                struct lpfc_nvmet_rcv_ctx *ctxp,
                                uint32_t sid, uint16_t xri)
{
        struct lpfc_nvmet_tgtport *tgtp;
        struct lpfc_iocbq *abts_wqeq;
        union lpfc_wqe *wqe_abts;
        unsigned long flags;
        int rc;

        tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
        if (!ctxp->wqeq) {
                /* Issue ABTS for this WQE based on iotag */
                ctxp->wqeq = lpfc_sli_get_iocbq(phba);
                if (!ctxp->wqeq) {
                        lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
                                        "6068 Abort failed: No wqeqs: "
                                        "xri: x%x\n", xri);
                        /* No failure to an ABTS request. */
                        kfree(ctxp);
                        return 0;
                }
        }
        abts_wqeq = ctxp->wqeq;
        wqe_abts = &abts_wqeq->wqe;
        lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);

        spin_lock_irqsave(&phba->hbalock, flags);
        abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
        abts_wqeq->iocb_cmpl = 0;
        abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
        rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
        spin_unlock_irqrestore(&phba->hbalock, flags);
        if (rc == WQE_SUCCESS) {
                atomic_inc(&tgtp->xmt_abort_rsp);
                return 0;
        }

        atomic_inc(&tgtp->xmt_abort_rsp_error);
        abts_wqeq->context2 = NULL;
        abts_wqeq->context3 = NULL;
        lpfc_sli_release_iocbq(phba, abts_wqeq);
        kfree(ctxp);
        lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
                        "6056 Failed to Issue ABTS. Status x%x\n", rc);
        return 0;
}