/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 ********************************************************************/
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/unaligned.h>
#include <linux/crc-t10dif.h>
#include <net/checksum.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>

#include <../drivers/nvme/host/nvme.h>
#include <linux/nvme-fc-driver.h>

#include "lpfc_version.h"
#include "lpfc_sli4.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_debugfs.h"
static struct lpfc_iocbq *lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *,
						 struct lpfc_nvmet_rcv_ctx *,
						 dma_addr_t rspbuf,
						 uint16_t rspsize);
static struct lpfc_iocbq *lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *,
						  struct lpfc_nvmet_rcv_ctx *);
static int lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *,
					  struct lpfc_nvmet_rcv_ctx *,
					  uint32_t, uint16_t);
static int lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *,
					    struct lpfc_nvmet_rcv_ctx *,
					    uint32_t, uint16_t);
static int lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *,
					   struct lpfc_nvmet_rcv_ctx *,
					   uint32_t, uint16_t);
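/* lpfc_nvmet_defer_release queues the context on the driver's ABTS list so
 * the final release happens from the abort / XRI-aborted completion path;
 * the LPFC_NVMET_CTX_RLS flag guards against queuing the same context twice.
 */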
static void
lpfc_nvmet_defer_release(struct lpfc_hba *phba, struct lpfc_nvmet_rcv_ctx *ctxp)
{
	unsigned long iflag;

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6313 NVMET Defer ctx release xri x%x flg x%x\n",
			ctxp->oxid, ctxp->flag);

	spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
	if (ctxp->flag & LPFC_NVMET_CTX_RLS) {
		spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock,
				       iflag);
		return;
	}
	ctxp->flag |= LPFC_NVMET_CTX_RLS;
	list_add_tail(&ctxp->list, &phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	spin_unlock_irqrestore(&phba->sli4_hba.abts_nvme_buf_list_lock, iflag);
}
/**
 * lpfc_nvmet_xmt_ls_rsp_cmp - Completion handler for LS Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME LS commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_rsp_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_ls_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;
	if (!phba->targetport)
		goto out;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	if (status)
		atomic_inc(&tgtp->xmt_ls_rsp_error);
	else
		atomic_inc(&tgtp->xmt_ls_rsp_cmpl);

out:
	ctxp = cmdwqe->context2;
	rsp = &ctxp->ctx.ls_req;

	lpfc_nvmeio_data(phba, "NVMET LS CMPL: xri x%x stat x%x result x%x\n",
			 ctxp->oxid, status, result);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6038 %s: Entrypoint: ctx %p status %x/%x\n", __func__,
			ctxp, status, result);

	lpfc_nlp_put(cmdwqe->context1);
	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, cmdwqe);
	rsp->done(rsp);
	kfree(ctxp);
}
/**
 * lpfc_nvmet_ctxbuf_post - Repost a NVMET RQ DMA buffer and clean up context
 * @phba: HBA the buffer is associated with
 * @ctx_buf: ctx buffer context to clean up and repost
 *
 * Description: Frees the given DMA buffer by reposting it to its associated
 * RQ so it can be reused.
 *
 * Notes: Takes phba->hbalock. Can be called with or without other locks held.
 *
 * Returns: None
 **/
static void
lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp = ctx_buf->context;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct rqb_dmabuf *nvmebuf;
	struct lpfc_dmabuf *hbufp;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;
	unsigned long iflag;

	if (ctxp->txrdy) {
		pci_pool_free(phba->txrdy_payload_pool, ctxp->txrdy,
			      ctxp->txrdy_phys);
		ctxp->txrdy = NULL;
		ctxp->txrdy_phys = 0;
	}
	ctxp->state = LPFC_NVMET_STE_FREE;
	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
	if (phba->sli4_hba.nvmet_io_wait_cnt) {
		hbufp = &nvmebuf->hbuf;
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_io_wait_list,
				 nvmebuf, struct rqb_dmabuf,
				 hbuf.list);
		phba->sli4_hba.nvmet_io_wait_cnt--;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
		oxid = be16_to_cpu(fc_hdr->fh_ox_id);
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		payload = (uint32_t *)(nvmebuf->dbuf.virt);
		size = nvmebuf->bytes_recv;
		sid = sli4_sid_from_fc_hdr(fc_hdr);
		ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
		memset(ctxp, 0, sizeof(ctxp->ctx));
		ctxp->wqeq = NULL;
		ctxp->txrdy = NULL;
		ctxp->offset = 0;
		ctxp->phba = phba;
		ctxp->size = size;
		ctxp->oxid = oxid;
		ctxp->sid = sid;
		ctxp->state = LPFC_NVMET_STE_RCV;
		ctxp->entry_cnt = 1;
		ctxp->flag = 0;
		ctxp->ctxbuf = ctx_buf;
		spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			ctxp->ts_cmd_nvme = ktime_get_ns();
			ctxp->ts_isr_cmd = ctxp->ts_cmd_nvme;
			ctxp->ts_nvme_data = 0;
			ctxp->ts_data_wqput = 0;
			ctxp->ts_isr_data = 0;
			ctxp->ts_data_nvme = 0;
			ctxp->ts_nvme_status = 0;
			ctxp->ts_status_wqput = 0;
			ctxp->ts_isr_status = 0;
			ctxp->ts_status_nvme = 0;
		}
#endif
		atomic_inc(&tgtp->rcv_fcp_cmd_in);
		/*
		 * The calling sequence should be:
		 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
		 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
		 * When we return from nvmet_fc_rcv_fcp_req, all relevant info
		 * in the NVME command / FC header is stored.
		 * A buffer has already been reposted for this IO, so just free
		 * the nvmebuf.
		 */
		rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
					  payload, size);
		/* Process FCP command */
		if (rc == 0) {
			atomic_inc(&tgtp->rcv_fcp_cmd_out);
			nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
			return;
		}

		atomic_inc(&tgtp->rcv_fcp_cmd_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"2582 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
				ctxp->oxid, rc,
				atomic_read(&tgtp->rcv_fcp_cmd_in),
				atomic_read(&tgtp->rcv_fcp_cmd_out),
				atomic_read(&tgtp->xmt_fcp_release));

		lpfc_nvmet_defer_release(phba, ctxp);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		nvmebuf->hrq->rqbp->rqb_free_buffer(phba, nvmebuf);
		return;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock, iflag);

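	/* No received command is waiting for a context, so simply return
	 * this context buffer to the free pool.
	 */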
	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
	list_add_tail(&ctx_buf->list,
		      &phba->sli4_hba.lpfc_nvmet_ctx_list);
	phba->sli4_hba.nvmet_ctx_cnt++;
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
#endif
}
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
static void
lpfc_nvmet_ktime(struct lpfc_hba *phba,
		 struct lpfc_nvmet_rcv_ctx *ctxp)
{
	uint64_t seg1, seg2, seg3, seg4, seg5;
	uint64_t seg6, seg7, seg8, seg9, seg10;
	if (!ctxp->ts_isr_cmd || !ctxp->ts_cmd_nvme ||
	    !ctxp->ts_nvme_data || !ctxp->ts_data_wqput ||
	    !ctxp->ts_isr_data || !ctxp->ts_data_nvme ||
	    !ctxp->ts_nvme_status || !ctxp->ts_status_wqput ||
	    !ctxp->ts_isr_status || !ctxp->ts_status_nvme)
		return;

	if (ctxp->ts_isr_cmd > ctxp->ts_cmd_nvme)
		return;
	if (ctxp->ts_cmd_nvme > ctxp->ts_nvme_data)
		return;
	if (ctxp->ts_nvme_data > ctxp->ts_data_wqput)
		return;
	if (ctxp->ts_data_wqput > ctxp->ts_isr_data)
		return;
	if (ctxp->ts_isr_data > ctxp->ts_data_nvme)
		return;
	if (ctxp->ts_data_nvme > ctxp->ts_nvme_status)
		return;
	if (ctxp->ts_nvme_status > ctxp->ts_status_wqput)
		return;
	if (ctxp->ts_status_wqput > ctxp->ts_isr_status)
		return;
	if (ctxp->ts_isr_status > ctxp->ts_status_nvme)
		return;
	/*
	 * Segment 1 - Time from FCP command received by MSI-X ISR
	 * to FCP command is passed to NVME Layer.
	 * Segment 2 - Time from FCP command payload handed
	 * off to NVME Layer to Driver receives a Command op
	 * from NVME Layer.
	 * Segment 3 - Time from Driver receives a Command op
	 * from NVME Layer to Command is put on WQ.
	 * Segment 4 - Time from Driver WQ put is done
	 * to MSI-X ISR for Command cmpl.
	 * Segment 5 - Time from MSI-X ISR for Command cmpl to
	 * Command cmpl is passed to NVME Layer.
	 * Segment 6 - Time from Command cmpl is passed to NVME
	 * Layer to Driver receives a RSP op from NVME Layer.
	 * Segment 7 - Time from Driver receives a RSP op from
	 * NVME Layer to WQ put is done on TRSP FCP Status.
	 * Segment 8 - Time from Driver WQ put is done on TRSP
	 * FCP Status to MSI-X ISR for TRSP cmpl.
	 * Segment 9 - Time from MSI-X ISR for TRSP cmpl to
	 * TRSP cmpl is passed to NVME Layer.
	 * Segment 10 - Time from FCP command received by
	 * MSI-X ISR to command is completed on wire.
	 * (Segments 1 thru 8) for READDATA / WRITEDATA
	 * (Segments 1 thru 4) for READDATA_RSP
	 */
	seg1 = ctxp->ts_cmd_nvme - ctxp->ts_isr_cmd;
	seg2 = (ctxp->ts_nvme_data - ctxp->ts_isr_cmd) - seg1;
	seg3 = (ctxp->ts_data_wqput - ctxp->ts_isr_cmd) -
		seg1 - seg2;
	seg4 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd) -
		seg1 - seg2 - seg3;
	seg5 = (ctxp->ts_data_nvme - ctxp->ts_isr_cmd) -
		seg1 - seg2 - seg3 - seg4;

	/* For auto rsp commands seg6 thru seg10 will be 0 */
	if (ctxp->ts_nvme_status > ctxp->ts_data_nvme) {
		seg6 = (ctxp->ts_nvme_status -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 - seg4 - seg5;
		seg7 = (ctxp->ts_status_wqput -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 -
			seg4 - seg5 - seg6;
		seg8 = (ctxp->ts_isr_status -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 - seg4 -
			seg5 - seg6 - seg7;
		seg9 = (ctxp->ts_status_nvme -
			ctxp->ts_isr_cmd) -
			seg1 - seg2 - seg3 - seg4 -
			seg5 - seg6 - seg7 - seg8;
		seg10 = (ctxp->ts_isr_status -
			ctxp->ts_isr_cmd);
	} else {
		seg6 = 0;
		seg7 = 0;
		seg8 = 0;
		seg9 = 0;
		seg10 = (ctxp->ts_isr_data - ctxp->ts_isr_cmd);
	}
	phba->ktime_seg1_total += seg1;
	if (seg1 < phba->ktime_seg1_min)
		phba->ktime_seg1_min = seg1;
	else if (seg1 > phba->ktime_seg1_max)
		phba->ktime_seg1_max = seg1;

	phba->ktime_seg2_total += seg2;
	if (seg2 < phba->ktime_seg2_min)
		phba->ktime_seg2_min = seg2;
	else if (seg2 > phba->ktime_seg2_max)
		phba->ktime_seg2_max = seg2;

	phba->ktime_seg3_total += seg3;
	if (seg3 < phba->ktime_seg3_min)
		phba->ktime_seg3_min = seg3;
	else if (seg3 > phba->ktime_seg3_max)
		phba->ktime_seg3_max = seg3;

	phba->ktime_seg4_total += seg4;
	if (seg4 < phba->ktime_seg4_min)
		phba->ktime_seg4_min = seg4;
	else if (seg4 > phba->ktime_seg4_max)
		phba->ktime_seg4_max = seg4;

	phba->ktime_seg5_total += seg5;
	if (seg5 < phba->ktime_seg5_min)
		phba->ktime_seg5_min = seg5;
	else if (seg5 > phba->ktime_seg5_max)
		phba->ktime_seg5_max = seg5;
	phba->ktime_data_samples++;

	/* if setting up RSP, do not count seg6 thru seg10 */
	if (!seg6)
		return;

	phba->ktime_seg6_total += seg6;
	if (seg6 < phba->ktime_seg6_min)
		phba->ktime_seg6_min = seg6;
	else if (seg6 > phba->ktime_seg6_max)
		phba->ktime_seg6_max = seg6;

	phba->ktime_seg7_total += seg7;
	if (seg7 < phba->ktime_seg7_min)
		phba->ktime_seg7_min = seg7;
	else if (seg7 > phba->ktime_seg7_max)
		phba->ktime_seg7_max = seg7;

	phba->ktime_seg8_total += seg8;
	if (seg8 < phba->ktime_seg8_min)
		phba->ktime_seg8_min = seg8;
	else if (seg8 > phba->ktime_seg8_max)
		phba->ktime_seg8_max = seg8;

	phba->ktime_seg9_total += seg9;
	if (seg9 < phba->ktime_seg9_min)
		phba->ktime_seg9_min = seg9;
	else if (seg9 > phba->ktime_seg9_max)
		phba->ktime_seg9_max = seg9;

	phba->ktime_seg10_total += seg10;
	if (seg10 < phba->ktime_seg10_min)
		phba->ktime_seg10_min = seg10;
	else if (seg10 > phba->ktime_seg10_max)
		phba->ktime_seg10_max = seg10;
	phba->ktime_status_samples++;
}
#endif
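/* The running totals kept above are consumed by the driver's debugfs code;
 * a segment's mean latency is presumably ktime_segN_total divided by
 * ktime_data_samples (segments 1-5) or ktime_status_samples (segments 6-10).
 */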
/**
 * lpfc_nvmet_xmt_fcp_op_cmp - Completion handler for FCP Response
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME FCP commands.
 * The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_fcp_op_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			  struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmefc_tgt_fcp_req *rsp;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t status, result, op, start_clean;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	ctxp = cmdwqe->context2;
	ctxp->flag &= ~LPFC_NVMET_IO_INP;

	rsp = &ctxp->ctx.fcp_req;
	op = rsp->op;

	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	if (phba->targetport)
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	else
		tgtp = NULL;

	lpfc_nvmeio_data(phba, "NVMET FCP CMPL: xri x%x op x%x status x%x\n",
			 ctxp->oxid, op, status);
	if (status) {
		rsp->fcp_error = NVME_SC_DATA_XFER_ERROR;
		rsp->transferred_length = 0;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_error);

		/* pick up SLI4 exchange busy condition */
		if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
			ctxp->flag |= LPFC_NVMET_XBUSY;

			lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
					"6315 IO Cmpl XBUSY: xri x%x: %x/%x\n",
					ctxp->oxid, status, result);
		} else {
			ctxp->flag &= ~LPFC_NVMET_XBUSY;
		}
	} else {
		rsp->fcp_error = NVME_SC_SUCCESS;
		if (op == NVMET_FCOP_RSP)
			rsp->transferred_length = rsp->rsplen;
		else
			rsp->transferred_length = rsp->transfer_length;
		if (tgtp)
			atomic_inc(&tgtp->xmt_fcp_rsp_cmpl);
	}
	if ((op == NVMET_FCOP_READDATA_RSP) ||
	    (op == NVMET_FCOP_RSP)) {
		/* Sanity check */
		ctxp->state = LPFC_NVMET_STE_DONE;
		ctxp->entry_cnt++;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			if (rsp->op == NVMET_FCOP_READDATA_RSP) {
				ctxp->ts_isr_data =
					cmdwqe->isr_timestamp;
				ctxp->ts_data_nvme =
					ktime_get_ns();
				ctxp->ts_nvme_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_wqput =
					ctxp->ts_data_nvme;
				ctxp->ts_isr_status =
					ctxp->ts_data_nvme;
				ctxp->ts_status_nvme =
					ctxp->ts_data_nvme;
			} else {
				ctxp->ts_isr_status =
					cmdwqe->isr_timestamp;
				ctxp->ts_status_nvme =
					ktime_get_ns();
			}
		}
		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
			id = smp_processor_id();
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6703 CPU Check cmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
				phba->cpucheck_cmpl_io[id]++;
		}
#endif
		rsp->done(rsp);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on)
			lpfc_nvmet_ktime(phba, ctxp);
#endif
		/* lpfc_nvmet_xmt_fcp_release() will recycle the context */
	} else {
		ctxp->entry_cnt++;
		start_clean = offsetof(struct lpfc_iocbq, wqe);
		memset(((char *)cmdwqe) + start_clean, 0,
		       (sizeof(struct lpfc_iocbq) - start_clean));
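		/* Only the WQE portion of the iocbq is cleared above, so the
		 * list and tag bookkeeping that precedes the wqe member is
		 * preserved for reuse on the next operation.
		 */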
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (phba->ktime_on) {
			ctxp->ts_isr_data = cmdwqe->isr_timestamp;
			ctxp->ts_data_nvme = ktime_get_ns();
		}
		if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
			id = smp_processor_id();
			if (ctxp->cpu != id)
				lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
						"6704 CPU Check cmdcmpl: "
						"cpu %d expect %d\n",
						id, ctxp->cpu);
			if (ctxp->cpu < LPFC_CHECK_CPU_CNT)
				phba->cpucheck_ccmpl_io[id]++;
		}
#endif
		rsp->done(rsp);
	}
}

static int
lpfc_nvmet_xmt_ls_rsp(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_ls_req *rsp)
{
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.ls_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct hbq_dmabuf *nvmebuf =
		(struct hbq_dmabuf *)ctxp->rqb_buffer;
	struct lpfc_iocbq *nvmewqeq;
	struct lpfc_nvmet_tgtport *nvmep = tgtport->private;
	struct lpfc_dmabuf dmabuf;
	struct ulp_bde64 bpl;
	int rc;
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6023 %s: Entrypoint ctx %p %p\n", __func__,
			ctxp, tgtport);

	nvmewqeq = lpfc_nvmet_prep_ls_wqe(phba, ctxp, rsp->rspdma,
					  rsp->rsplen);
	if (nvmewqeq == NULL) {
		atomic_inc(&nvmep->xmt_ls_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6150 LS Drop IO x%x: Prep\n",
				ctxp->oxid);
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		atomic_inc(&nvmep->xmt_ls_abort);
		lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp,
						ctxp->sid, ctxp->oxid);
		return -ENOMEM;
	}
	/* Save numBdes for bpl2sgl */
	nvmewqeq->rsvd2 = 1;
	nvmewqeq->hba_wqidx = 0;
	nvmewqeq->context3 = &dmabuf;
	dmabuf.virt = &bpl;
	bpl.addrLow = nvmewqeq->wqe.xmit_sequence.bde.addrLow;
	bpl.addrHigh = nvmewqeq->wqe.xmit_sequence.bde.addrHigh;
	bpl.tus.f.bdeSize = rsp->rsplen;
	bpl.tus.f.bdeFlags = 0;
	bpl.tus.w = le32_to_cpu(bpl.tus.w);
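	/* dmabuf and bpl live on this function's stack; they are assumed to
	 * be consumed (converted into SGEs) within lpfc_sli4_issue_wqe()
	 * below, which is why nothing is freed here afterward.
	 */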
	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_rsp_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;

	lpfc_nvmeio_data(phba, "NVMET LS RESP: xri x%x wqidx x%x len x%x\n",
			 ctxp->oxid, nvmewqeq->hba_wqidx, rsp->rsplen);
	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, nvmewqeq);
	if (rc == WQE_SUCCESS) {
		/*
		 * Okay to repost buffer here, but wait till cmpl
		 * before freeing ctxp and iocbq.
		 */
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		ctxp->rqb_buffer = 0;
		atomic_inc(&nvmep->xmt_ls_rsp);
		return 0;
	}
	/* Give back resources */
	atomic_inc(&nvmep->xmt_ls_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6151 LS Drop IO x%x: Issue %d\n",
			ctxp->oxid, rc);

	lpfc_nlp_put(nvmewqeq->context1);

	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
	atomic_inc(&nvmep->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid);
	return -ENXIO;
}

static int
lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
		      struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	struct lpfc_iocbq *nvmewqeq;
	int rc;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_nvme_status = ktime_get_ns();
		else
			ctxp->ts_nvme_data = ktime_get_ns();
	}
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_IO) {
		int id = smp_processor_id();

		if (id < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_xmt_io[id]++;
		if (rsp->hwqid != id) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6705 CPU Check OP: "
					"cpu %d expect %d\n",
					id, rsp->hwqid);
			ctxp->cpu = rsp->hwqid;
		}
	}
#endif
	/* Sanity check */
	if ((ctxp->flag & LPFC_NVMET_ABTS_RCV) ||
	    (ctxp->state == LPFC_NVMET_STE_ABORT)) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6102 IO xri x%x aborted\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}
	nvmewqeq = lpfc_nvmet_prep_fcp_wqe(phba, ctxp);
	if (nvmewqeq == NULL) {
		atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6152 FCP Drop IO x%x: Prep\n",
				ctxp->oxid);
		rc = -ENXIO;
		goto aerr;
	}

	nvmewqeq->wqe_cmpl = lpfc_nvmet_xmt_fcp_op_cmp;
	nvmewqeq->iocb_cmpl = NULL;
	nvmewqeq->context2 = ctxp;
	nvmewqeq->iocb_flag |= LPFC_IO_NVMET;
	ctxp->wqeq->hba_wqidx = rsp->hwqid;

	lpfc_nvmeio_data(phba, "NVMET FCP CMND: xri x%x op x%x len x%x\n",
			 ctxp->oxid, rsp->op, rsp->rsplen);
	ctxp->flag |= LPFC_NVMET_IO_INP;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, nvmewqeq);
	if (rc == WQE_SUCCESS) {
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
		if (!phba->ktime_on)
			return 0;
		if (rsp->op == NVMET_FCOP_RSP)
			ctxp->ts_status_wqput = ktime_get_ns();
		else
			ctxp->ts_data_wqput = ktime_get_ns();
#endif
		return 0;
	}
	/* Give back resources */
	atomic_inc(&lpfc_nvmep->xmt_fcp_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6153 FCP Drop IO x%x: Issue: %d\n",
			ctxp->oxid, rc);

	ctxp->wqeq->hba_wqidx = 0;
	nvmewqeq->context2 = NULL;
	nvmewqeq->context3 = NULL;
	rc = -EBUSY;
aerr:
	return rc;
}

static void
lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct lpfc_nvmet_tgtport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&tport->tport_unreg_done);
}

static void
lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			 struct nvmefc_tgt_fcp_req *req)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(req, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6103 Abort op: oxri x%x flg x%x cnt %d\n",
			ctxp->oxid, ctxp->flag, ctxp->entry_cnt);

	lpfc_nvmeio_data(phba, "NVMET FCP ABRT: "
			 "xri x%x flg x%x cnt x%x\n",
			 ctxp->oxid, ctxp->flag, ctxp->entry_cnt);
	atomic_inc(&lpfc_nvmep->xmt_fcp_abort);

	spin_lock_irqsave(&ctxp->ctxlock, flags);

	/* Since iaab/iaar are NOT set, we need to check
	 * if the firmware is in the process of aborting this IO
	 */
	if (ctxp->flag & LPFC_NVMET_XBUSY) {
		spin_unlock_irqrestore(&ctxp->ctxlock, flags);
		return;
	}
	ctxp->flag |= LPFC_NVMET_ABORT_OP;
	if (ctxp->flag & LPFC_NVMET_IO_INP)
		lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid,
					       ctxp->oxid);
	else
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid,
						 ctxp->oxid);
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
}

static void
lpfc_nvmet_xmt_fcp_release(struct nvmet_fc_target_port *tgtport,
			   struct nvmefc_tgt_fcp_req *rsp)
{
	struct lpfc_nvmet_tgtport *lpfc_nvmep = tgtport->private;
	struct lpfc_nvmet_rcv_ctx *ctxp =
		container_of(rsp, struct lpfc_nvmet_rcv_ctx, ctx.fcp_req);
	struct lpfc_hba *phba = ctxp->phba;
	unsigned long flags;
	bool aborting = false;

	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_ABORT_OP) ||
	    (ctxp->flag & LPFC_NVMET_XBUSY)) {
		aborting = true;
		/* let the abort path do the real release */
		lpfc_nvmet_defer_release(phba, ctxp);
	}
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);

	lpfc_nvmeio_data(phba, "NVMET FCP FREE: xri x%x ste %d\n", ctxp->oxid,
			 ctxp->state);

	atomic_inc(&lpfc_nvmep->xmt_fcp_release);

	if (aborting)
		return;

	lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
}
static struct nvmet_fc_target_template lpfc_tgttemplate = {
	.targetport_delete = lpfc_nvmet_targetport_delete,
	.xmt_ls_rsp     = lpfc_nvmet_xmt_ls_rsp,
	.fcp_op         = lpfc_nvmet_xmt_fcp_op,
	.fcp_abort      = lpfc_nvmet_xmt_fcp_abort,
	.fcp_req_release = lpfc_nvmet_xmt_fcp_release,

	.max_hw_queues  = 1,
	.max_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.max_dif_sgl_segments = LPFC_NVMET_DEFAULT_SEGS,
	.dma_boundary = 0xFFFFFFFF,

	/* optional features */
	.target_features = 0,
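	/* Overwritten with the real feature flags in
	 * lpfc_nvmet_create_targetport() before registration.
	 */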
	/* sizes of additional private data for data structures */
	.target_priv_sz = sizeof(struct lpfc_nvmet_tgtport),
};

static void
lpfc_nvmet_cleanup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf, *next_ctx_buf;
	unsigned long flags;

	list_for_each_entry_safe(
		ctx_buf, next_ctx_buf,
		&phba->sli4_hba.lpfc_nvmet_ctx_list, list) {
		spin_lock_irqsave(
			&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
		list_del_init(&ctx_buf->list);
		spin_unlock_irqrestore(
			&phba->sli4_hba.abts_nvme_buf_list_lock, flags);
		__lpfc_clear_active_sglq(phba,
					 ctx_buf->sglq->sli4_lxritag);
		ctx_buf->sglq->state = SGL_FREED;
		ctx_buf->sglq->ndlp = NULL;

		spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock, flags);
		list_add_tail(&ctx_buf->sglq->list,
			      &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock_irqrestore(&phba->sli4_hba.sgl_list_lock,
				       flags);

		lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
		kfree(ctx_buf->context);
		kfree(ctx_buf);
	}
}

static int
lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
{
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe128 *wqe;
	int i;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME,
			"6403 Allocate NVMET resources for %d XRIs\n",
			phba->sli4_hba.nvmet_xri_cnt);
	/* For all nvmet xris, allocate resources needed to process a
	 * received command on a per xri basis.
	 */
	for (i = 0; i < phba->sli4_hba.nvmet_xri_cnt; i++) {
		ctx_buf = kzalloc(sizeof(*ctx_buf), GFP_KERNEL);
		if (!ctx_buf) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6404 Ran out of memory for NVMET\n");
			return -ENOMEM;
		}

		ctx_buf->context = kzalloc(sizeof(*ctx_buf->context),
					   GFP_KERNEL);
		if (!ctx_buf->context) {
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6405 Ran out of NVMET "
					"context memory\n");
			return -ENOMEM;
		}
		ctx_buf->context->ctxbuf = ctx_buf;

		ctx_buf->iocbq = lpfc_sli_get_iocbq(phba);
		if (!ctx_buf->iocbq) {
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6406 Ran out of NVMET iocb/WQEs\n");
			return -ENOMEM;
		}
		ctx_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
		nvmewqe = ctx_buf->iocbq;
		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
		/* Initialize WQE */
		memset(wqe, 0, sizeof(union lpfc_wqe));
		/* Word 7 */
		bf_set(wqe_ct, &wqe->generic.wqe_com, SLI4_CT_RPI);
		bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
		bf_set(wqe_pu, &wqe->generic.wqe_com, 1);
		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_ebde_cnt, &wqe->generic.wqe_com, 0);
		bf_set(wqe_qosd, &wqe->generic.wqe_com, 0);
		ctx_buf->iocbq->context1 = NULL;
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		ctx_buf->sglq = __lpfc_sli_get_nvmet_sglq(phba, ctx_buf->iocbq);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		if (!ctx_buf->sglq) {
			lpfc_sli_release_iocbq(phba, ctx_buf->iocbq);
			kfree(ctx_buf->context);
			kfree(ctx_buf);
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
					"6407 Ran out of NVMET XRIs\n");
			return -ENOMEM;
		}
		spin_lock(&phba->sli4_hba.nvmet_io_lock);
		list_add_tail(&ctx_buf->list,
			      &phba->sli4_hba.lpfc_nvmet_ctx_list);
		spin_unlock(&phba->sli4_hba.nvmet_io_lock);
	}
	phba->sli4_hba.nvmet_ctx_cnt = phba->sli4_hba.nvmet_xri_cnt;
	return 0;
}

int
lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_nvmet_tgtport *tgtp;
	struct nvmet_fc_port_info pinfo;
	int error;

	if (phba->targetport)
		return 0;

	error = lpfc_nvmet_setup_io_context(phba);
	if (error)
		return error;

	memset(&pinfo, 0, sizeof(struct nvmet_fc_port_info));
	pinfo.node_name = wwn_to_u64(vport->fc_nodename.u.wwn);
	pinfo.port_name = wwn_to_u64(vport->fc_portname.u.wwn);
	pinfo.port_id = vport->fc_myDID;
	/* Limit to LPFC_MAX_NVME_SEG_CNT.
	 * For now, need +1 to get around NVME transport logic.
	 */
	if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) {
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT,
				"6400 Reducing sg segment cnt to %d\n",
				LPFC_MAX_NVME_SEG_CNT);
		phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT;
	} else
		phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt;

	lpfc_tgttemplate.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1;
	lpfc_tgttemplate.max_hw_queues = phba->cfg_nvme_io_channel;
	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
					   NVMET_FCTGTFEAT_CMD_IN_ISR |
					   NVMET_FCTGTFEAT_OPDONE_IN_ISR;
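	/* The two *_IN_ISR flags tell the NVME transport that command
	 * receipt and op completions are delivered from interrupt context,
	 * so it should defer its processing to a work element.
	 */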
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
					     &phba->pcidev->dev,
					     &phba->targetport);
#else
	error = -ENOENT;
#endif
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6025 Cannot register NVME targetport "
				"x%x\n", error);
		phba->targetport = NULL;

		lpfc_nvmet_cleanup_io_context(phba);
	} else {
		tgtp = (struct lpfc_nvmet_tgtport *)
			phba->targetport->private;
		tgtp->phba = phba;

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
				"6026 Registered NVME "
				"targetport: %p, private %p "
				"portnm %llx nodenm %llx\n",
				phba->targetport, tgtp,
				pinfo.port_name, pinfo.node_name);
		atomic_set(&tgtp->rcv_ls_req_in, 0);
		atomic_set(&tgtp->rcv_ls_req_out, 0);
		atomic_set(&tgtp->rcv_ls_req_drop, 0);
		atomic_set(&tgtp->xmt_ls_abort, 0);
		atomic_set(&tgtp->xmt_ls_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_ls_rsp, 0);
		atomic_set(&tgtp->xmt_ls_drop, 0);
		atomic_set(&tgtp->xmt_ls_rsp_error, 0);
		atomic_set(&tgtp->xmt_ls_rsp_cmpl, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_in, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_out, 0);
		atomic_set(&tgtp->rcv_fcp_cmd_drop, 0);
		atomic_set(&tgtp->xmt_fcp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_read_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_read, 0);
		atomic_set(&tgtp->xmt_fcp_write, 0);
		atomic_set(&tgtp->xmt_fcp_rsp, 0);
		atomic_set(&tgtp->xmt_fcp_release, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_cmpl, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_error, 0);
		atomic_set(&tgtp->xmt_fcp_rsp_drop, 0);
		atomic_set(&tgtp->xmt_fcp_abort, 0);
		atomic_set(&tgtp->xmt_fcp_abort_cmpl, 0);
		atomic_set(&tgtp->xmt_abort_unsol, 0);
		atomic_set(&tgtp->xmt_abort_sol, 0);
		atomic_set(&tgtp->xmt_abort_rsp, 0);
		atomic_set(&tgtp->xmt_abort_rsp_error, 0);
	}
	return error;
}

int
lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	if (!phba->targetport)
		return 0;

	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
			 "6007 Update NVMET port %p did x%x\n",
			 phba->targetport, vport->fc_myDID);

	phba->targetport->port_id = vport->fc_myDID;
	return 0;
}
/**
 * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
 * @phba: pointer to lpfc hba data structure.
 * @axri: pointer to the nvmet xri abort wcqe structure.
 *
 * This routine is invoked by the worker thread to process a SLI4 fast-path
 * NVMET aborted xri.
 **/
void
lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
			    struct sli4_wcqe_xri_aborted *axri)
{
	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
	uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct lpfc_nodelist *ndlp;
	unsigned long iflag = 0;
	int rrq_empty = 0;
	bool released = false;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6317 XB aborted xri x%x rxid x%x\n", xri, rxid);
	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return;
	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		/* Check if we already received a free context call
		 * and we have completed processing an abort situation.
		 */
		if (ctxp->flag & LPFC_NVMET_CTX_RLS &&
		    !(ctxp->flag & LPFC_NVMET_ABORT_OP)) {
			list_del(&ctxp->list);
			released = true;
		}
		ctxp->flag &= ~LPFC_NVMET_XBUSY;
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		rrq_empty = list_empty(&phba->active_rrq_list);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
		if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE ||
		     ndlp->nlp_state == NLP_STE_MAPPED_NODE)) {
			lpfc_set_rrq_active(phba, ndlp,
					    ctxp->ctxbuf->sglq->sli4_lxritag,
					    rxid, 1);
			lpfc_sli4_abts_err_handler(phba, ndlp, axri);
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6318 XB aborted %x flg x%x (%x)\n",
				ctxp->oxid, ctxp->flag, released);
		if (released)
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

		if (rrq_empty)
			lpfc_worker_wake_up(phba);
		return;
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}

int
lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
			   struct fc_frame_header *fc_hdr)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
	struct nvmefc_tgt_fcp_req *rsp;
	uint16_t xri;
	unsigned long iflag = 0;
	xri = be16_to_cpu(fc_hdr->fh_ox_id);

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	list_for_each_entry_safe(ctxp, next_ctxp,
				 &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 list) {
		if (ctxp->ctxbuf->sglq->sli4_xritag != xri)
			continue;

		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		spin_lock_irqsave(&ctxp->ctxlock, iflag);
		ctxp->flag |= LPFC_NVMET_ABTS_RCV;
		spin_unlock_irqrestore(&ctxp->ctxlock, iflag);

		lpfc_nvmeio_data(phba,
				 "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
				 xri, smp_processor_id(), 0);

		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6319 NVMET Rcv ABTS:acc xri x%x\n", xri);

		rsp = &ctxp->ctx.fcp_req;
		nvmet_fc_rcv_fcp_abort(phba->targetport, rsp);

		/* Respond with BA_ACC accordingly */
		lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1);
		return 0;
	}
	spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	lpfc_nvmeio_data(phba, "NVMET ABTS RCV: xri x%x CPU %02x rjt %d\n",
			 xri, smp_processor_id(), 1);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6320 NVMET Rcv ABTS:rjt xri x%x\n", xri);

	/* Respond with BA_RJT accordingly */
	lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 0);
#endif
	return 0;
}

void
lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;

	if (phba->nvmet_support == 0)
		return;
	if (phba->targetport) {
		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
		init_completion(&tgtp->tport_unreg_done);
		nvmet_fc_unregister_targetport(phba->targetport);
		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
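		/* Note: wait_for_completion_timeout() takes jiffies, so the
		 * bare 5 above is only a few milliseconds, not seconds;
		 * msecs_to_jiffies() would make the intent explicit.
		 */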
		lpfc_nvmet_cleanup_io_context(phba);
	}
	phba->targetport = NULL;
#endif
}
/**
 * lpfc_nvmet_unsol_ls_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct hbq_dmabuf *nvmebuf)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_rcv_ctx *ctxp;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc;

	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6154 LS Drop IO\n");
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	sid = sli4_sid_from_fc_hdr(fc_hdr);
	ctxp = kzalloc(sizeof(struct lpfc_nvmet_rcv_ctx), GFP_ATOMIC);
	if (ctxp == NULL) {
		atomic_inc(&tgtp->rcv_ls_req_drop);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6155 LS Drop IO x%x: Alloc\n",
				oxid);
dropit:
		lpfc_nvmeio_data(phba, "NVMET LS DROP: "
				 "xri x%x sz %d from %06x\n",
				 oxid, size, sid);
		if (nvmebuf)
			lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}

	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->wqeq = NULL;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->rqb_buffer = (void *)nvmebuf;
	lpfc_nvmeio_data(phba, "NVMET LS RCV: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_ls_req -> lpfc_nvmet_xmt_ls_rsp/cmp -> req->done
	 * lpfc_nvmet_xmt_ls_rsp_cmp should free the allocated ctxp.
	 */
	atomic_inc(&tgtp->rcv_ls_req_in);
	rc = nvmet_fc_rcv_ls_req(phba->targetport, &ctxp->ctx.ls_req,
				 payload, size);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6037 %s: ctx %p sz %d rc %d: %08x %08x %08x "
			"%08x %08x %08x\n", __func__, ctxp, size, rc,
			*payload, *(payload+1), *(payload+2),
			*(payload+3), *(payload+4), *(payload+5));

	if (rc == 0) {
		atomic_inc(&tgtp->rcv_ls_req_out);
		return;
	}

	lpfc_nvmeio_data(phba, "NVMET LS DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);

	atomic_inc(&tgtp->rcv_ls_req_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6156 LS Drop IO x%x: nvmet_fc_rcv_ls_req %d\n",
			ctxp->oxid, rc);

	/* We assume a rcv'ed cmd ALWAYS fits into 1 buffer */
	lpfc_in_buf_free(phba, &nvmebuf->dbuf);

	atomic_inc(&tgtp->xmt_ls_abort);
	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
#endif
}
/**
 * lpfc_nvmet_unsol_fcp_buffer - Process an unsolicited event data buffer
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to lpfc nvme command HBQ data structure.
 *
 * This routine is used for processing the WQE associated with an unsolicited
 * event. It first determines whether there is an existing ndlp that matches
 * the DID from the unsolicited WQE. If not, it will create a new one with
 * the DID from the unsolicited WQE. The ELS command from the unsolicited
 * WQE is then used to invoke the proper routine and to set up proper state
 * of the discovery state machine.
 **/
static void
lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
			    struct lpfc_sli_ring *pring,
			    struct rqb_dmabuf *nvmebuf,
			    uint64_t isr_timestamp)
{
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	struct fc_frame_header *fc_hdr;
	struct lpfc_nvmet_ctxbuf *ctx_buf;
	uint32_t *payload;
	uint32_t size, oxid, sid, rc, qno;
	unsigned long iflag;
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	uint32_t id;
#endif

	ctx_buf = NULL;
	if (!nvmebuf || !phba->targetport) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6157 NVMET FCP Drop IO\n");
		oxid = 0;
		size = 0;
		sid = 0;
		ctxp = NULL;
		goto dropit;
	}
	spin_lock_irqsave(&phba->sli4_hba.nvmet_io_lock, iflag);
	if (phba->sli4_hba.nvmet_ctx_cnt) {
		list_remove_head(&phba->sli4_hba.lpfc_nvmet_ctx_list,
				 ctx_buf, struct lpfc_nvmet_ctxbuf, list);
		phba->sli4_hba.nvmet_ctx_cnt--;
	}
	spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_lock, iflag);
	fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt);
	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
	size = nvmebuf->bytes_recv;

#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->cpucheck_on & LPFC_CHECK_NVMET_RCV) {
		id = smp_processor_id();
		if (id < LPFC_CHECK_CPU_CNT)
			phba->cpucheck_rcv_io[id]++;
	}
#endif

	lpfc_nvmeio_data(phba, "NVMET FCP RCV: xri x%x sz %d CPU %02x\n",
			 oxid, size, smp_processor_id());

	if (!ctx_buf) {
		/* Queue this NVME IO to process later */
		spin_lock_irqsave(&phba->sli4_hba.nvmet_io_wait_lock, iflag);
		list_add_tail(&nvmebuf->hbuf.list,
			      &phba->sli4_hba.lpfc_nvmet_io_wait_list);
		phba->sli4_hba.nvmet_io_wait_cnt++;
		phba->sli4_hba.nvmet_io_wait_total++;
		spin_unlock_irqrestore(&phba->sli4_hba.nvmet_io_wait_lock,
				       iflag);

		/* Post a brand new DMA buffer to RQ */
		qno = nvmebuf->idx;
		lpfc_post_rq_buffer(
			phba, phba->sli4_hba.nvmet_mrq_hdr[qno],
			phba->sli4_hba.nvmet_mrq_data[qno], 1, qno);
		return;
	}
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	payload = (uint32_t *)(nvmebuf->dbuf.virt);
	sid = sli4_sid_from_fc_hdr(fc_hdr);

	ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context;
	memset(ctxp, 0, sizeof(ctxp->ctx));
	ctxp->wqeq = NULL;
	ctxp->txrdy = NULL;
	ctxp->offset = 0;
	ctxp->phba = phba;
	ctxp->size = size;
	ctxp->oxid = oxid;
	ctxp->sid = sid;
	ctxp->state = LPFC_NVMET_STE_RCV;
	ctxp->entry_cnt = 1;
	ctxp->flag = 0;
	ctxp->ctxbuf = ctx_buf;
	spin_lock_init(&ctxp->ctxlock);
#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
	if (phba->ktime_on) {
		ctxp->ts_isr_cmd = isr_timestamp;
		ctxp->ts_cmd_nvme = ktime_get_ns();
		ctxp->ts_nvme_data = 0;
		ctxp->ts_data_wqput = 0;
		ctxp->ts_isr_data = 0;
		ctxp->ts_data_nvme = 0;
		ctxp->ts_nvme_status = 0;
		ctxp->ts_status_wqput = 0;
		ctxp->ts_isr_status = 0;
		ctxp->ts_status_nvme = 0;
	}
#endif

	atomic_inc(&tgtp->rcv_fcp_cmd_in);
	/*
	 * The calling sequence should be:
	 * nvmet_fc_rcv_fcp_req -> lpfc_nvmet_xmt_fcp_op/cmp -> req->done
	 * lpfc_nvmet_xmt_fcp_op_cmp should free the allocated ctxp.
	 * When we return from nvmet_fc_rcv_fcp_req, all relevant info in
	 * the NVME command / FC header is stored, so we are free to repost
	 * the buffer.
	 */
	rc = nvmet_fc_rcv_fcp_req(phba->targetport, &ctxp->ctx.fcp_req,
				  payload, size);
	/* Process FCP command */
	if (rc == 0) {
		atomic_inc(&tgtp->rcv_fcp_cmd_out);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
			"6159 FCP Drop IO x%x: err x%x: x%x x%x x%x\n",
			ctxp->oxid, rc,
			atomic_read(&tgtp->rcv_fcp_cmd_in),
			atomic_read(&tgtp->rcv_fcp_cmd_out),
			atomic_read(&tgtp->xmt_fcp_release));
dropit:
	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
			 oxid, size, sid);
	if (ctxp) {
		lpfc_nvmet_defer_release(phba, ctxp);
		lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, sid, oxid);
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
		return;
	}

	if (ctx_buf)
		lpfc_nvmet_ctxbuf_post(phba, ctx_buf);

	if (nvmebuf)
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf); /* repost */
#endif
}
/**
 * lpfc_nvmet_unsol_ls_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_ls_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *piocb)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *nvmebuf;

	d_buf = piocb->context2;
	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);

	if (phba->nvmet_support == 0) {
		lpfc_in_buf_free(phba, &nvmebuf->dbuf);
		return;
	}
	lpfc_nvmet_unsol_ls_buffer(phba, pring, nvmebuf);
}
/**
 * lpfc_nvmet_unsol_fcp_event - Process an unsolicited event from an nvme nport
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @nvmebuf: pointer to received nvme data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking the routine
 * lpfc_nvmet_unsol_fcp_buffer() after properly setting up the buffer from the
 * SLI RQ on which the unsolicited event was received.
 **/
void
lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba,
			   struct lpfc_sli_ring *pring,
			   struct rqb_dmabuf *nvmebuf,
			   uint64_t isr_timestamp)
{
	if (phba->nvmet_support == 0) {
		lpfc_rq_buf_free(phba, &nvmebuf->hbuf);
		return;
	}
	lpfc_nvmet_unsol_fcp_buffer(phba, pring, nvmebuf,
				    isr_timestamp);
}
/**
 * lpfc_nvmet_prep_ls_wqe - Allocate and prepare a lpfc wqe data structure
 * @phba: pointer to a host N_Port data structure.
 * @ctxp: Context info for NVME LS Request
 * @rspbuf: DMA buffer of NVME command.
 * @rspsize: size of the NVME command.
 *
 * This routine is used for allocating an lpfc-WQE data structure from
 * the driver lpfc-WQE free-list and preparing the WQE with the parameters
 * passed into the routine for the discovery state machine to issue an
 * Extended Link Service (NVME) command. It is a generic lpfc-WQE allocation
 * and preparation routine that is used by all the discovery state machine
 * routines; the NVME command-specific fields will be set up later by the
 * individual discovery machine routines after this routine has allocated
 * and prepared a generic WQE data structure. It fills in the Buffer
 * Descriptor Entries (BDEs), allocates buffers for both command payload
 * and response payload (if expected). The reference count on the ndlp is
 * incremented by 1 and the reference to the ndlp is put into context1 of
 * the WQE data structure for this WQE to hold the ndlp reference for the
 * command's callback function to access later.
 *
 * Returns:
 * Pointer to the newly allocated/prepared nvme wqe data structure
 * NULL - when nvme wqe data structure allocation/preparation failed
 **/
static struct lpfc_iocbq *
lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
		       struct lpfc_nvmet_rcv_ctx *ctxp,
		       dma_addr_t rspbuf, uint16_t rspsize)
{
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	union lpfc_wqe *wqe;

	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6104 lpfc_nvmet_prep_ls_wqe: link err: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		return NULL;
	}
	/* Allocate buffer for command wqe */
	nvmewqe = lpfc_sli_get_iocbq(phba);
	if (nvmewqe == NULL) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6105 lpfc_nvmet_prep_ls_wqe: No WQE: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
				"6106 lpfc_nvmet_prep_ls_wqe: No ndlp: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		goto nvme_wqe_free_wqeq_exit;
	}
	ctxp->wqeq = nvmewqe;
	/* prevent preparing wqe with NULL ndlp reference */
	nvmewqe->context1 = lpfc_nlp_get(ndlp);
	if (nvmewqe->context1 == NULL)
		goto nvme_wqe_free_wqeq_exit;
	nvmewqe->context2 = ctxp;

	wqe = &nvmewqe->wqe;
	memset(wqe, 0, sizeof(union lpfc_wqe));

	/* Words 0 - 2 */
	wqe->xmit_sequence.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	wqe->xmit_sequence.bde.tus.f.bdeSize = rspsize;
	wqe->xmit_sequence.bde.addrLow = le32_to_cpu(putPaddrLow(rspbuf));
	wqe->xmit_sequence.bde.addrHigh = le32_to_cpu(putPaddrHigh(rspbuf));
	/* Word 5 */
	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe->xmit_sequence.wqe_com, nvmewqe->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
	/* Word 8 */
	wqe->xmit_sequence.wqe_com.abort_tag = nvmewqe->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe->xmit_sequence.wqe_com, nvmewqe->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ctxp->oxid);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	/* Word 12 */
	wqe->xmit_sequence.xmit_len = rspsize;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->iocb_flag |= LPFC_IO_NVME_LS;

	/* Xmit NVME response to remote NPORT <did> */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC,
			"6039 Xmit NVME LS response to remote "
			"NPORT x%x iotag:x%x oxid:x%x size:x%x\n",
			ndlp->nlp_DID, nvmewqe->iotag, ctxp->oxid,
			rspsize);
	return nvmewqe;

nvme_wqe_free_wqeq_exit:
	nvmewqe->context2 = NULL;
	nvmewqe->context3 = NULL;
	lpfc_sli_release_iocbq(phba, nvmewqe);
	return NULL;
}
static struct lpfc_iocbq *
lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
			struct lpfc_nvmet_rcv_ctx *ctxp)
{
	struct nvmefc_tgt_fcp_req *rsp = &ctxp->ctx.fcp_req;
	struct lpfc_nvmet_tgtport *tgtp;
	struct sli4_sge *sgl;
	struct lpfc_nodelist *ndlp;
	struct lpfc_iocbq *nvmewqe;
	struct scatterlist *sgel;
	union lpfc_wqe128 *wqe;
	uint32_t *txrdy;
	dma_addr_t physaddr;
	int i, cnt;
	int xc = 1;
	if (!lpfc_is_link_up(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6107 lpfc_nvmet_prep_fcp_wqe: link err:"
				"NPORT x%x oxid:x%x\n", ctxp->sid,
				ctxp->oxid);
		return NULL;
	}

	ndlp = lpfc_findnode_did(phba->pport, ctxp->sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6108 lpfc_nvmet_prep_fcp_wqe: no ndlp: "
				"NPORT x%x oxid:x%x\n",
				ctxp->sid, ctxp->oxid);
		return NULL;
	}

	if (rsp->sg_cnt > phba->cfg_nvme_seg_cnt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6109 lpfc_nvmet_prep_fcp_wqe: seg cnt err: "
				"NPORT x%x oxid:x%x cnt %d\n",
				ctxp->sid, ctxp->oxid, phba->cfg_nvme_seg_cnt);
		return NULL;
	}
	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	nvmewqe = ctxp->wqeq;
	if (nvmewqe == NULL) {
		/* Allocate buffer for command wqe */
		nvmewqe = ctxp->ctxbuf->iocbq;
		if (nvmewqe == NULL) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6110 lpfc_nvmet_prep_fcp_wqe: No "
					"WQE: NPORT x%x oxid:x%x\n",
					ctxp->sid, ctxp->oxid);
			return NULL;
		}
		ctxp->wqeq = nvmewqe;
		xc = 0; /* create new XRI */
		nvmewqe->sli4_lxritag = NO_XRI;
		nvmewqe->sli4_xritag = NO_XRI;
	}
	/* Sanity check */
	if (((ctxp->state == LPFC_NVMET_STE_RCV) &&
	     (ctxp->entry_cnt == 1)) ||
	    ((ctxp->state == LPFC_NVMET_STE_DATA) &&
	     (ctxp->entry_cnt > 1))) {
		wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
	} else {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
				"6111 Wrong state %s: %d cnt %d\n",
				__func__, ctxp->state, ctxp->entry_cnt);
		return NULL;
	}
	sgl = (struct sli4_sge *)ctxp->ctxbuf->sglq->sgl;
	switch (rsp->op) {
	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		/* Words 0 - 2 : The first sg segment */
		sgel = &rsp->sg[0];
		physaddr = sg_dma_address(sgel);
		wqe->fcp_tsend.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_tsend.bde.tus.f.bdeSize = sg_dma_len(sgel);
		wqe->fcp_tsend.bde.addrLow = cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_tsend.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_tsend.payload_offset_len = 0;

		/* Word 4 */
		wqe->fcp_tsend.relative_offset = ctxp->offset;
		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_tsend.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_tsend.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_cmnd, &wqe->fcp_tsend.wqe_com, CMD_FCP_TSEND64_WQE);

		/* Word 8 */
		wqe->fcp_tsend.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_tsend.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_tsend.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_tsend.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_tsend.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_tsend.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_ebde_cnt, &wqe->fcp_tsend.wqe_com, 0);
		bf_set(wqe_xc, &wqe->fcp_tsend.wqe_com, xc);
		bf_set(wqe_nvme, &wqe->fcp_tsend.wqe_com, 1);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_tsend.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_tsend.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_tsend.wqe_com,
		       FCP_COMMAND_TSEND);

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
		/* Setup 2 SKIP SGEs */
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		if (rsp->op == NVMET_FCOP_READDATA_RSP) {
			atomic_inc(&tgtp->xmt_fcp_read_rsp);
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 1);
			if ((ndlp->nlp_flag & NLP_SUPPRESS_RSP) &&
			    (rsp->rsplen == 12)) {
				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
			} else {
				bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
				bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 1);
				bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com,
				       ((rsp->rsplen >> 2) - 1));
				memcpy(&wqe->words[16], rsp->rspaddr,
				       rsp->rsplen);
			}
		} else {
			atomic_inc(&tgtp->xmt_fcp_read);

			bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_wqes, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_irsp, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_ar, &wqe->fcp_tsend.wqe_com, 0);
			bf_set(wqe_irsplen, &wqe->fcp_tsend.wqe_com, 0);
		}
		ctxp->state = LPFC_NVMET_STE_DATA;
		break;
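		/* Note on the two response flavors above: SUP suppresses the
		 * FCP_RSP IU entirely for a 12-byte good status, while
		 * WQES/IRSP embed the response payload directly in the WQE
		 * (length in words, minus one); either way the auto-response
		 * (AR) bit lets READDATA_RSP complete without a separate
		 * TRSP operation.
		 */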
	case NVMET_FCOP_WRITEDATA:
		/* Words 0 - 2 : The first sg segment */
		txrdy = pci_pool_alloc(phba->txrdy_payload_pool,
				       GFP_KERNEL, &physaddr);
		if (!txrdy) {
			lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
					"6041 Bad txrdy buffer: oxid x%x\n",
					ctxp->oxid);
			return NULL;
		}
		ctxp->txrdy = txrdy;
		ctxp->txrdy_phys = physaddr;
		wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN;
		wqe->fcp_treceive.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_treceive.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_treceive.payload_offset_len = TXRDY_PAYLOAD_LEN;

		/* Word 4 */
		wqe->fcp_treceive.relative_offset = ctxp->offset;
		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_treceive.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_treceive.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_ar, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_cmnd, &wqe->fcp_treceive.wqe_com,
		       CMD_FCP_TRECEIVE64_WQE);

		/* Word 8 */
		wqe->fcp_treceive.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_treceive.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_treceive.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_treceive.wqe_com, 1);
		bf_set(wqe_iod, &wqe->fcp_treceive.wqe_com, LPFC_WQE_IOD_READ);
		bf_set(wqe_lenloc, &wqe->fcp_treceive.wqe_com,
		       LPFC_WQE_LENLOC_WORD12);
		bf_set(wqe_xc, &wqe->fcp_treceive.wqe_com, xc);
		bf_set(wqe_wqes, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_irsp, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_irsplen, &wqe->fcp_treceive.wqe_com, 0);
		bf_set(wqe_nvme, &wqe->fcp_treceive.wqe_com, 1);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_treceive.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_treceive.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_treceive.wqe_com,
		       FCP_COMMAND_TRECEIVE);
		bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);

		/* Word 12 */
		wqe->fcp_tsend.fcp_data_len = rsp->transfer_length;
		/* Setup 1 TXRDY and 1 SKIP SGE */
		txrdy[0] = 0;
		txrdy[1] = cpu_to_be32(rsp->transfer_length);
		txrdy[2] = 0;
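		/* txrdy[] above is the FCP Transfer Ready IU payload:
		 * word 0 is the relative offset, word 1 the burst length,
		 * word 2 is reserved.
		 */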
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN);
		sgl++;
		sgl->addr_hi = 0;
		sgl->addr_lo = 0;
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = 0;
		sgl++;
		ctxp->state = LPFC_NVMET_STE_DATA;
		atomic_inc(&tgtp->xmt_fcp_write);
		break;
	case NVMET_FCOP_RSP:
		/* Words 0 - 2 */
		physaddr = rsp->rspdma;
		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
		wqe->fcp_trsp.bde.addrLow =
			cpu_to_le32(putPaddrLow(physaddr));
		wqe->fcp_trsp.bde.addrHigh =
			cpu_to_le32(putPaddrHigh(physaddr));

		/* Word 3 */
		wqe->fcp_trsp.response_len = rsp->rsplen;

		/* Word 4 */
		wqe->fcp_trsp.rsvd_4_5[0] = 0;

		/* Word 5 */
		wqe->fcp_trsp.rsvd_4_5[1] = 0;
		/* Word 6 */
		bf_set(wqe_ctxt_tag, &wqe->fcp_trsp.wqe_com,
		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
		bf_set(wqe_xri_tag, &wqe->fcp_trsp.wqe_com,
		       nvmewqe->sli4_xritag);

		/* Word 7 */
		bf_set(wqe_ag, &wqe->fcp_trsp.wqe_com, 1);
		bf_set(wqe_cmnd, &wqe->fcp_trsp.wqe_com, CMD_FCP_TRSP64_WQE);

		/* Word 8 */
		wqe->fcp_trsp.wqe_com.abort_tag = nvmewqe->iotag;

		/* Word 9 */
		bf_set(wqe_reqtag, &wqe->fcp_trsp.wqe_com, nvmewqe->iotag);
		bf_set(wqe_rcvoxid, &wqe->fcp_trsp.wqe_com, ctxp->oxid);

		/* Word 10 */
		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
		bf_set(wqe_dbde, &wqe->fcp_trsp.wqe_com, 0);
		bf_set(wqe_iod, &wqe->fcp_trsp.wqe_com, LPFC_WQE_IOD_WRITE);
		bf_set(wqe_lenloc, &wqe->fcp_trsp.wqe_com,
		       LPFC_WQE_LENLOC_WORD3);
		bf_set(wqe_xc, &wqe->fcp_trsp.wqe_com, xc);
		bf_set(wqe_nvme, &wqe->fcp_trsp.wqe_com, 1);
		if (phba->cfg_nvme_oas)
			bf_set(wqe_oas, &wqe->fcp_trsp.wqe_com, 1);

		/* Word 11 */
		bf_set(wqe_cqid, &wqe->fcp_trsp.wqe_com,
		       LPFC_WQE_CQ_ID_DEFAULT);
		bf_set(wqe_cmd_type, &wqe->fcp_trsp.wqe_com,
		       FCP_COMMAND_TRSP);
		bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 0);
		ctxp->state = LPFC_NVMET_STE_RSP;
1985 if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) {
1986 /* Good response - all zero's on wire */
1987 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 0);
1988 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 0);
1989 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com, 0);
1991 bf_set(wqe_wqes, &wqe->fcp_trsp.wqe_com, 1);
1992 bf_set(wqe_irsp, &wqe->fcp_trsp.wqe_com, 1);
1993 bf_set(wqe_irsplen, &wqe->fcp_trsp.wqe_com,
1994 ((rsp->rsplen >> 2) - 1));
1995 memcpy(&wqe->words[16], rsp->rspaddr, rsp->rsplen);
1998 /* Use rspbuf, NOT sg list */
2001 atomic_inc(&tgtp->xmt_fcp_rsp);
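	/*
	 * Illustrative sketch (not compiled here): the branch above chooses
	 * between a "good" response, which the adapter generates as all
	 * zeros on the wire, and a full ERSP that must be embedded inline
	 * in the WQE (wqes/irsp set; irsplen is the length in 32-bit words
	 * minus one). The helper and constant below are hypothetical
	 * stand-ins that only model that decision.
	 */
#if 0
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_SUCCESS_LEN 12	/* stand-in for LPFC_NVMET_SUCCESS_LEN */

struct demo_trsp {
	bool inline_rsp;	/* models the wqes/irsp bits */
	uint32_t irsplen;	/* (rsplen >> 2) - 1 */
	uint8_t payload[64];	/* stand-in for &wqe->words[16] */
};

static void demo_set_rsp(struct demo_trsp *t, const void *rsp, int rsplen)
{
	memset(t, 0, sizeof(*t));
	if (rsplen == DEMO_SUCCESS_LEN)
		return;		/* good response: all zeros on wire */
	t->inline_rsp = true;
	t->irsplen = (rsplen >> 2) - 1;
	memcpy(t->payload, rsp, rsplen);
}

int main(void)
{
	struct demo_trsp t;
	uint8_t ersp[32] = { 0 };

	demo_set_rsp(&t, ersp, sizeof(ersp));
	printf("inline=%d irsplen=%u\n", t.inline_rsp, t.irsplen);
	return 0;
}
#endif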
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
				"6064 Unknown Rsp Op %d\n",
				rsp->op);
		return NULL;
	}

	nvmewqe->retry = 1;
	nvmewqe->vport = phba->pport;
	nvmewqe->drvrTimeout = (phba->fc_ratov * 3) + LPFC_DRVR_TIMEOUT;
	nvmewqe->context1 = ndlp;

	for (i = 0; i < rsp->sg_cnt; i++) {
		sgel = &rsp->sg[i];
		physaddr = sg_dma_address(sgel);
		cnt = sg_dma_len(sgel);
		sgl->addr_hi = putPaddrHigh(physaddr);
		sgl->addr_lo = putPaddrLow(physaddr);
		sgl->word2 = 0;
		bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA);
		bf_set(lpfc_sli4_sge_offset, sgl, ctxp->offset);
		if ((i + 1) == rsp->sg_cnt)
			bf_set(lpfc_sli4_sge_last, sgl, 1);
		sgl->word2 = cpu_to_le32(sgl->word2);
		sgl->sge_len = cpu_to_le32(cnt);
		sgl++;
		ctxp->offset += cnt;
	}
	return nvmewqe;
}
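/*
 * Illustrative sketch (not compiled here): all of the WQE programming above
 * goes through bf_set()/bf_get(), which are shift/mask macros driven by
 * per-field _SHIFT/_MASK/_WORD triplets defined in lpfc_hw4.h. The
 * standalone demo below mirrors that pattern with a hypothetical
 * "demo_field" so the mechanics can be run and checked in user space.
 */
#if 0
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define demo_field_SHIFT	8
#define demo_field_MASK		0x000000ff
#define demo_field_WORD		word0

struct demo_wqe_com {
	uint32_t word0;
};

/* Same shape as the lpfc bf_set()/bf_get() macros */
#define demo_bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
#define demo_bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

int main(void)
{
	struct demo_wqe_com com = { .word0 = 0xdeadbeef };

	demo_bf_set(demo_field, &com, 0x5a);	/* only bits 15:8 change */
	assert(demo_bf_get(demo_field, &com) == 0x5a);
	printf("word0 = 0x%08x\n", com.word0);	/* prints 0xdead5aef */
	return 0;
}
#endif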
/**
 * lpfc_nvmet_sol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for
 * FCP cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_sol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			     struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;
	unsigned long flags;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	ctxp->state = LPFC_NVMET_STE_DONE;

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6165 ABORT cmpl: xri x%x flg x%x (%d) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* This is the iocbq for the abort, not the command */
	lpfc_sli_release_iocbq(phba, cmdwqe);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
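/*
 * Illustrative sketch (not compiled here) of the deferred-release handshake
 * used above: a context is freed by whichever party finishes last, so the
 * completion path frees it only when the transport release has already
 * arrived (CTX_RLS set) and the exchange is no longer owned by the hardware
 * (XBUSY clear). The names ending in _DEMO are hypothetical stand-ins for
 * the driver's ctxlock/flag machinery.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define CTX_RLS_DEMO	0x1	/* transport called its free-context hook */
#define XBUSY_DEMO	0x2	/* XRI still owned by the hardware */

/* True when the caller that just completed its step should also free
 * the context, i.e. it observed the other parties already done.
 */
static bool should_free_demo(unsigned int flags)
{
	return (flags & CTX_RLS_DEMO) && !(flags & XBUSY_DEMO);
}

int main(void)
{
	unsigned int flags = CTX_RLS_DEMO | XBUSY_DEMO;

	printf("abort cmpl, XRI busy : free=%d\n", should_free_demo(flags));
	flags &= ~XBUSY_DEMO;	/* xri_aborted CQE arrives later */
	printf("xri aborted          : free=%d\n", should_free_demo(flags));
	return 0;
}
#endif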
/**
 * lpfc_nvmet_unsol_fcp_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for
 * FCP cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_unsol_fcp_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			       struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	unsigned long flags;
	uint32_t status, result;
	bool released = false;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp) {
		/* if context is clear, related io already complete */
		lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
				"6070 ABTS cmpl: WCQE: %08x %08x %08x %08x\n",
				wcqe->word0, wcqe->total_data_placed,
				result, wcqe->word3);
		return;
	}

	if (ctxp->flag & LPFC_NVMET_ABORT_OP)
		atomic_inc(&tgtp->xmt_fcp_abort_cmpl);

	/* Sanity check */
	if (ctxp->state != LPFC_NVMET_STE_ABORT) {
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
				"6112 ABTS Wrong state:%d oxid x%x\n",
				ctxp->state, ctxp->oxid);
	}

	/* Check if we already received a free context call
	 * and we have completed processing an abort situation.
	 */
	ctxp->state = LPFC_NVMET_STE_DONE;
	spin_lock_irqsave(&ctxp->ctxlock, flags);
	if ((ctxp->flag & LPFC_NVMET_CTX_RLS) &&
	    !(ctxp->flag & LPFC_NVMET_XBUSY)) {
		list_del(&ctxp->list);
		released = true;
	}
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	spin_unlock_irqrestore(&ctxp->ctxlock, flags);
	atomic_inc(&tgtp->xmt_abort_rsp);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6316 ABTS cmpl xri x%x flg x%x (%x) "
			"WCQE: %08x %08x %08x %08x\n",
			ctxp->oxid, ctxp->flag, released,
			wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	cmdwqe->context2 = NULL;
	cmdwqe->context3 = NULL;
	/*
	 * if transport has released ctx, then can reuse it. Otherwise,
	 * will be recycled by transport release call.
	 */
	if (released)
		lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);

	/* Since iaab/iaar are NOT set, there is no work left.
	 * For LPFC_NVMET_XBUSY, lpfc_sli4_nvmet_xri_aborted
	 * should have been called already.
	 */
}
/**
 * lpfc_nvmet_xmt_ls_abort_cmp - Completion handler for ABTS
 * @phba: Pointer to HBA context object.
 * @cmdwqe: Pointer to driver command WQE object.
 * @wcqe: Pointer to driver response CQE object.
 *
 * The function is called from SLI ring event handler with no
 * lock held. This function is the completion handler for NVME ABTS for
 * LS cmds. The function frees memory resources used for the NVME commands.
 **/
static void
lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe,
			    struct lpfc_wcqe_complete *wcqe)
{
	struct lpfc_nvmet_rcv_ctx *ctxp;
	struct lpfc_nvmet_tgtport *tgtp;
	uint32_t status, result;

	ctxp = cmdwqe->context2;
	status = bf_get(lpfc_wcqe_c_status, wcqe);
	result = wcqe->parameter;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	atomic_inc(&tgtp->xmt_ls_abort_cmpl);

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6083 Abort cmpl: ctx %p WCQE: %08x %08x %08x %08x\n",
			ctxp, wcqe->word0, wcqe->total_data_placed,
			result, wcqe->word3);

	if (ctxp) {
		cmdwqe->context2 = NULL;
		cmdwqe->context3 = NULL;
		lpfc_sli_release_iocbq(phba, cmdwqe);
		kfree(ctxp);
	} else
		lpfc_sli_release_iocbq(phba, cmdwqe);
}
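/*
 * Illustrative sketch (not compiled here) contrasting the two context
 * ownership models in these completion handlers: LS abort contexts are
 * plain heap allocations released with kfree() above, while FCP contexts
 * come from the driver's preallocated ctxbuf pool and are recycled via
 * lpfc_nvmet_ctxbuf_post(). A simple free list stands in for the pool;
 * all demo_* names are hypothetical.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct demo_ctx {
	struct demo_ctx *next;	/* free-list linkage */
	int oxid;
};

static struct demo_ctx *demo_pool;	/* head of the free list */

static void demo_ctxbuf_post(struct demo_ctx *ctx)
{
	ctx->next = demo_pool;	/* recycle, do not free */
	demo_pool = ctx;
}

int main(void)
{
	struct demo_ctx *ls_ctx = malloc(sizeof(*ls_ctx));
	struct demo_ctx *fcp_ctx = malloc(sizeof(*fcp_ctx));

	free(ls_ctx);			/* LS path: kfree(ctxp) */
	demo_ctxbuf_post(fcp_ctx);	/* FCP path: back to the pool */
	printf("pool head ready for reuse: %p\n", (void *)demo_pool);
	free(fcp_ctx);			/* teardown for the demo only */
	return 0;
}
#endif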
static int
lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
			     struct lpfc_nvmet_rcv_ctx *ctxp,
			     uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	struct lpfc_nodelist *ndlp;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6067 ABTS: sid %x xri x%x/x%x\n",
			sid, xri, ctxp->wqeq->sli4_xritag);

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6134 Drop ABTS - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		return 0;
	}

	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;

	/*
	 * Since we zero the whole WQE, we need to ensure we set the WQE fields
	 * that were initialized in lpfc_sli4_nvmet_alloc.
	 */
	memset(wqe_abts, 0, sizeof(union lpfc_wqe));

	/* Word 5 */
	bf_set(wqe_dfctl, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_ls, &wqe_abts->xmit_sequence.wge_ctl, 1);
	bf_set(wqe_la, &wqe_abts->xmit_sequence.wge_ctl, 0);
	bf_set(wqe_rctl, &wqe_abts->xmit_sequence.wge_ctl, FC_RCTL_BA_ABTS);
	bf_set(wqe_type, &wqe_abts->xmit_sequence.wge_ctl, FC_TYPE_BLS);

	/* Word 6 */
	bf_set(wqe_ctxt_tag, &wqe_abts->xmit_sequence.wqe_com,
	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
	bf_set(wqe_xri_tag, &wqe_abts->xmit_sequence.wqe_com,
	       abts_wqeq->sli4_xritag);

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe_abts->xmit_sequence.wqe_com,
	       CMD_XMIT_SEQUENCE64_WQE);
	bf_set(wqe_ct, &wqe_abts->xmit_sequence.wqe_com, SLI4_CT_RPI);
	bf_set(wqe_class, &wqe_abts->xmit_sequence.wqe_com, CLASS3);
	bf_set(wqe_pu, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 8 */
	wqe_abts->xmit_sequence.wqe_com.abort_tag = abts_wqeq->iotag;

	/* Word 9 */
	bf_set(wqe_reqtag, &wqe_abts->xmit_sequence.wqe_com, abts_wqeq->iotag);
	/* Needs to be set by caller */
	bf_set(wqe_rcvoxid, &wqe_abts->xmit_sequence.wqe_com, xri);

	/* Word 10 */
	bf_set(wqe_dbde, &wqe_abts->xmit_sequence.wqe_com, 1);
	bf_set(wqe_iod, &wqe_abts->xmit_sequence.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_LENLOC_WORD12);
	bf_set(wqe_ebde_cnt, &wqe_abts->xmit_sequence.wqe_com, 0);
	bf_set(wqe_qosd, &wqe_abts->xmit_sequence.wqe_com, 0);

	/* Word 11 */
	bf_set(wqe_cqid, &wqe_abts->xmit_sequence.wqe_com,
	       LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_cmd_type, &wqe_abts->xmit_sequence.wqe_com,
	       OTHER_COMMAND);

	abts_wqeq->vport = phba->pport;
	abts_wqeq->context1 = ndlp;
	abts_wqeq->context2 = ctxp;
	abts_wqeq->context3 = NULL;
	abts_wqeq->rsvd2 = 0;
	/* hba_wqidx should already be setup from command we are aborting */
	abts_wqeq->iocb.ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	abts_wqeq->iocb.ulpLe = 1;

	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6069 Issue ABTS to xri x%x reqtag x%x\n",
			xri, abts_wqeq->iotag);
	return 1;
}
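/*
 * Illustrative sketch (not compiled here): the WQE above asks the adapter
 * to emit a BLS ABTS frame, so word 5 carries the FC frame-header framing
 * (R_CTL = BA_ABTS, TYPE = BLS) and wqe_rcvoxid supplies the OX_ID of the
 * exchange being aborted. The struct below loosely mirrors
 * struct fc_frame_header from fc_fs.h; the helper and its field values
 * are for illustration only.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct demo_fc_hdr {
	uint8_t  r_ctl;		/* routing control */
	uint8_t  d_id[3];	/* remote port (the sid we abort to) */
	uint8_t  cs_ctl;
	uint8_t  s_id[3];	/* local port */
	uint8_t  type;		/* data structure type */
	uint8_t  f_ctl[3];
	uint8_t  seq_id, df_ctl;
	uint16_t seq_cnt, ox_id, rx_id;
	uint32_t parm;
};

static void demo_build_abts(struct demo_fc_hdr *h, uint16_t oxid)
{
	memset(h, 0, sizeof(*h));
	h->r_ctl = 0x81;	/* FC_RCTL_BA_ABTS: basic link svc abort */
	h->type = 0x00;		/* FC_TYPE_BLS */
	h->ox_id = oxid;	/* exchange being aborted (wqe_rcvoxid) */
}

int main(void)
{
	struct demo_fc_hdr hdr;

	demo_build_abts(&hdr, 0x1234);
	printf("ABTS: r_ctl=0x%02x type=0x%02x ox_id=0x%04x\n",
	       hdr.r_ctl, hdr.type, hdr.ox_id);
	return 0;
}
#endif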
static int
lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
			       struct lpfc_nvmet_rcv_ctx *ctxp,
			       uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *abts_wqe;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	ndlp = lpfc_findnode_did(phba->pport, sid);
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6160 Drop ABORT - wrong NDLP state x%x.\n",
				(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);

		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Issue ABTS for this WQE based on iotag */
	ctxp->abort_wqeq = lpfc_sli_get_iocbq(phba);
	if (!ctxp->abort_wqeq) {
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
				"6161 ABORT failed: No wqeqs: "
				"xri: x%x\n", ctxp->oxid);
		/* No failure to an ABTS request. */
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}
	abts_wqeq = ctxp->abort_wqeq;
	abts_wqe = &abts_wqeq->wqe;
	ctxp->state = LPFC_NVMET_STE_ABORT;

	/* Announce entry to new IO submit field. */
	lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
			"6162 ABORT Request to rport DID x%06x "
			"for xri x%x x%x\n",
			ctxp->sid, ctxp->oxid, ctxp->wqeq->sli4_xritag);

	/* If the hba is getting reset, this flag is set. It is
	 * cleared when the reset is complete and rings reestablished.
	 */
	spin_lock_irqsave(&phba->hbalock, flags);
	/* driver queued commands are in process of being flushed */
	if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6163 Driver in reset cleanup - flushing "
				"NVME Req now. hba_flag x%x oxid x%x\n",
				phba->hba_flag, ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Outstanding abort is in progress */
	if (abts_wqeq->iocb_flag & LPFC_DRIVER_ABORTED) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		atomic_inc(&tgtp->xmt_abort_rsp_error);
		lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
				"6164 Outstanding NVME I/O Abort Request "
				"still pending on oxid x%x\n",
				ctxp->oxid);
		lpfc_sli_release_iocbq(phba, abts_wqeq);
		ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
		return 0;
	}

	/* Ready - mark outstanding as aborted by driver. */
	abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED;

	/* WQEs are reused. Clear stale data and set key fields to
	 * zero like ia, iaab, iaar, xri_tag, and ctxt_tag.
	 */
	memset(abts_wqe, 0, sizeof(union lpfc_wqe));

	/* word 3 */
	bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG);

	/* word 7 */
	bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0);
	bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);

	/* word 8 - tell the FW to abort the IO associated with this
	 * outstanding exchange ID.
	 */
	abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag;

	/* word 9 - this is the iotag for the abts_wqe completion. */
	bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com,
	       abts_wqeq->iotag);

	/* word 10 */
	bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);

	/* word 11 */
	bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND);
	bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1);
	bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
	abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME;
	abts_wqeq->context2 = ctxp;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_sol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_ABTS,
			"6166 Failed ABORT issue_wqe with status x%x "
			"for oxid x%x.\n",
			rc, ctxp->oxid);
	return 1;
}
static int
lpfc_nvmet_unsol_fcp_issue_abort(struct lpfc_hba *phba,
				 struct lpfc_nvmet_rcv_ctx *ctxp,
				 uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		ctxp->wqeq = ctxp->ctxbuf->iocbq;
		ctxp->wqeq->hba_wqidx = 0;
	}

	rc = lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);
	if (rc == 0)
		goto aerr;

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq = ctxp->wqeq;
	abts_wqeq->wqe_cmpl = lpfc_nvmet_unsol_fcp_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVMET;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_FCP_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS)
		return 0;

aerr:
	atomic_inc(&tgtp->xmt_abort_rsp_error);
	ctxp->flag &= ~LPFC_NVMET_ABORT_OP;
	lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
			"6135 Failed to Issue ABTS for oxid x%x. Status x%x\n",
			ctxp->oxid, rc);
	return 1;
}
static int
lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba,
				struct lpfc_nvmet_rcv_ctx *ctxp,
				uint32_t sid, uint16_t xri)
{
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_iocbq *abts_wqeq;
	union lpfc_wqe *wqe_abts;
	unsigned long flags;
	int rc;

	tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
	if (!ctxp->wqeq) {
		/* Issue ABTS for this WQE based on iotag */
		ctxp->wqeq = lpfc_sli_get_iocbq(phba);
		if (!ctxp->wqeq) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
					"6068 Abort failed: No wqeqs: "
					"xri: x%x\n", xri);
			/* No failure to an ABTS request. */
			kfree(ctxp);
			return 0;
		}
	}
	abts_wqeq = ctxp->wqeq;
	wqe_abts = &abts_wqeq->wqe;

	lpfc_nvmet_unsol_issue_abort(phba, ctxp, sid, xri);

	spin_lock_irqsave(&phba->hbalock, flags);
	abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp;
	abts_wqeq->iocb_cmpl = NULL;
	abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS;
	rc = lpfc_sli4_issue_wqe(phba, LPFC_ELS_RING, abts_wqeq);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	if (rc == WQE_SUCCESS) {
		atomic_inc(&tgtp->xmt_abort_unsol);
		return 0;
	}

	atomic_inc(&tgtp->xmt_abort_rsp_error);
	abts_wqeq->context2 = NULL;
	abts_wqeq->context3 = NULL;
	lpfc_sli_release_iocbq(phba, abts_wqeq);
	kfree(ctxp);
	lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
			"6056 Failed to Issue ABTS. Status x%x\n", rc);
	return 0;
}