1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2015 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/list.h>
27 #include <linux/bsg-lib.h>
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_transport_fc.h>
32 #include <scsi/scsi_bsg_fc.h>
33 #include <scsi/fc/fc_fs.h>
38 #include "lpfc_sli4.h"
41 #include "lpfc_disc.h"
42 #include "lpfc_scsi.h"
44 #include "lpfc_logmsg.h"
45 #include "lpfc_crtn.h"
46 #include "lpfc_debugfs.h"
47 #include "lpfc_vport.h"
48 #include "lpfc_version.h"
/*
 * BSG job-tracking structures.
 * NOTE(review): this is a sampled listing - several members and the
 * closing braces of these structs are not visible here.
 */

/* Registered waiter for unsolicited CT events; lifetime is kref-managed
 * (see lpfc_bsg_event_free/_ref/_unref below). */
50 struct lpfc_bsg_event {
51 struct list_head node;
55 /* Event type and waiter identifiers */
60 /* next two flags are here for the auto-delete logic */
61 unsigned long wait_time_stamp;
64 /* seen and not seen events */
65 struct list_head events_to_get;
66 struct list_head events_to_see;
68 /* driver data associated with the job */

/* Per-job state for a CT/management IOCB issued on behalf of bsg. */
72 struct lpfc_bsg_iocb {
73 struct lpfc_iocbq *cmdiocbq;
74 struct lpfc_dmabuf *rmp;
75 struct lpfc_nodelist *ndlp;

/* Per-job state for a mailbox command passed through from the app. */
78 struct lpfc_bsg_mbox {
81 struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
82 uint8_t *ext; /* extended mailbox data */
83 uint32_t mbOffset; /* from app */
84 uint32_t inExtWLen; /* from app */
85 uint32_t outExtWLen; /* from app */

88 #define MENLO_DID 0x0000FC0E

/* Per-job state for a Menlo maintenance command. */
90 struct lpfc_bsg_menlo {
91 struct lpfc_iocbq *cmdiocbq;
92 struct lpfc_dmabuf *rmp;

/* Union-style job context: set_job links back to the bsg_job so the
 * completion handler and the timeout handler can coordinate. */
101 struct bsg_job *set_job; /* job waiting for this iocb to finish */
103 struct lpfc_bsg_event *evt;
104 struct lpfc_bsg_iocb iocb;
105 struct lpfc_bsg_mbox mbox;
106 struct lpfc_bsg_menlo menlo;
111 struct list_head node;

118 #define BUF_SZ_4K 4096
119 #define SLI_CT_ELX_LOOPBACK 0x10

/* Vendor loopback sub-commands carried in SLI_CT_ELX_LOOPBACK frames. */
121 enum ELX_LOOPBACK_CMD {
122 ELX_LOOPBACK_XRI_SETUP,

/* Offset of the payload union inside an lpfc_sli_ct_request, i.e. the
 * size of the CT header that precedes loopback data. */
126 #define ELX_LOOPBACK_HEADER_SZ \
127 (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

/* lpfc_dmabuf extended with bookkeeping for diag DMA allocations. */
129 struct lpfc_dmabufext {
130 struct lpfc_dmabuf dma;
/*
 * lpfc_free_bsg_buffers - free a chain of mbuf-backed DMA buffers.
 * @phba: HBA context.
 * @mlist: head of the buffer chain (may also carry its own buffer).
 *
 * Walks mlist->list, freeing each chained buffer's DMA memory and
 * unlinking it, then frees the head's own DMA buffer.
 * NOTE(review): kfree() of the lpfc_dmabuf containers and the NULL
 * checks are in lines not visible in this view - confirm in full source.
 */
136 lpfc_free_bsg_buffers(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
138 struct lpfc_dmabuf *mlast, *next_mlast;
141 list_for_each_entry_safe(mlast, next_mlast, &mlist->list,
143 lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
144 list_del(&mlast->list);
147 lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
/*
 * lpfc_alloc_bsg_buffers - allocate DMA buffers and fill a BPL for bsg I/O.
 * @phba: HBA context.
 * @size: total bytes of payload to cover.
 * @outbound_buffers: nonzero for host->HBA buffers (BDE_64), zero for
 *	inbound (BDE_64I).
 * @bpl: buffer pointer list to populate, one entry per buffer.
 *
 * Rejects a zero size or a size larger than *bpl_entries mbuf-sized
 * buffers can hold, then allocates LPFC_BPL_SIZE-chunked mbufs, chains
 * them on a list, and records each buffer's address/length in the BPL
 * (tus word stored in little-endian form).  On allocation failure the
 * partial chain is released via lpfc_free_bsg_buffers().
 */
153 static struct lpfc_dmabuf *
154 lpfc_alloc_bsg_buffers(struct lpfc_hba *phba, unsigned int size,
155 int outbound_buffers, struct ulp_bde64 *bpl,
158 struct lpfc_dmabuf *mlist = NULL;
159 struct lpfc_dmabuf *mp;
160 unsigned int bytes_left = size;
162 /* Verify we can support the size specified */
163 if (!size || (size > (*bpl_entries * LPFC_BPL_SIZE)))
166 /* Determine the number of dma buffers to allocate */
167 *bpl_entries = (size % LPFC_BPL_SIZE ? size/LPFC_BPL_SIZE + 1 :
170 /* Allocate dma buffer and place in BPL passed */
172 /* Allocate dma buffer */
173 mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
176 lpfc_free_bsg_buffers(phba, mlist);
180 INIT_LIST_HEAD(&mp->list);
181 mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
186 lpfc_free_bsg_buffers(phba, mlist);
190 /* Queue it to a linked list */
194 list_add_tail(&mp->list, &mlist->list);
196 /* Add buffer to buffer pointer list */
197 if (outbound_buffers)
198 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
200 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
201 bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
202 bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
/* Each BDE covers at most one LPFC_BPL_SIZE buffer. */
203 bpl->tus.f.bdeSize = (uint16_t)
204 (bytes_left >= LPFC_BPL_SIZE ? LPFC_BPL_SIZE :
206 bytes_left -= bpl->tus.f.bdeSize;
207 bpl->tus.w = le32_to_cpu(bpl->tus.w);
/*
 * lpfc_bsg_copy_data - copy between a DMA buffer chain and a bsg sg list.
 * @dma_buffers: head of the DMA buffer chain (each buffer LPFC_BPL_SIZE).
 * @bsg_buffers: the bsg request/reply scatter-gather payload.
 * @bytes_to_transfer: maximum bytes to move.
 * @to_buffers: nonzero copies sg -> dma (request), zero copies dma -> sg
 *	(reply).
 *
 * Temporarily splices the head onto a local list so the head's own
 * buffer is iterated like the rest, then walks both sides with an
 * sg_mapping_iter.  SG_MITER_ATOMIC mapping requires interrupts off,
 * hence the local_irq_save/restore around the copy loop.  Returns (in
 * the full source) the number of bytes copied.
 */
214 lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers,
215 struct bsg_buffer *bsg_buffers,
216 unsigned int bytes_to_transfer, int to_buffers)
219 struct lpfc_dmabuf *mp;
220 unsigned int transfer_bytes, bytes_copied = 0;
221 unsigned int sg_offset, dma_offset;
222 unsigned char *dma_address, *sg_address;
223 LIST_HEAD(temp_list);
224 struct sg_mapping_iter miter;
226 unsigned int sg_flags = SG_MITER_ATOMIC;
/* Splice head buffer into the temp list so it is walked uniformly. */
229 list_splice_init(&dma_buffers->list, &temp_list);
230 list_add(&dma_buffers->list, &temp_list);
233 sg_flags |= SG_MITER_FROM_SG;
235 sg_flags |= SG_MITER_TO_SG;
236 sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt,
238 local_irq_save(flags);
239 sg_valid = sg_miter_next(&miter);
240 list_for_each_entry(mp, &temp_list, list) {
242 while (bytes_to_transfer && sg_valid &&
243 (dma_offset < LPFC_BPL_SIZE)) {
244 dma_address = mp->virt + dma_offset;
246 /* Continue previous partial transfer of sg */
247 sg_address = miter.addr + sg_offset;
248 transfer_bytes = miter.length - sg_offset;
250 sg_address = miter.addr;
251 transfer_bytes = miter.length;
/* Clamp to remaining request and to the current DMA buffer. */
253 if (bytes_to_transfer < transfer_bytes)
254 transfer_bytes = bytes_to_transfer;
255 if (transfer_bytes > (LPFC_BPL_SIZE - dma_offset))
256 transfer_bytes = LPFC_BPL_SIZE - dma_offset;
258 memcpy(dma_address, sg_address, transfer_bytes);
260 memcpy(sg_address, dma_address, transfer_bytes);
261 dma_offset += transfer_bytes;
262 sg_offset += transfer_bytes;
263 bytes_to_transfer -= transfer_bytes;
264 bytes_copied += transfer_bytes;
265 if (sg_offset >= miter.length) {
267 sg_valid = sg_miter_next(&miter);
271 sg_miter_stop(&miter);
272 local_irq_restore(flags);
/* Restore the original chain layout before returning. */
273 list_del_init(&dma_buffers->list);
274 list_splice(&temp_list, &dma_buffers->list);
279 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
280 * @phba: Pointer to HBA context object.
281 * @cmdiocbq: Pointer to command iocb.
282 * @rspiocbq: Pointer to response iocb.
284 * This function is the completion handler for iocbs issued using
285 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
286 * ring event handler function without any lock held. This function
287 * can be called from both worker thread context and interrupt
288 * context. This function also can be called from another thread which
289 * cleans up the SLI layer objects.
290 * This function copies the contents of the response iocb to the
291 * response iocb memory object provided by the caller of
292 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
293 * sleeps for the iocb completion.
296 lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
297 struct lpfc_iocbq *cmdiocbq,
298 struct lpfc_iocbq *rspiocbq)
300 struct bsg_job_data *dd_data;
302 struct fc_bsg_reply *bsg_reply;
304 struct lpfc_dmabuf *bmp, *cmp, *rmp;
305 struct lpfc_nodelist *ndlp;
306 struct lpfc_bsg_iocb *iocb;
308 unsigned int rsp_size;
311 dd_data = cmdiocbq->context1;
313 /* Determine if job has been aborted */
314 spin_lock_irqsave(&phba->ct_ev_lock, flags);
315 job = dd_data->set_job;
317 bsg_reply = job->reply;
318 /* Prevent timeout handling from trying to abort job */
321 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
323 /* Close the timeout handler abort window */
324 spin_lock_irqsave(&phba->hbalock, flags);
325 cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
326 spin_unlock_irqrestore(&phba->hbalock, flags);
328 iocb = &dd_data->context_un.iocb;
331 cmp = cmdiocbq->context2;
332 bmp = cmdiocbq->context3;
333 rsp = &rspiocbq->iocb;
335 /* Copy the completed data or set the error status */
338 if (rsp->ulpStatus) {
/* Map selected LOCAL_REJECT sub-errors to distinct return codes
 * (bodies of the cases are in lines not shown here). */
339 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
340 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
341 case IOERR_SEQUENCE_TIMEOUT:
344 case IOERR_INVALID_RPI:
/* Success path: copy the received CT response back into the
 * job's reply payload scatter-gather list. */
355 rsp_size = rsp->un.genreq64.bdl.bdeSize;
356 bsg_reply->reply_payload_rcv_len =
357 lpfc_bsg_copy_data(rmp, &job->reply_payload,
/* Release command/response buffer chains, the BPL, and the iocb. */
362 lpfc_free_bsg_buffers(phba, cmp);
363 lpfc_free_bsg_buffers(phba, rmp);
364 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
366 lpfc_sli_release_iocbq(phba, cmdiocbq);
370 /* Complete the job if the job is still active */
373 bsg_reply->result = rc;
374 bsg_job_done(job, bsg_reply->result,
375 bsg_reply->reply_payload_rcv_len);
381 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
382 * @job: fc_bsg_job to handle
385 lpfc_bsg_send_mgmt_cmd(struct bsg_job *job)
387 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
388 struct lpfc_hba *phba = vport->phba;
389 struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
390 struct lpfc_nodelist *ndlp = rdata->pnode;
391 struct fc_bsg_reply *bsg_reply = job->reply;
392 struct ulp_bde64 *bpl = NULL;
394 struct lpfc_iocbq *cmdiocbq = NULL;
396 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
399 struct bsg_job_data *dd_data;
405 /* in case no data is transferred */
406 bsg_reply->reply_payload_rcv_len = 0;
408 /* allocate our bsg tracking structure */
409 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
411 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
412 "2733 Failed allocation of dd_data\n");
/* Take a node reference for the duration of the command; bail if the
 * node is gone or busy with an outstanding ELS command. */
417 if (!lpfc_nlp_get(ndlp)) {
422 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
427 cmdiocbq = lpfc_sli_get_iocbq(phba);
433 cmd = &cmdiocbq->iocb;
/* BPL buffer first, then command (outbound) and reply (inbound)
 * buffer chains that share the BPL entries. */
435 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
440 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
446 INIT_LIST_HEAD(&bmp->list);
448 bpl = (struct ulp_bde64 *) bmp->virt;
449 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
450 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
451 1, bpl, &request_nseg);
456 lpfc_bsg_copy_data(cmp, &job->request_payload,
457 job->request_payload.payload_len, 1);
460 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
461 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
/* Build the GEN_REQUEST64 iocb: BPL descriptor, CT frame header
 * fields, class/context and ownership. */
468 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
469 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
470 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
471 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
472 cmd->un.genreq64.bdl.bdeSize =
473 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
474 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
475 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
476 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
477 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
478 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
479 cmd->ulpBdeCount = 1;
481 cmd->ulpClass = CLASS3;
/* SLI4 uses the mapped RPI index, SLI3 the raw RPI. */
482 cmd->ulpContext = ndlp->nlp_rpi;
483 if (phba->sli_rev == LPFC_SLI_REV4)
484 cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
485 cmd->ulpOwner = OWN_CHIP;
486 cmdiocbq->vport = phba->pport;
487 cmdiocbq->context3 = bmp;
488 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
489 timeout = phba->fc_ratov * 2;
490 cmd->ulpTimeout = timeout;
/* Wire up the completion handler and tracking context. */
492 cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
493 cmdiocbq->context1 = dd_data;
494 cmdiocbq->context2 = cmp;
495 cmdiocbq->context3 = bmp;
496 cmdiocbq->context_un.ndlp = ndlp;
497 dd_data->type = TYPE_IOCB;
498 dd_data->set_job = job;
499 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
500 dd_data->context_un.iocb.ndlp = ndlp;
501 dd_data->context_un.iocb.rmp = rmp;
502 job->dd_data = dd_data;
/* In polling mode, re-enable the FCP ring interrupt so the
 * completion can be delivered. */
504 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
505 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
509 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
510 writel(creg_val, phba->HCregaddr);
511 readl(phba->HCregaddr); /* flush */
514 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
516 if (iocb_stat == IOCB_SUCCESS) {
517 spin_lock_irqsave(&phba->hbalock, flags);
518 /* make sure the I/O had not been completed yet */
519 if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
520 /* open up abort window to timeout handler */
521 cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
523 spin_unlock_irqrestore(&phba->hbalock, flags);
524 return 0; /* done for now */
525 } else if (iocb_stat == IOCB_BUSY) {
531 /* iocb failed so cleanup */
535 lpfc_free_bsg_buffers(phba, rmp);
537 lpfc_free_bsg_buffers(phba, cmp);
540 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
543 lpfc_sli_release_iocbq(phba, cmdiocbq);
549 /* make error code available to userspace */
550 bsg_reply->result = rc;
556 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
557 * @phba: Pointer to HBA context object.
558 * @cmdiocbq: Pointer to command iocb.
559 * @rspiocbq: Pointer to response iocb.
561 * This function is the completion handler for iocbs issued using
562 * lpfc_bsg_rport_els_cmp function. This function is called by the
563 * ring event handler function without any lock held. This function
564 * can be called from both worker thread context and interrupt
565 * context. This function also can be called from other thread which
566 * cleans up the SLI layer objects.
567 * This function copies the contents of the response iocb to the
568 * response iocb memory object provided by the caller of
569 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
570 * sleeps for the iocb completion.
573 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
574 struct lpfc_iocbq *cmdiocbq,
575 struct lpfc_iocbq *rspiocbq)
577 struct bsg_job_data *dd_data;
579 struct fc_bsg_reply *bsg_reply;
581 struct lpfc_nodelist *ndlp;
582 struct lpfc_dmabuf *pcmd = NULL, *prsp = NULL;
583 struct fc_bsg_ctels_reply *els_reply;
586 unsigned int rsp_size;
589 dd_data = cmdiocbq->context1;
/* Restore the ndlp into context1 so lpfc_els_free_iocb (below) sees
 * the layout it expects. */
590 ndlp = dd_data->context_un.iocb.ndlp;
591 cmdiocbq->context1 = ndlp;
593 /* Determine if job has been aborted */
594 spin_lock_irqsave(&phba->ct_ev_lock, flags);
595 job = dd_data->set_job;
597 bsg_reply = job->reply;
598 /* Prevent timeout handling from trying to abort job */
601 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
603 /* Close the timeout handler abort window */
604 spin_lock_irqsave(&phba->hbalock, flags);
605 cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
606 spin_unlock_irqrestore(&phba->hbalock, flags);
608 rsp = &rspiocbq->iocb;
609 pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2;
/* prsp is the second entry on pcmd's list (ELS cmd buf then rsp buf). */
610 prsp = (struct lpfc_dmabuf *)pcmd->list.next;
612 /* Copy the completed job data or determine the job status if job is
617 if (rsp->ulpStatus == IOSTAT_SUCCESS) {
618 rsp_size = rsp->un.elsreq64.bdl.bdeSize;
619 bsg_reply->reply_payload_rcv_len =
620 sg_copy_from_buffer(job->reply_payload.sg_list,
621 job->reply_payload.sg_cnt,
624 } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
625 bsg_reply->reply_payload_rcv_len =
626 sizeof(struct fc_bsg_ctels_reply);
627 /* LS_RJT data returned in word 4 */
/* Unpack the 4 LS_RJT bytes from word 4 (byte order per wire). */
628 rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
629 els_reply = &bsg_reply->reply_data.ctels_reply;
630 els_reply->status = FC_CTELS_STATUS_REJECT;
631 els_reply->rjt_data.action = rjt_data[3];
632 els_reply->rjt_data.reason_code = rjt_data[2];
633 els_reply->rjt_data.reason_explanation = rjt_data[1];
634 els_reply->rjt_data.vendor_unique = rjt_data[0];
641 lpfc_els_free_iocb(phba, cmdiocbq);
644 /* Complete the job if the job is still active */
647 bsg_reply->result = rc;
648 bsg_job_done(job, bsg_reply->result,
649 bsg_reply->reply_payload_rcv_len);
655 * lpfc_bsg_rport_els - send an ELS command from a bsg request
656 * @job: fc_bsg_job to handle
659 lpfc_bsg_rport_els(struct bsg_job *job)
661 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
662 struct lpfc_hba *phba = vport->phba;
663 struct lpfc_rport_data *rdata = fc_bsg_to_rport(job)->dd_data;
664 struct lpfc_nodelist *ndlp = rdata->pnode;
665 struct fc_bsg_request *bsg_request = job->request;
666 struct fc_bsg_reply *bsg_reply = job->reply;
669 struct lpfc_iocbq *cmdiocbq;
671 struct bsg_job_data *dd_data;
676 /* in case no data is transferred */
677 bsg_reply->reply_payload_rcv_len = 0;
679 /* verify the els command is not greater than the
680 * maximum ELS transfer size.
683 if (job->request_payload.payload_len > FCELSSIZE) {
688 /* allocate our bsg tracking structure */
689 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
691 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
692 "2735 Failed allocation of dd_data\n");
697 elscmd = bsg_request->rqst_data.r_els.els_code;
698 cmdsize = job->request_payload.payload_len;
700 if (!lpfc_nlp_get(ndlp)) {
705 /* We will use the allocated dma buffers by prep els iocb for command
706 * and response to ensure if the job times out and the request is freed,
707 * we won't be dma into memory that is no longer allocated to for the
711 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
712 ndlp->nlp_DID, elscmd);
720 /* Transfer the request payload to allocated command dma buffer */
722 sg_copy_to_buffer(job->request_payload.sg_list,
723 job->request_payload.sg_cnt,
724 ((struct lpfc_dmabuf *)cmdiocbq->context2)->virt,
/* SLI4 needs the mapped RPI index; SLI3 uses the raw RPI. */
727 if (phba->sli_rev == LPFC_SLI_REV4)
728 cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
730 cmdiocbq->iocb.ulpContext = rpi;
731 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
/* Hook up tracking context and completion handler. */
732 cmdiocbq->context1 = dd_data;
733 cmdiocbq->context_un.ndlp = ndlp;
734 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
735 dd_data->type = TYPE_IOCB;
736 dd_data->set_job = job;
737 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
738 dd_data->context_un.iocb.ndlp = ndlp;
739 dd_data->context_un.iocb.rmp = NULL;
740 job->dd_data = dd_data;
/* In polling mode, re-enable the FCP ring interrupt. */
742 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
743 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
747 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
748 writel(creg_val, phba->HCregaddr);
749 readl(phba->HCregaddr); /* flush */
752 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
754 if (rc == IOCB_SUCCESS) {
755 spin_lock_irqsave(&phba->hbalock, flags);
756 /* make sure the I/O had not been completed/released */
757 if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) {
758 /* open up abort window to timeout handler */
759 cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
761 spin_unlock_irqrestore(&phba->hbalock, flags);
762 return 0; /* done for now */
763 } else if (rc == IOCB_BUSY) {
769 /* iocb failed so cleanup */
/* Put ndlp back in context1 before lpfc_els_free_iocb. */
773 cmdiocbq->context1 = ndlp;
774 lpfc_els_free_iocb(phba, cmdiocbq);
783 /* make error code available to userspace */
784 bsg_reply->result = rc;
790 * lpfc_bsg_event_free - frees an allocated event structure
791 * @kref: Pointer to a kref.
793 * Called from kref_put. Back cast the kref into an event structure address.
794 * Free any events to get, delete associated nodes, free any events to see,
795 * free any data then free the event itself.
798 lpfc_bsg_event_free(struct kref *kref)
800 struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
802 struct event_data *ed;
/* Unlink from phba->ct_ev_waiters before draining the event lists. */
804 list_del(&evt->node);
806 while (!list_empty(&evt->events_to_get)) {
807 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
813 while (!list_empty(&evt->events_to_see)) {
814 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
825 * lpfc_bsg_event_ref - increments the kref for an event
826 * @evt: Pointer to an event structure.
829 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
831 kref_get(&evt->kref);
835 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
836 * @evt: Pointer to an event structure.
839 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
/* When the count hits zero, lpfc_bsg_event_free releases the event. */
841 kref_put(&evt->kref, lpfc_bsg_event_free);
845 * lpfc_bsg_event_new - allocate and initialize a event structure
846 * @ev_mask: Mask of events.
847 * @ev_reg_id: Event reg id.
848 * @ev_req_id: Event request id.
850 static struct lpfc_bsg_event *
851 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
853 struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
/* Initialize both event queues, identifiers, waitqueue, and the kref
 * (which starts at 1). */
858 INIT_LIST_HEAD(&evt->events_to_get);
859 INIT_LIST_HEAD(&evt->events_to_see);
860 evt->type_mask = ev_mask;
861 evt->req_id = ev_req_id;
862 evt->reg_id = ev_reg_id;
863 evt->wait_time_stamp = jiffies;
865 init_waitqueue_head(&evt->wq);
866 kref_init(&evt->kref);
871 * diag_cmd_data_free - Frees an lpfc dma buffer extension
872 * @phba: Pointer to HBA context object.
873 * @mlist: Pointer to an lpfc dma buffer extension.
876 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
878 struct lpfc_dmabufext *mlast;
879 struct pci_dev *pcidev;
880 struct list_head head, *curr, *next;
/* Bail out on a NULL list, or when the link is down while loopback
 * mode is set (buffers still owned by the hardware in that case -
 * NOTE(review): confirm rationale against full source). */
882 if ((!mlist) || (!lpfc_is_link_up(phba) &&
883 (phba->link_flag & LS_LOOPBACK_MODE))) {
887 pcidev = phba->pcidev;
888 list_add_tail(&head, &mlist->dma.list);
890 list_for_each_safe(curr, next, &head) {
891 mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
893 dma_free_coherent(&pcidev->dev,
903 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
908 * This function is called when an unsolicited CT command is received. It
909 * forwards the event to any processes registered to receive CT events.
912 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
913 struct lpfc_iocbq *piocbq)
915 uint32_t evt_req_id = 0;
917 struct lpfc_dmabuf *dmabuf = NULL;
918 struct lpfc_bsg_event *evt;
919 struct event_data *evt_dat = NULL;
920 struct lpfc_iocbq *iocbq;
922 struct list_head head;
923 struct ulp_bde64 *bde;
926 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
927 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
928 struct lpfc_hbq_entry *hbqe;
929 struct lpfc_sli_ct_request *ct_req;
930 struct bsg_job *job = NULL;
931 struct fc_bsg_reply *bsg_reply;
932 struct bsg_job_data *dd_data = NULL;
/* Chain all iocbs of the sequence onto a local list for iteration. */
936 INIT_LIST_HEAD(&head);
937 list_add_tail(&head, &piocbq->list);
939 if (piocbq->iocb.ulpBdeCount == 0 ||
940 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
941 goto error_ct_unsol_exit;
943 if (phba->link_state == LPFC_HBA_ERROR ||
944 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
945 goto error_ct_unsol_exit;
/* HBQ path gets the buffer from context2; ring path looks it up by
 * physical address from the first BDE. */
947 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
950 dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
951 piocbq->iocb.un.cont64[0].addrLow);
952 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
955 goto error_ct_unsol_exit;
956 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
957 evt_req_id = ct_req->FsType;
958 cmd = ct_req->CommandResponse.bits.CmdRsp;
959 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
960 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
/* Deliver a copy of the event to every registered waiter whose mask
 * and request id match. */
962 spin_lock_irqsave(&phba->ct_ev_lock, flags);
963 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
964 if (!(evt->type_mask & FC_REG_CT_EVENT) ||
965 evt->req_id != evt_req_id)
968 lpfc_bsg_event_ref(evt);
969 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
970 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
971 if (evt_dat == NULL) {
972 spin_lock_irqsave(&phba->ct_ev_lock, flags);
973 lpfc_bsg_event_unref(evt);
974 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
975 "2614 Memory allocation failed for "
/* Total payload length: HBQ mode carries the accumulated count in the
 * last iocb; otherwise sum the BDE sizes across the sequence. */
980 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
981 /* take accumulated byte count from the last iocbq */
982 iocbq = list_entry(head.prev, typeof(*iocbq), list);
983 evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
985 list_for_each_entry(iocbq, &head, list) {
986 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
988 iocbq->iocb.un.cont64[i].tus.f.bdeSize;
992 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
993 if (evt_dat->data == NULL) {
994 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
995 "2615 Memory allocation failed for "
996 "CT event data, size %d\n",
999 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1000 lpfc_bsg_event_unref(evt);
1001 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1002 goto error_ct_unsol_exit;
/* Gather the sequence payload buffer-by-buffer into evt_dat->data. */
1005 list_for_each_entry(iocbq, &head, list) {
1007 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
1008 bdeBuf1 = iocbq->context2;
1009 bdeBuf2 = iocbq->context3;
1011 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
1012 if (phba->sli3_options &
1013 LPFC_SLI3_HBQ_ENABLED) {
1015 hbqe = (struct lpfc_hbq_entry *)
1016 &iocbq->iocb.un.ulpWord[0];
1017 size = hbqe->bde.tus.f.bdeSize;
1019 } else if (i == 1) {
1020 hbqe = (struct lpfc_hbq_entry *)
1021 &iocbq->iocb.unsli3.
1023 size = hbqe->bde.tus.f.bdeSize;
/* Clamp copy length so we never overrun evt_dat->data. */
1026 if ((offset + size) > evt_dat->len)
1027 size = evt_dat->len - offset;
1029 size = iocbq->iocb.un.cont64[i].
1031 bde = &iocbq->iocb.un.cont64[i];
1032 dma_addr = getPaddr(bde->addrHigh,
1034 dmabuf = lpfc_sli_ringpostbuf_get(phba,
1038 lpfc_printf_log(phba, KERN_ERR,
1039 LOG_LIBDFC, "2616 No dmabuf "
1040 "found for iocbq 0x%p\n",
1042 kfree(evt_dat->data);
1044 spin_lock_irqsave(&phba->ct_ev_lock,
1046 lpfc_bsg_event_unref(evt);
1047 spin_unlock_irqrestore(
1048 &phba->ct_ev_lock, flags);
1049 goto error_ct_unsol_exit;
1051 memcpy((char *)(evt_dat->data) + offset,
1052 dmabuf->virt, size);
/* Recycle or free the receive buffer depending on mode and on
 * the loopback sub-command (switch bodies partly not visible). */
1054 if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
1055 !(phba->sli3_options &
1056 LPFC_SLI3_HBQ_ENABLED)) {
1057 lpfc_sli_ringpostbuf_put(phba, pring,
1061 case ELX_LOOPBACK_DATA:
1064 diag_cmd_data_free(phba,
1065 (struct lpfc_dmabufext
1068 case ELX_LOOPBACK_XRI_SETUP:
1069 if ((phba->sli_rev ==
1071 (phba->sli3_options &
1072 LPFC_SLI3_HBQ_ENABLED
1074 lpfc_in_buf_free(phba,
1077 lpfc_post_buffer(phba,
1083 if (!(phba->sli3_options &
1084 LPFC_SLI3_HBQ_ENABLED))
1085 lpfc_post_buffer(phba,
/* SLI4: stash rxid/oxid/SID in the ct_ctx ring so a later response
 * (lpfc_issue_ct_rsp) can reference this exchange by index. */
1094 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1095 if (phba->sli_rev == LPFC_SLI_REV4) {
1096 evt_dat->immed_dat = phba->ctx_idx;
1097 phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
1098 /* Provide warning for over-run of the ct_ctx array */
1099 if (phba->ct_ctx[evt_dat->immed_dat].valid ==
1101 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1102 "2717 CT context array entry "
1103 "[%d] over-run: oxid:x%x, "
1104 "sid:x%x\n", phba->ctx_idx,
1106 evt_dat->immed_dat].oxid,
1108 evt_dat->immed_dat].SID);
1109 phba->ct_ctx[evt_dat->immed_dat].rxid =
1110 piocbq->iocb.ulpContext;
1111 phba->ct_ctx[evt_dat->immed_dat].oxid =
1112 piocbq->iocb.unsli3.rcvsli3.ox_id;
1113 phba->ct_ctx[evt_dat->immed_dat].SID =
1114 piocbq->iocb.un.rcvels.remoteID;
1115 phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
1117 evt_dat->immed_dat = piocbq->iocb.ulpContext;
1119 evt_dat->type = FC_REG_CT_EVENT;
1120 list_add(&evt_dat->node, &evt->events_to_see);
/* Loopback events wake the synchronous waiter; other events are moved
 * to events_to_get and complete the registered bsg job directly. */
1121 if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
1122 wake_up_interruptible(&evt->wq);
1123 lpfc_bsg_event_unref(evt);
1127 list_move(evt->events_to_see.prev, &evt->events_to_get);
1129 dd_data = (struct bsg_job_data *)evt->dd_data;
1130 job = dd_data->set_job;
1131 dd_data->set_job = NULL;
1132 lpfc_bsg_event_unref(evt);
1134 bsg_reply = job->reply;
1135 bsg_reply->reply_payload_rcv_len = size;
1136 /* make error code available to userspace */
1137 bsg_reply->result = 0;
1138 job->dd_data = NULL;
1139 /* complete the job back to userspace */
1140 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1141 bsg_job_done(job, bsg_reply->result,
1142 bsg_reply->reply_payload_rcv_len);
1143 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1146 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1148 error_ct_unsol_exit:
1149 if (!list_empty(&head))
1151 if ((phba->sli_rev < LPFC_SLI_REV4) &&
1152 (evt_req_id == SLI_CT_ELX_LOOPBACK))
1158 * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
1159 * @phba: Pointer to HBA context object.
1160 * @dmabuf: pointer to a dmabuf that describes the FC sequence
1162 * This function handles abort to the CT command toward management plane
1165 * If the pending context of a CT command to management plane present, clears
1166 * such context and returns 1 for handled; otherwise, it returns 0 indicating
1167 * no context exists.
1170 lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
1172 struct fc_frame_header fc_hdr;
1173 struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
1174 int ctx_idx, handled = 0;
1175 uint16_t oxid, rxid;
/* Extract sid/oxid/rxid from the received frame header. */
1178 memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
1179 sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
1180 oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
1181 rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
/* Invalidate any ct_ctx entry that matches this exchange exactly. */
1183 for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
1184 if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
1186 if (phba->ct_ctx[ctx_idx].rxid != rxid)
1188 if (phba->ct_ctx[ctx_idx].oxid != oxid)
1190 if (phba->ct_ctx[ctx_idx].SID != sid)
1192 phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
1199 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1200 * @job: SET_EVENT fc_bsg_job
1203 lpfc_bsg_hba_set_event(struct bsg_job *job)
1205 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1206 struct lpfc_hba *phba = vport->phba;
1207 struct fc_bsg_request *bsg_request = job->request;
1208 struct set_ct_event *event_req;
1209 struct lpfc_bsg_event *evt;
1211 struct bsg_job_data *dd_data = NULL;
1213 unsigned long flags;
/* Reject requests too small to contain a set_ct_event payload. */
1215 if (job->request_len <
1216 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
1217 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1218 "2612 Received SET_CT_EVENT below minimum "
1224 event_req = (struct set_ct_event *)
1225 bsg_request->rqst_data.h_vendor.vendor_cmd;
1226 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
/* Reuse an existing waiter registered under the same reg_id. */
1228 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1229 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1230 if (evt->reg_id == event_req->ev_reg_id) {
1231 lpfc_bsg_event_ref(evt);
1232 evt->wait_time_stamp = jiffies;
1233 dd_data = (struct bsg_job_data *)evt->dd_data;
1237 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* list_for_each_entry ran off the end => no existing waiter: allocate
 * tracking data and a new event, then register it. */
1239 if (&evt->node == &phba->ct_ev_waiters) {
1240 /* no event waiting struct yet - first call */
1241 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1242 if (dd_data == NULL) {
1243 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1244 "2734 Failed allocation of dd_data\n");
1248 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1249 event_req->ev_req_id);
1251 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1252 "2617 Failed allocation of event "
1257 dd_data->type = TYPE_EVT;
1258 dd_data->set_job = NULL;
1259 dd_data->context_un.evt = evt;
1260 evt->dd_data = (void *)dd_data;
1261 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1262 list_add(&evt->node, &phba->ct_ev_waiters);
1263 lpfc_bsg_event_ref(evt);
1264 evt->wait_time_stamp = jiffies;
1265 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* Park the job; it is completed later by lpfc_bsg_ct_unsol_event. */
1268 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1270 dd_data->set_job = job; /* for unsolicited command */
1271 job->dd_data = dd_data; /* for fc transport timeout callback*/
1272 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1273 return 0; /* call job done later */
1276 if (dd_data != NULL)
1279 job->dd_data = NULL;
1284 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1285 * @job: GET_EVENT fc_bsg_job
1288 lpfc_bsg_hba_get_event(struct bsg_job *job)
1290 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1291 struct lpfc_hba *phba = vport->phba;
1292 struct fc_bsg_request *bsg_request = job->request;
1293 struct fc_bsg_reply *bsg_reply = job->reply;
1294 struct get_ct_event *event_req;
1295 struct get_ct_event_reply *event_reply;
1296 struct lpfc_bsg_event *evt, *evt_next;
1297 struct event_data *evt_dat = NULL;
1298 unsigned long flags;
/* Reject requests too small to contain a get_ct_event payload. */
1301 if (job->request_len <
1302 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1303 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1304 "2613 Received GET_CT_EVENT request below "
1310 event_req = (struct get_ct_event *)
1311 bsg_request->rqst_data.h_vendor.vendor_cmd;
1313 event_reply = (struct get_ct_event_reply *)
1314 bsg_reply->reply_data.vendor_reply.vendor_rsp;
/* Pop the oldest queued event for this reg_id, if any. */
1315 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1316 list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) {
1317 if (evt->reg_id == event_req->ev_reg_id) {
1318 if (list_empty(&evt->events_to_get))
1320 lpfc_bsg_event_ref(evt);
1321 evt->wait_time_stamp = jiffies;
1322 evt_dat = list_entry(evt->events_to_get.prev,
1323 struct event_data, node);
1324 list_del(&evt_dat->node);
1328 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1330 /* The app may continue to ask for event data until it gets
1331 * an error indicating that there isn't anymore
1333 if (evt_dat == NULL) {
1334 bsg_reply->reply_payload_rcv_len = 0;
/* Truncate (with a warning) if the app's buffer is too small. */
1339 if (evt_dat->len > job->request_payload.payload_len) {
1340 evt_dat->len = job->request_payload.payload_len;
1341 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1342 "2618 Truncated event data at %d "
1344 job->request_payload.payload_len);
1347 event_reply->type = evt_dat->type;
1348 event_reply->immed_data = evt_dat->immed_dat;
1349 if (evt_dat->len > 0)
1350 bsg_reply->reply_payload_rcv_len =
1351 sg_copy_from_buffer(job->request_payload.sg_list,
1352 job->request_payload.sg_cnt,
1353 evt_dat->data, evt_dat->len);
1355 bsg_reply->reply_payload_rcv_len = 0;
1358 kfree(evt_dat->data);
/* Drop the reference taken above and complete the job. */
1362 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1363 lpfc_bsg_event_unref(evt);
1364 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1365 job->dd_data = NULL;
1366 bsg_reply->result = 0;
1367 bsg_job_done(job, bsg_reply->result,
1368 bsg_reply->reply_payload_rcv_len);
1372 job->dd_data = NULL;
1373 bsg_reply->result = rc;
1378 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1379 * @phba: Pointer to HBA context object.
1380 * @cmdiocbq: Pointer to command iocb.
1381 * @rspiocbq: Pointer to response iocb.
1383 * This function is the completion handler for iocbs issued using
1384 * the lpfc_issue_ct_rsp function. This function is called by the
1385 * ring event handler function without any lock held. This function
1386 * can be called from both worker thread context and interrupt
1387 * context. This function also can be called from other thread which
1388 * cleans up the SLI layer objects.
1389 * This function copies the contents of the response iocb to the
1390 * response iocb memory object provided by the caller of
1391 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1392 * sleeps for the iocb completion.
1395 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1396 struct lpfc_iocbq *cmdiocbq,
1397 struct lpfc_iocbq *rspiocbq)
1399 struct bsg_job_data *dd_data;
1400 struct bsg_job *job;
1401 struct fc_bsg_reply *bsg_reply;
1403 struct lpfc_dmabuf *bmp, *cmp;
1404 struct lpfc_nodelist *ndlp;
1405 unsigned long flags;
/* Tracking structure stashed in context1 by lpfc_issue_ct_rsp. */
1408 dd_data = cmdiocbq->context1;
1410 /* Determine if job has been aborted */
1411 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1412 job = dd_data->set_job;
1414 /* Prevent timeout handling from trying to abort job */
1415 job->dd_data = NULL;
1417 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1419 /* Close the timeout handler abort window */
1420 spin_lock_irqsave(&phba->hbalock, flags);
1421 cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING;
1422 spin_unlock_irqrestore(&phba->hbalock, flags);
/* Recover the resources attached to the command iocb so they can be
 * released below regardless of completion status. */
1424 ndlp = dd_data->context_un.iocb.ndlp;
1425 cmp = cmdiocbq->context2;
1426 bmp = cmdiocbq->context3;
1427 rsp = &rspiocbq->iocb;
1429 /* Copy the completed job data or set the error status */
1432 bsg_reply = job->reply;
1433 if (rsp->ulpStatus) {
1434 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
/* Map selected local-reject reasons from ulpWord[4]. */
1435 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
1436 case IOERR_SEQUENCE_TIMEOUT:
1439 case IOERR_INVALID_RPI:
1450 bsg_reply->reply_payload_rcv_len = 0;
/* Release DMA buffers and the iocb taken in lpfc_issue_ct_rsp. */
1454 lpfc_free_bsg_buffers(phba, cmp);
1455 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1457 lpfc_sli_release_iocbq(phba, cmdiocbq);
1461 /* Complete the job if the job is still active */
1464 bsg_reply->result = rc;
1465 bsg_job_done(job, bsg_reply->result,
1466 bsg_reply->reply_payload_rcv_len);
1472 * lpfc_issue_ct_rsp - issue a ct response
1473 * @phba: Pointer to HBA context object.
1474 * @job: Pointer to the job object.
1475 * @tag: tag index value into the ports context exchange array.
 * @cmp: Pointer to the dma buffer chain holding the CT response payload.
1476 * @bmp: Pointer to a dma buffer descriptor.
1477 * @num_entry: Number of entries in the bde.
/*
 * Builds an XMIT_SEQUENCE64 iocb carrying the CT response described by the
 * BPL in @bmp and issues it on the ELS ring.  On SLI-4 the exchange context
 * (rxid/oxid/SID) is looked up in phba->ct_ctx[@tag]; on earlier revisions
 * @tag is used as the ulpContext directly.  Returns 0 on successful issue;
 * the completion handler (lpfc_issue_ct_rsp_cmp) finishes the job.
 */
1480 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag,
1481 struct lpfc_dmabuf *cmp, struct lpfc_dmabuf *bmp,
1485 struct lpfc_iocbq *ctiocb = NULL;
1487 struct lpfc_nodelist *ndlp = NULL;
1488 struct bsg_job_data *dd_data;
1489 unsigned long flags;
1492 /* allocate our bsg tracking structure */
1493 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1495 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1496 "2736 Failed allocation of dd_data\n");
1501 /* Allocate buffer for command iocb */
1502 ctiocb = lpfc_sli_get_iocbq(phba);
/* Point the sequence BDL at the caller-provided buffer list. */
1508 icmd = &ctiocb->iocb;
1509 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1510 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1511 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1512 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1513 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1514 icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1515 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1516 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1517 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1519 /* Fill in rest of iocb */
1520 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1521 icmd->ulpBdeCount = 1;
1523 icmd->ulpClass = CLASS3;
1524 if (phba->sli_rev == LPFC_SLI_REV4) {
1525 /* Do not issue unsol response if oxid not marked as valid */
1526 if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
1528 goto issue_ct_rsp_exit;
1530 icmd->ulpContext = phba->ct_ctx[tag].rxid;
1531 icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
1532 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1534 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1535 "2721 ndlp null for oxid %x SID %x\n",
1537 phba->ct_ctx[tag].SID);
1539 goto issue_ct_rsp_exit;
1542 /* Check if the ndlp is active */
1543 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1545 goto issue_ct_rsp_exit;
1548 /* get a reference count so the ndlp doesn't go away while
1551 if (!lpfc_nlp_get(ndlp)) {
1553 goto issue_ct_rsp_exit;
1556 icmd->un.ulpWord[3] =
1557 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1559 /* The exchange is done, mark the entry as invalid */
1560 phba->ct_ctx[tag].valid = UNSOL_INVALID;
1562 icmd->ulpContext = (ushort) tag;
1564 icmd->ulpTimeout = phba->fc_ratov * 2;
1566 /* Xmit CT response on exchange <xid> */
1567 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1568 "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1569 icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
/* Hook up contexts so the completion/timeout paths can find and free
 * everything: dd_data in context1, payload in context2, BPL in context3. */
1571 ctiocb->iocb_cmpl = NULL;
1572 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1573 ctiocb->vport = phba->pport;
1574 ctiocb->context1 = dd_data;
1575 ctiocb->context2 = cmp;
1576 ctiocb->context3 = bmp;
1577 ctiocb->context_un.ndlp = ndlp;
1578 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1580 dd_data->type = TYPE_IOCB;
1581 dd_data->set_job = job;
1582 dd_data->context_un.iocb.cmdiocbq = ctiocb;
1583 dd_data->context_un.iocb.ndlp = ndlp;
1584 dd_data->context_un.iocb.rmp = NULL;
1585 job->dd_data = dd_data;
/* With polling enabled, re-enable the FCP ring interrupt so this
 * request can complete asynchronously. */
1587 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1588 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1590 goto issue_ct_rsp_exit;
1592 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1593 writel(creg_val, phba->HCregaddr);
1594 readl(phba->HCregaddr); /* flush */
1597 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1599 if (rc == IOCB_SUCCESS) {
1600 spin_lock_irqsave(&phba->hbalock, flags);
1601 /* make sure the I/O had not been completed/released */
1602 if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) {
1603 /* open up abort window to timeout handler */
1604 ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING;
1606 spin_unlock_irqrestore(&phba->hbalock, flags);
1607 return 0; /* done for now */
1610 /* iocb failed so cleanup */
1611 job->dd_data = NULL;
1614 lpfc_sli_release_iocbq(phba, ctiocb);
1622 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1623 * @job: SEND_MGMT_RESP fc_bsg_job
/*
 * Copies the userspace request payload into driver DMA buffers, builds a
 * buffer pointer list, and hands the response off to lpfc_issue_ct_rsp()
 * using the exchange tag supplied in the vendor command.  On success the
 * job completes later from the iocb completion handler; on failure the
 * buffers are freed here and the error is returned to userspace.
 */
1626 lpfc_bsg_send_mgmt_rsp(struct bsg_job *job)
1628 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
1629 struct lpfc_hba *phba = vport->phba;
1630 struct fc_bsg_request *bsg_request = job->request;
1631 struct fc_bsg_reply *bsg_reply = job->reply;
1632 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1633 bsg_request->rqst_data.h_vendor.vendor_cmd;
1634 struct ulp_bde64 *bpl;
1635 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL;
1637 uint32_t tag = mgmt_resp->tag;
1638 unsigned long reqbfrcnt =
1639 (unsigned long)job->request_payload.payload_len;
1642 /* in case no data is transferred */
1643 bsg_reply->reply_payload_rcv_len = 0;
/* Reject empty payloads and anything beyond 80 x 4K buffers. */
1645 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1647 goto send_mgmt_rsp_exit;
/* bmp holds the buffer pointer list (BPL) describing the payload. */
1650 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1653 goto send_mgmt_rsp_exit;
1656 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1659 goto send_mgmt_rsp_free_bmp;
1662 INIT_LIST_HEAD(&bmp->list);
1663 bpl = (struct ulp_bde64 *) bmp->virt;
1664 bpl_entries = (LPFC_BPL_SIZE/sizeof(struct ulp_bde64));
1665 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
1666 1, bpl, &bpl_entries);
1669 goto send_mgmt_rsp_free_bmp;
/* Copy the user payload from the sg list into the DMA buffers. */
1671 lpfc_bsg_copy_data(cmp, &job->request_payload,
1672 job->request_payload.payload_len, 1);
1674 rc = lpfc_issue_ct_rsp(phba, job, tag, cmp, bmp, bpl_entries);
1676 if (rc == IOCB_SUCCESS)
1677 return 0; /* done for now */
/* Failure path: ownership of cmp/bmp stays here, so free them. */
1681 lpfc_free_bsg_buffers(phba, cmp);
1683 send_mgmt_rsp_free_bmp:
1685 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1688 /* make error code available to userspace */
1689 bsg_reply->result = rc;
1690 job->dd_data = NULL;
1695 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
1696 * @phba: Pointer to HBA context object.
1698 * This function is responsible for preparing driver for diag loopback
/*
 * Blocks new SCSI requests on all vports (and the physical port) and then
 * waits for outstanding FCP commands to drain before diagnostic mode is
 * entered.  Fails early if the HBA is in error, management I/O is blocked,
 * or SLI is not active.
 */
1702 lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1704 struct lpfc_vport **vports;
1705 struct Scsi_Host *shost;
1706 struct lpfc_sli *psli;
1707 struct lpfc_sli_ring *pring;
1714 pring = &psli->ring[LPFC_FCP_RING];
/* Refuse to enter diag mode if the port is not in a usable state. */
1718 if ((phba->link_state == LPFC_HBA_ERROR) ||
1719 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1720 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
/* Block requests on every vport so the mid-layer stops issuing I/O. */
1723 vports = lpfc_create_vport_work_array(phba);
1725 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1726 shost = lpfc_shost_from_vport(vports[i]);
1727 scsi_block_requests(shost);
1729 lpfc_destroy_vport_work_array(phba, vports);
1731 shost = lpfc_shost_from_vport(phba->pport);
1732 scsi_block_requests(shost);
/* Wait for in-flight FCP commands on the ring to complete. */
1735 while (!list_empty(&pring->txcmplq)) {
1736 if (i++ > 500) /* wait up to 5 seconds */
1744 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1745 * @phba: Pointer to HBA context object.
1747 * This function is responsible for driver exit processing of setting up
1748 * diag loopback mode on device.
/*
 * Mirror of lpfc_bsg_diag_mode_enter(): unblocks SCSI requests on every
 * vport and on the physical port so normal I/O can resume.
 */
1751 lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1753 struct Scsi_Host *shost;
1754 struct lpfc_vport **vports;
1757 vports = lpfc_create_vport_work_array(phba);
1759 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1760 shost = lpfc_shost_from_vport(vports[i]);
1761 scsi_unblock_requests(shost);
1763 lpfc_destroy_vport_work_array(phba, vports);
1765 shost = lpfc_shost_from_vport(phba->pport);
1766 scsi_unblock_requests(shost);
1772 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1773 * @phba: Pointer to HBA context object.
1774 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1776 * This function is responsible for placing an sli3 port into diagnostic
1777 * loopback mode in order to perform a diagnostic loopback test.
1778 * All new scsi requests are blocked, a small delay is used to allow the
1779 * scsi requests to complete then the link is brought down. If the link is
1780 * is placed in loopback mode then scsi requests are again allowed
1781 * so the scsi mid-layer doesn't give up on the port.
1782 * All of this is done in-line.
1785 lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
1787 struct fc_bsg_request *bsg_request = job->request;
1788 struct fc_bsg_reply *bsg_reply = job->reply;
1789 struct diag_mode_set *loopback_mode;
1790 uint32_t link_flags;
1792 LPFC_MBOXQ_t *pmboxq = NULL;
1793 int mbxstatus = MBX_SUCCESS;
1797 /* no data to return just the return code */
1798 bsg_reply->reply_payload_rcv_len = 0;
/* Validate the vendor command is at least diag_mode_set sized. */
1800 if (job->request_len < sizeof(struct fc_bsg_request) +
1801 sizeof(struct diag_mode_set)) {
1802 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1803 "2738 Received DIAG MODE request size:%d "
1804 "below the minimum size:%d\n",
1806 (int)(sizeof(struct fc_bsg_request) +
1807 sizeof(struct diag_mode_set)));
1812 rc = lpfc_bsg_diag_mode_enter(phba);
1816 /* bring the link to diagnostic mode */
1817 loopback_mode = (struct diag_mode_set *)
1818 bsg_request->rqst_data.h_vendor.vendor_cmd;
1819 link_flags = loopback_mode->type;
/* User timeout is in seconds; loop below polls in 10ms-ish steps. */
1820 timeout = loopback_mode->timeout * 100;
1822 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1825 goto loopback_mode_exit;
/* First take the link down via MBX_DOWN_LINK. */
1827 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1828 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1829 pmboxq->u.mb.mbxOwner = OWN_HOST;
1831 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1833 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1834 /* wait for link down before proceeding */
1836 while (phba->link_state != LPFC_LINK_DOWN) {
1837 if (i++ > timeout) {
1839 goto loopback_mode_exit;
/* Then re-init the link in the requested loopback topology. */
1844 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1845 if (link_flags == INTERNAL_LOOP_BACK)
1846 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1848 pmboxq->u.mb.un.varInitLnk.link_flags =
1849 FLAGS_TOPOLOGY_MODE_LOOP;
1851 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1852 pmboxq->u.mb.mbxOwner = OWN_HOST;
1854 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1857 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1860 spin_lock_irq(&phba->hbalock);
1861 phba->link_flag |= LS_LOOPBACK_MODE;
1862 spin_unlock_irq(&phba->hbalock);
1863 /* wait for the link attention interrupt */
1867 while (phba->link_state != LPFC_HBA_READY) {
1868 if (i++ > timeout) {
1881 lpfc_bsg_diag_mode_exit(phba);
1884 * Let SLI layer release mboxq if mbox command completed after timeout.
1886 if (pmboxq && mbxstatus != MBX_TIMEOUT)
1887 mempool_free(pmboxq, phba->mbox_mem_pool);
1890 /* make error code available to userspace */
1891 bsg_reply->result = rc;
1892 /* complete the job back to userspace if no error */
1894 bsg_job_done(job, bsg_reply->result,
1895 bsg_reply->reply_payload_rcv_len);
1900 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1901 * @phba: Pointer to HBA context object.
1902 * @diag: Flag for set link to diag or normal operation state.
1904 * This function is responsible for issuing a sli4 mailbox command for setting
1905 * link to either diag state or normal operation state.
1908 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1910 LPFC_MBOXQ_t *pmboxq;
1911 struct lpfc_mbx_set_link_diag_state *link_diag_state;
1912 uint32_t req_len, alloc_len;
1913 int mbxstatus = MBX_SUCCESS, rc;
1915 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
/* Embedded SLI4 config mailbox: payload minus the config header. */
1919 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1920 sizeof(struct lpfc_sli4_cfg_mhdr));
1921 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1922 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1923 req_len, LPFC_SLI4_MBX_EMBED);
1924 if (alloc_len != req_len) {
1926 goto link_diag_state_set_out;
1928 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1929 "3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
1930 diag, phba->sli4_hba.lnk_info.lnk_tp,
1931 phba->sli4_hba.lnk_info.lnk_no);
/* Target the specific link number/type tracked by the SLI4 HBA. */
1933 link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1934 bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
1935 LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
1936 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1937 phba->sli4_hba.lnk_info.lnk_no);
1938 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1939 phba->sli4_hba.lnk_info.lnk_tp);
/* diag != 0 enters diagnostic state, diag == 0 restores normal state. */
1941 bf_set(lpfc_mbx_set_diag_state_diag,
1942 &link_diag_state->u.req, 1);
1944 bf_set(lpfc_mbx_set_diag_state_diag,
1945 &link_diag_state->u.req, 0);
1947 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1949 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1954 link_diag_state_set_out:
/* On MBX_TIMEOUT, the SLI layer still owns the mailbox; don't free. */
1955 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1956 mempool_free(pmboxq, phba->mbox_mem_pool);
1962 * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
1963 * @phba: Pointer to HBA context object.
1965 * This function is responsible for issuing a sli4 mailbox command for setting
1966 * up internal loopback diagnostic.
1969 lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
1971 LPFC_MBOXQ_t *pmboxq;
1972 uint32_t req_len, alloc_len;
1973 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1974 int mbxstatus = MBX_SUCCESS, rc = 0;
1976 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
/* Embedded SLI4 config mailbox for LINK_DIAG_LOOPBACK. */
1979 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1980 sizeof(struct lpfc_sli4_cfg_mhdr));
1981 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1982 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1983 req_len, LPFC_SLI4_MBX_EMBED);
1984 if (alloc_len != req_len) {
1985 mempool_free(pmboxq, phba->mbox_mem_pool);
/* Request internal loopback on the port's current link num/type. */
1988 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1989 bf_set(lpfc_mbx_set_diag_state_link_num,
1990 &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
1991 bf_set(lpfc_mbx_set_diag_state_link_type,
1992 &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
1993 bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
1994 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
1996 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1997 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
1998 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1999 "3127 Failed setup loopback mode mailbox "
2000 "command, rc:x%x, status:x%x\n", mbxstatus,
2001 pmboxq->u.mb.mbxStatus);
/* On MBX_TIMEOUT, the SLI layer still owns the mailbox; don't free. */
2004 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
2005 mempool_free(pmboxq, phba->mbox_mem_pool);
2010 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
2011 * @phba: Pointer to HBA context object.
2013 * This function set up SLI4 FC port registrations for diagnostic run, which
2014 * includes all the rpis, vfi, and also vpi.
2017 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
/* A still-registered VFI means the port was not torn down cleanly;
 * log the stale ids before (re)issuing the VFI registration. */
2021 if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
2022 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2023 "3136 Port still had vfi registered: "
2024 "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
2025 phba->pport->fc_myDID, phba->fcf.fcfi,
2026 phba->sli4_hba.vfi_ids[phba->pport->vfi],
2027 phba->vpi_ids[phba->pport->vpi]);
2030 rc = lpfc_issue_reg_vfi(phba->pport);
2035 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
2036 * @phba: Pointer to HBA context object.
2037 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2039 * This function is responsible for placing an sli4 port into diagnostic
2040 * loopback mode in order to perform a diagnostic loopback test.
2043 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct bsg_job *job)
2045 struct fc_bsg_request *bsg_request = job->request;
2046 struct fc_bsg_reply *bsg_reply = job->reply;
2047 struct diag_mode_set *loopback_mode;
2048 uint32_t link_flags, timeout;
2051 /* no data to return just the return code */
2052 bsg_reply->reply_payload_rcv_len = 0;
/* Validate the vendor command is at least diag_mode_set sized. */
2054 if (job->request_len < sizeof(struct fc_bsg_request) +
2055 sizeof(struct diag_mode_set)) {
2056 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2057 "3011 Received DIAG MODE request size:%d "
2058 "below the minimum size:%d\n",
2060 (int)(sizeof(struct fc_bsg_request) +
2061 sizeof(struct diag_mode_set)));
2066 rc = lpfc_bsg_diag_mode_enter(phba);
2070 /* indicate we are in loopback diagnostic mode */
2071 spin_lock_irq(&phba->hbalock);
2072 phba->link_flag |= LS_LOOPBACK_MODE;
2073 spin_unlock_irq(&phba->hbalock);
2075 /* reset port to start from scratch */
2076 rc = lpfc_selective_reset(phba);
2080 /* bring the link to diagnostic mode */
2081 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2082 "3129 Bring link to diagnostic state.\n");
2083 loopback_mode = (struct diag_mode_set *)
2084 bsg_request->rqst_data.h_vendor.vendor_cmd;
2085 link_flags = loopback_mode->type;
/* User timeout is in seconds; poll loops below count in small steps. */
2086 timeout = loopback_mode->timeout * 100;
2088 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2090 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2091 "3130 Failed to bring link to diagnostic "
2092 "state, rc:x%x\n", rc);
2093 goto loopback_mode_exit;
2096 /* wait for link down before proceeding */
2098 while (phba->link_state != LPFC_LINK_DOWN) {
2099 if (i++ > timeout) {
2101 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2102 "3131 Timeout waiting for link to "
2103 "diagnostic mode, timeout:%d ms\n",
2105 goto loopback_mode_exit;
2110 /* set up loopback mode */
2111 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2112 "3132 Set up loopback mode:x%x\n", link_flags);
/* Internal loopback via mailbox; external via point-to-point init. */
2114 if (link_flags == INTERNAL_LOOP_BACK)
2115 rc = lpfc_sli4_bsg_set_internal_loopback(phba);
2116 else if (link_flags == EXTERNAL_LOOP_BACK)
2117 rc = lpfc_hba_init_link_fc_topology(phba,
2118 FLAGS_TOPOLOGY_MODE_PT_PT,
2122 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2123 "3141 Loopback mode:x%x not supported\n",
2125 goto loopback_mode_exit;
2129 /* wait for the link attention interrupt */
2132 while (phba->link_state < LPFC_LINK_UP) {
2133 if (i++ > timeout) {
2135 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2136 "3137 Timeout waiting for link up "
2137 "in loopback mode, timeout:%d ms\n",
2145 /* port resource registration setup for loopback diagnostic */
2147 /* set up a none zero myDID for loopback test */
2148 phba->pport->fc_myDID = 1;
2149 rc = lpfc_sli4_diag_fcport_reg_setup(phba);
2151 goto loopback_mode_exit;
2154 /* wait for the port ready */
2157 while (phba->link_state != LPFC_HBA_READY) {
2158 if (i++ > timeout) {
2160 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2161 "3133 Timeout waiting for port "
2162 "loopback mode ready, timeout:%d ms\n",
2171 /* clear loopback diagnostic mode */
2173 spin_lock_irq(&phba->hbalock);
2174 phba->link_flag &= ~LS_LOOPBACK_MODE;
2175 spin_unlock_irq(&phba->hbalock);
2177 lpfc_bsg_diag_mode_exit(phba);
2180 /* make error code available to userspace */
2181 bsg_reply->result = rc;
2182 /* complete the job back to userspace if no error */
2184 bsg_job_done(job, bsg_reply->result,
2185 bsg_reply->reply_payload_rcv_len);
2190 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2191 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2193 * This function is responsible for responding to check and dispatch bsg diag
2194 * command from the user to proper driver action routines.
2197 lpfc_bsg_diag_loopback_mode(struct bsg_job *job)
2199 struct Scsi_Host *shost;
2200 struct lpfc_vport *vport;
2201 struct lpfc_hba *phba;
2204 shost = fc_bsg_to_shost(job);
2207 vport = shost_priv(shost);
/* Dispatch: SLI-3 and earlier use the sli3 path; SLI-4 is only
 * supported on IF_TYPE_2 interfaces. */
2214 if (phba->sli_rev < LPFC_SLI_REV4)
2215 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2216 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
2217 LPFC_SLI_INTF_IF_TYPE_2)
2218 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2226 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2227 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2229 * This function is responsible for responding to check and dispatch bsg diag
2230 * command from the user to proper driver action routines.
2233 lpfc_sli4_bsg_diag_mode_end(struct bsg_job *job)
2235 struct fc_bsg_request *bsg_request = job->request;
2236 struct fc_bsg_reply *bsg_reply = job->reply;
2237 struct Scsi_Host *shost;
2238 struct lpfc_vport *vport;
2239 struct lpfc_hba *phba;
2240 struct diag_mode_set *loopback_mode_end_cmd;
2244 shost = fc_bsg_to_shost(job);
2247 vport = shost_priv(shost);
/* Only meaningful on SLI-4 IF_TYPE_2 ports. */
2254 if (phba->sli_rev < LPFC_SLI_REV4)
2256 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2257 LPFC_SLI_INTF_IF_TYPE_2)
2260 /* clear loopback diagnostic mode */
2261 spin_lock_irq(&phba->hbalock);
2262 phba->link_flag &= ~LS_LOOPBACK_MODE;
2263 spin_unlock_irq(&phba->hbalock);
2264 loopback_mode_end_cmd = (struct diag_mode_set *)
2265 bsg_request->rqst_data.h_vendor.vendor_cmd;
/* User timeout is in seconds; poll loop below counts in small steps. */
2266 timeout = loopback_mode_end_cmd->timeout * 100;
2268 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2270 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2271 "3139 Failed to bring link to diagnostic "
2272 "state, rc:x%x\n", rc);
2273 goto loopback_mode_end_exit;
2276 /* wait for link down before proceeding */
2278 while (phba->link_state != LPFC_LINK_DOWN) {
2279 if (i++ > timeout) {
2280 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2281 "3140 Timeout waiting for link to "
2282 "diagnostic mode_end, timeout:%d ms\n",
2284 /* there is nothing much we can do here */
2290 /* reset port resource registrations */
2291 rc = lpfc_selective_reset(phba);
2292 phba->pport->fc_myDID = 0;
2294 loopback_mode_end_exit:
2295 /* make return code available to userspace */
2296 bsg_reply->result = rc;
2297 /* complete the job back to userspace if no error */
2299 bsg_job_done(job, bsg_reply->result,
2300 bsg_reply->reply_payload_rcv_len);
2305 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2306 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2308 * This function is to perform SLI4 diag link test request from the user
/*
 * Puts the link into diagnostic state, issues the RUN_LINK_DIAG_TEST
 * mailbox command with the user-supplied test parameters, reports the
 * mailbox/shdr status back in a struct diag_status reply, then restores
 * the link to normal state and completes the job.
 */
2312 lpfc_sli4_bsg_link_diag_test(struct bsg_job *job)
2314 struct fc_bsg_request *bsg_request = job->request;
2315 struct fc_bsg_reply *bsg_reply = job->reply;
2316 struct Scsi_Host *shost;
2317 struct lpfc_vport *vport;
2318 struct lpfc_hba *phba;
2319 LPFC_MBOXQ_t *pmboxq;
2320 struct sli4_link_diag *link_diag_test_cmd;
2321 uint32_t req_len, alloc_len;
2322 struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2323 union lpfc_sli4_cfg_shdr *shdr;
2324 uint32_t shdr_status, shdr_add_status;
2325 struct diag_status *diag_status_reply;
2326 int mbxstatus, rc = 0;
2328 shost = fc_bsg_to_shost(job);
2333 vport = shost_priv(shost);
/* Only supported on SLI-4 IF_TYPE_2 ports. */
2344 if (phba->sli_rev < LPFC_SLI_REV4) {
2348 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2349 LPFC_SLI_INTF_IF_TYPE_2) {
/* Validate the vendor command is at least sli4_link_diag sized. */
2354 if (job->request_len < sizeof(struct fc_bsg_request) +
2355 sizeof(struct sli4_link_diag)) {
2356 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2357 "3013 Received LINK DIAG TEST request "
2358 " size:%d below the minimum size:%d\n",
2360 (int)(sizeof(struct fc_bsg_request) +
2361 sizeof(struct sli4_link_diag)));
2366 rc = lpfc_bsg_diag_mode_enter(phba);
2370 link_diag_test_cmd = (struct sli4_link_diag *)
2371 bsg_request->rqst_data.h_vendor.vendor_cmd;
2373 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2378 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2381 goto link_diag_test_exit;
2384 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2385 sizeof(struct lpfc_sli4_cfg_mhdr));
2386 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2387 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2388 req_len, LPFC_SLI4_MBX_EMBED);
2389 if (alloc_len != req_len) {
2391 goto link_diag_test_exit;
/* Populate the test request from the user command. */
2393 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2394 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2395 phba->sli4_hba.lnk_info.lnk_no);
2396 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2397 phba->sli4_hba.lnk_info.lnk_tp);
2398 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2399 link_diag_test_cmd->test_id);
2400 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2401 link_diag_test_cmd->loops);
2402 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2403 link_diag_test_cmd->test_version);
2404 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2405 link_diag_test_cmd->error_action);
2407 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
/* Pull completion status from the SLI4 config header. */
2409 shdr = (union lpfc_sli4_cfg_shdr *)
2410 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2411 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2412 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2413 if (shdr_status || shdr_add_status || mbxstatus) {
2414 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2415 "3010 Run link diag test mailbox failed with "
2416 "mbx_status x%x status x%x, add_status x%x\n",
2417 mbxstatus, shdr_status, shdr_add_status);
2420 diag_status_reply = (struct diag_status *)
2421 bsg_reply->reply_data.vendor_reply.vendor_rsp;
/* Verify the reply buffer can hold the diag_status structure. */
2423 if (job->reply_len <
2424 sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
2425 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2426 "3012 Received Run link diag test reply "
2427 "below minimum size (%d): reply_len:%d\n",
2428 (int)(sizeof(struct fc_bsg_request) +
2429 sizeof(struct diag_status)),
2435 diag_status_reply->mbox_status = mbxstatus;
2436 diag_status_reply->shdr_status = shdr_status;
2437 diag_status_reply->shdr_add_status = shdr_add_status;
2439 link_diag_test_exit:
/* Always attempt to restore the link to normal operation. */
2440 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2443 mempool_free(pmboxq, phba->mbox_mem_pool);
2445 lpfc_bsg_diag_mode_exit(phba);
2448 /* make error code available to userspace */
2449 bsg_reply->result = rc;
2450 /* complete the job back to userspace if no error */
2452 bsg_job_done(job, bsg_reply->result,
2453 bsg_reply->reply_payload_rcv_len);
2458 * lpfcdiag_loop_self_reg - obtains a remote port login id
2459 * @phba: Pointer to HBA context object
2460 * @rpi: Pointer to a remote port login id
2462 * This function obtains a remote port login id so the diag loopback test
2463 * can send and receive its own unsolicited CT command.
2465 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2468 struct lpfc_dmabuf *dmabuff;
2471 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
/* Register the port to itself (fc_myDID): SLI-3 lets the firmware
 * assign the rpi, SLI-4 must allocate one from the driver pool first. */
2475 if (phba->sli_rev < LPFC_SLI_REV4)
2476 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2477 (uint8_t *)&phba->pport->fc_sparam,
2480 *rpi = lpfc_sli4_alloc_rpi(phba);
2481 status = lpfc_reg_rpi(phba, phba->pport->vpi,
2482 phba->pport->fc_myDID,
2483 (uint8_t *)&phba->pport->fc_sparam,
/* lpfc_reg_rpi failed: release the mailbox and any SLI-4 rpi. */
2488 mempool_free(mbox, phba->mbox_mem_pool);
2489 if (phba->sli_rev == LPFC_SLI_REV4)
2490 lpfc_sli4_free_rpi(phba, *rpi);
/* Detach the service-params dmabuf so we can free it ourselves. */
2494 dmabuff = (struct lpfc_dmabuf *) mbox->context1;
2495 mbox->context1 = NULL;
2496 mbox->context2 = NULL;
2497 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2499 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2500 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
/* On MBX_TIMEOUT, the SLI layer still owns the mailbox; don't free. */
2502 if (status != MBX_TIMEOUT)
2503 mempool_free(mbox, phba->mbox_mem_pool);
2504 if (phba->sli_rev == LPFC_SLI_REV4)
2505 lpfc_sli4_free_rpi(phba, *rpi);
/* SLI-3 returns the firmware-assigned rpi in the mailbox words. */
2509 if (phba->sli_rev < LPFC_SLI_REV4)
2510 *rpi = mbox->u.mb.un.varWords[0];
2512 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2514 mempool_free(mbox, phba->mbox_mem_pool);
2519 * lpfcdiag_loop_self_unreg - unregs from the rpi
2520 * @phba: Pointer to HBA context object
2521 * @rpi: Remote port login id
2523 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2525 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2530 /* Allocate mboxq structure */
2531 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
/* SLI-4 uses driver-indexed rpi_ids; SLI-3 passes the rpi directly. */
2535 if (phba->sli_rev < LPFC_SLI_REV4)
2536 lpfc_unreg_login(phba, 0, rpi, mbox);
2538 lpfc_unreg_login(phba, phba->pport->vpi,
2539 phba->sli4_hba.rpi_ids[rpi], mbox);
2541 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2543 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
/* On MBX_TIMEOUT, the SLI layer still owns the mailbox; don't free. */
2544 if (status != MBX_TIMEOUT)
2545 mempool_free(mbox, phba->mbox_mem_pool);
2548 mempool_free(mbox, phba->mbox_mem_pool);
/* Return the SLI-4 rpi to the driver's pool. */
2549 if (phba->sli_rev == LPFC_SLI_REV4)
2550 lpfc_sli4_free_rpi(phba, rpi);
2555 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2556 * @phba: Pointer to HBA context object
2557 * @rpi: Remote port login id
2558 * @txxri: Pointer to transmit exchange id
2559 * @rxxri: Pointer to response exchange id
2561 * This function obtains the transmit and receive ids required to send
2562 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
2563 * flags are used so the unsolicited response handler is able to process
2564 * the ct command sent on the same port.
2566 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2567 uint16_t *txxri, uint16_t * rxxri)
/* Obtain the transmit and receive exchange ids for the SLI-3 loopback
 * path by sending an ELX_LOOPBACK_XRI_SETUP CT command to ourselves and
 * waiting for the matching unsolicited event.
 * NOTE(review): listing is elided — several declarations, error
 * assignments and returns are not shown.
 */
2569 struct lpfc_bsg_event *evt;
2570 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2572 struct lpfc_dmabuf *dmabuf;
2573 struct ulp_bde64 *bpl = NULL;
2574 struct lpfc_sli_ct_request *ctreq = NULL;
2577 int iocb_stat = IOCB_SUCCESS;
2578 unsigned long flags;
/* Register interest in loopback CT events before issuing the command so
 * the unsolicited response cannot be missed.
 */
2582 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2583 SLI_CT_ELX_LOOPBACK);
2587 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2588 list_add(&evt->node, &phba->ct_ev_waiters);
2589 lpfc_bsg_event_ref(evt);
2590 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2592 cmdiocbq = lpfc_sli_get_iocbq(phba);
2593 rspiocbq = lpfc_sli_get_iocbq(phba);
/* One mbuf holds the BPL followed immediately by the CT request header */
2595 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2597 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2599 INIT_LIST_HEAD(&dmabuf->list);
2600 bpl = (struct ulp_bde64 *) dmabuf->virt;
2601 memset(bpl, 0, sizeof(*bpl));
2602 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2604 le32_to_cpu(putPaddrHigh(dmabuf->phys +
2607 le32_to_cpu(putPaddrLow(dmabuf->phys +
2609 bpl->tus.f.bdeFlags = 0;
2610 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2611 bpl->tus.w = le32_to_cpu(bpl->tus.w);
/* All allocations checked in one place; bail out through common cleanup */
2615 if (cmdiocbq == NULL || rspiocbq == NULL ||
2616 dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2617 dmabuf->virt == NULL) {
2619 goto err_get_xri_exit;
2622 cmd = &cmdiocbq->iocb;
2623 rsp = &rspiocbq->iocb;
/* Build the special loopback CT request the unsolicited handler keys on */
2625 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2627 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2628 ctreq->RevisionId.bits.InId = 0;
2629 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2630 ctreq->FsSubType = 0;
2631 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2632 ctreq->CommandResponse.bits.Size = 0;
/* XMIT_SEQUENCE64_CR iocb pointing at the single-entry BPL above */
2635 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2636 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2637 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2638 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2640 cmd->un.xseq64.w5.hcsw.Fctl = LA;
2641 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2642 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2643 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2645 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2646 cmd->ulpBdeCount = 1;
2648 cmd->ulpClass = CLASS3;
2649 cmd->ulpContext = rpi;
2651 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2652 cmdiocbq->vport = phba->pport;
2653 cmdiocbq->iocb_cmpl = NULL;
2655 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2657 (phba->fc_ratov * 2)
2658 + LPFC_DRVR_TIMEOUT);
2659 if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOSTAT_SUCCESS)) {
2661 goto err_get_xri_exit;
/* The transmit xri comes straight back in the response iocb */
2663 *txxri = rsp->ulpContext;
/* The receive xri arrives later as an unsolicited CT event; wait for it */
2666 evt->wait_time_stamp = jiffies;
2667 time_left = wait_event_interruptible_timeout(
2668 evt->wq, !list_empty(&evt->events_to_see),
2669 msecs_to_jiffies(1000 *
2670 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
2671 if (list_empty(&evt->events_to_see))
/* time_left != 0 means the wait was interrupted rather than timed out */
2672 ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2674 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2675 list_move(evt->events_to_see.prev, &evt->events_to_get);
2676 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2677 *rxxri = (list_entry(evt->events_to_get.prev,
2678 typeof(struct event_data),
/* Drop both references: ours and the registration's auto-delete ref */
2684 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2685 lpfc_bsg_event_unref(evt); /* release ref */
2686 lpfc_bsg_event_unref(evt); /* delete */
2687 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2691 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
/* On IOCB_TIMEDOUT the iocb may still complete later and must not be
 * released here — NOTE(review): inferred from the guard, confirm.
 */
2695 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2696 lpfc_sli_release_iocbq(phba, cmdiocbq);
2698 lpfc_sli_release_iocbq(phba, rspiocbq);
2703 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
2704 * @phba: Pointer to HBA context object
2706 * This function allocates a BSG_MBOX_SIZE (4KB) page size dma buffer and
2707 * returns the pointer to the buffer.
2709 static struct lpfc_dmabuf *
2710 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
/* Allocate one zeroed BSG_MBOX_SIZE DMA-coherent page wrapped in an
 * lpfc_dmabuf descriptor.  Caller owns the result and frees it with
 * lpfc_bsg_dma_page_free().
 * NOTE(review): listing elided — the kmalloc NULL check and the returns
 * are not shown.
 */
2712 struct lpfc_dmabuf *dmabuf;
2713 struct pci_dev *pcidev = phba->pcidev;
2715 /* allocate dma buffer struct */
2716 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2720 INIT_LIST_HEAD(&dmabuf->list);
2722 /* now, allocate dma buffer */
2723 dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2724 &(dmabuf->phys), GFP_KERNEL);
/* DMA page failed: descriptor must be unwound before returning */
2726 if (!dmabuf->virt) {
2735 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2736 * @phba: Pointer to HBA context object.
2737 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2739 * This routine just simply frees a dma buffer and its associated buffer
2740 * descriptor referred by @dmabuf.
2743 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
/* Release the DMA-coherent page and its descriptor allocated by
 * lpfc_bsg_dma_page_alloc().
 * NOTE(review): listing elided — NULL guards and the kfree of the
 * descriptor are not shown.
 */
2745 struct pci_dev *pcidev = phba->pcidev;
2751 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2752 dmabuf->virt, dmabuf->phys);
2758 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2759 * @phba: Pointer to HBA context object.
2760 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2762 * This routine just simply frees all dma buffers and their associated buffer
2763 * descriptors referred by @dmabuf_list.
2766 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2767 struct list_head *dmabuf_list)
/* Free every bsg mbox page on @dmabuf_list; each entry is unlinked
 * before being handed to lpfc_bsg_dma_page_free().
 */
2769 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2771 if (list_empty(dmabuf_list))
/* _safe iteration: entries are deleted while walking the list */
2774 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2775 list_del_init(&dmabuf->list);
2776 lpfc_bsg_dma_page_free(phba, dmabuf);
2782 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2783 * @phba: Pointer to HBA context object
2784 * @bpl: Pointer to 64 bit bde structure
2785 * @size: Number of bytes to process
2786 * @nocopydata: Flag to copy user data into the allocated buffer
2788 * This function allocates page size buffers and populates an lpfc_dmabufext.
2789 * If allowed the user data pointed to with indataptr is copied into the kernel
2790 * memory. The chained list of page size buffers is returned.
2792 static struct lpfc_dmabufext *
2793 diag_cmd_data_alloc(struct lpfc_hba *phba,
2794 struct ulp_bde64 *bpl, uint32_t size,
/* Build a chained list of DMA-coherent chunks covering @size bytes and
 * fill one ulp_bde64 entry per chunk into @bpl.
 * NOTE(review): listing elided — the loop structure, the nocopydata
 * parameter line, NULL checks, bpl advance and return are not shown.
 */
2797 struct lpfc_dmabufext *mlist = NULL;
2798 struct lpfc_dmabufext *dmp;
2799 int cnt, offset = 0, i = 0;
2800 struct pci_dev *pcidev;
2802 pcidev = phba->pcidev;
2805 /* We get chunks of 4K */
2806 if (size > BUF_SZ_4K)
2811 /* allocate struct lpfc_dmabufext buffer header */
2812 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2816 INIT_LIST_HEAD(&dmp->dma.list);
2818 /* Queue it to a linked list */
/* mlist heads the chain; subsequent chunks are appended to it */
2820 list_add_tail(&dmp->dma.list, &mlist->dma.list);
2824 /* allocate buffer */
2825 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
/* nocopydata path: leave contents as-is and sync toward the device */
2836 bpl->tus.f.bdeFlags = 0;
2837 pci_dma_sync_single_for_device(phba->pcidev,
2838 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
/* copy path: zero the chunk and mark the BDE as 64-bit input */
2841 memset((uint8_t *)dmp->dma.virt, 0, cnt);
2842 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2845 /* build buffer ptr list for IOCB */
2846 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2847 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2848 bpl->tus.f.bdeSize = (ushort) cnt;
2849 bpl->tus.w = le32_to_cpu(bpl->tus.w);
/* Error path: tear down everything built so far */
2862 diag_cmd_data_free(phba, mlist);
2867 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2868 * @phba: Pointer to HBA context object
2869 * @rxxri: Receive exchange id
2870 * @len: Number of data bytes
2872 * This function allocates and posts a data buffer of sufficient size to receive
2873 * an unsolicited CT command.
2875 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
/* Post receive buffers on the ELS ring, two BDEs per QUE_XRI iocb, so the
 * self-addressed loopback CT command of @len bytes can be received on
 * exchange @rxxri.
 * NOTE(review): listing elided — the len parameter line, several
 * declarations, loop closure and returns are not shown.
 */
2878 struct lpfc_sli *psli = &phba->sli;
2879 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2880 struct lpfc_iocbq *cmdiocbq;
2882 struct list_head head, *curr, *next;
2883 struct lpfc_dmabuf *rxbmp;
2884 struct lpfc_dmabuf *dmp;
2885 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2886 struct ulp_bde64 *rxbpl = NULL;
2888 struct lpfc_dmabufext *rxbuffer = NULL;
2893 cmdiocbq = lpfc_sli_get_iocbq(phba);
2894 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2895 if (rxbmp != NULL) {
2896 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2898 INIT_LIST_HEAD(&rxbmp->list);
2899 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2900 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2904 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
2906 goto err_post_rxbufs_exit;
2909 /* Queue buffers for the receive exchange */
/* flag carries the number of BDEs produced by diag_cmd_data_alloc */
2910 num_bde = (uint32_t)rxbuffer->flag;
2911 dmp = &rxbuffer->dma;
2913 cmd = &cmdiocbq->iocb;
/* Temporarily splice the chunk chain onto a local head for iteration */
2916 INIT_LIST_HEAD(&head);
2917 list_add_tail(&head, &dmp->list);
2918 list_for_each_safe(curr, next, &head) {
2919 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
/* HBQ-enabled HBAs take one tagged buffer per QUE_XRI64_CX iocb */
2922 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2923 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2924 cmd->un.quexri64cx.buff.bde.addrHigh =
2925 putPaddrHigh(mp[i]->phys);
2926 cmd->un.quexri64cx.buff.bde.addrLow =
2927 putPaddrLow(mp[i]->phys);
2928 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2929 ((struct lpfc_dmabufext *)mp[i])->size;
2930 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2931 cmd->ulpCommand = CMD_QUE_XRI64_CX;
2934 cmd->ulpBdeCount = 1;
2935 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
/* Non-HBQ path batches up to two cont64 BDEs per iocb */
2938 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2939 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2940 cmd->un.cont64[i].tus.f.bdeSize =
2941 ((struct lpfc_dmabufext *)mp[i])->size;
2942 cmd->ulpBdeCount = ++i;
2944 if ((--num_bde > 0) && (i < 2))
2947 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2951 cmd->ulpClass = CLASS3;
2952 cmd->ulpContext = rxxri;
2954 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2956 if (iocb_stat == IOCB_ERROR) {
2957 diag_cmd_data_free(phba,
2958 (struct lpfc_dmabufext *)mp[0]);
2960 diag_cmd_data_free(phba,
2961 (struct lpfc_dmabufext *)mp[1]);
2962 dmp = list_entry(next, struct lpfc_dmabuf, list);
2964 goto err_post_rxbufs_exit;
/* Hand posted buffers to the ring so the unsolicited path can find them */
2967 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2969 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2973 /* The iocb was freed by lpfc_sli_issue_iocb */
2974 cmdiocbq = lpfc_sli_get_iocbq(phba);
2976 dmp = list_entry(next, struct lpfc_dmabuf, list);
2978 goto err_post_rxbufs_exit;
2981 cmd = &cmdiocbq->iocb;
2986 err_post_rxbufs_exit:
2990 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2995 lpfc_sli_release_iocbq(phba, cmdiocbq);
3000 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
3001 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
3003 * This function receives a user data buffer to be transmitted and received on
3004 * the same port, the link must be up and in loopback mode prior
3006 * 1. A kernel buffer is allocated to copy the user data into.
3007 * 2. The port registers with "itself".
3008 * 3. The transmit and receive exchange ids are obtained.
3009 * 4. The receive exchange id is posted.
3010 * 5. A new els loopback event is created.
3011 * 6. The command and response iocbs are allocated.
3012 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
3014 * This function is meant to be called n times while the port is in loopback
3015 * so it is the apps responsibility to issue a reset to take the port out
3019 lpfc_bsg_diag_loopback_run(struct bsg_job *job)
/* BSG vendor command: transmit the user payload out the port in loopback
 * mode, catch it again via an unsolicited CT event, and copy it back to
 * the reply payload.  See the numbered steps in the header comment above
 * this function in the original file.
 * NOTE(review): listing elided throughout — many declarations, error-code
 * assignments, else branches and closing braces are not shown.
 */
3021 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3022 struct fc_bsg_reply *bsg_reply = job->reply;
3023 struct lpfc_hba *phba = vport->phba;
3024 struct lpfc_bsg_event *evt;
3025 struct event_data *evdat;
3026 struct lpfc_sli *psli = &phba->sli;
3029 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
3031 struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
3032 IOCB_t *cmd, *rsp = NULL;
3033 struct lpfc_sli_ct_request *ctreq;
3034 struct lpfc_dmabuf *txbmp;
3035 struct ulp_bde64 *txbpl = NULL;
3036 struct lpfc_dmabufext *txbuffer = NULL;
3037 struct list_head head;
3038 struct lpfc_dmabuf *curr;
3039 uint16_t txxri = 0, rxxri;
3041 uint8_t *ptr = NULL, *rx_databuf = NULL;
3044 int iocb_stat = IOCB_SUCCESS;
3045 unsigned long flags;
3046 void *dataout = NULL;
3049 /* in case no data is returned return just the return code */
3050 bsg_reply->reply_payload_rcv_len = 0;
/* Validate the bsg request: minimum length, symmetric payloads */
3052 if (job->request_len <
3053 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
3054 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3055 "2739 Received DIAG TEST request below minimum "
3058 goto loopback_test_exit;
3061 if (job->request_payload.payload_len !=
3062 job->reply_payload.payload_len) {
3064 goto loopback_test_exit;
/* Refuse while the HBA is errored, mgmt I/O is blocked, or SLI inactive */
3067 if ((phba->link_state == LPFC_HBA_ERROR) ||
3068 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
3069 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
3071 goto loopback_test_exit;
/* Loopback only works with the link up and already in loopback mode */
3074 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
3076 goto loopback_test_exit;
3079 size = job->request_payload.payload_len;
3080 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
/* Payload capped at 80 * 4K */
3082 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
3084 goto loopback_test_exit;
3087 if (full_size >= BUF_SZ_4K) {
3089 * Allocate memory for ioctl data. If buffer is bigger than 64k,
3090 * then we allocate 64k and re-use that buffer over and over to
3091 * xfer the whole block. This is because Linux kernel has a
3092 * problem allocating more than 120k of kernel space memory. Saw
3093 * problem with GET_FCPTARGETMAPPING...
3095 if (size <= (64 * 1024))
3096 total_mem = full_size;
3098 total_mem = 64 * 1024;
3100 /* Allocate memory for ioctl data */
3101 total_mem = BUF_SZ_4K;
3103 dataout = kmalloc(total_mem, GFP_KERNEL);
3104 if (dataout == NULL) {
3106 goto loopback_test_exit;
/* User data goes after the ELX loopback header in the staging buffer */
3110 ptr += ELX_LOOPBACK_HEADER_SZ;
3111 sg_copy_to_buffer(job->request_payload.sg_list,
3112 job->request_payload.sg_cnt,
/* Step 2: register the port with itself to get an rpi */
3114 rc = lpfcdiag_loop_self_reg(phba, &rpi);
3116 goto loopback_test_exit;
/* Steps 3-4 only apply to SLI-3; SLI-4 addresses by rpi directly */
3118 if (phba->sli_rev < LPFC_SLI_REV4) {
3119 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
3121 lpfcdiag_loop_self_unreg(phba, rpi);
3122 goto loopback_test_exit;
3125 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
3127 lpfcdiag_loop_self_unreg(phba, rpi);
3128 goto loopback_test_exit;
/* Step 5: arm the loopback event before transmitting */
3131 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
3132 SLI_CT_ELX_LOOPBACK);
3134 lpfcdiag_loop_self_unreg(phba, rpi);
3136 goto loopback_test_exit;
3139 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3140 list_add(&evt->node, &phba->ct_ev_waiters);
3141 lpfc_bsg_event_ref(evt);
3142 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* Step 6: iocbs plus a BPL page describing the tx chunk chain */
3144 cmdiocbq = lpfc_sli_get_iocbq(phba);
3145 if (phba->sli_rev < LPFC_SLI_REV4)
3146 rspiocbq = lpfc_sli_get_iocbq(phba);
3147 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
3150 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
3152 INIT_LIST_HEAD(&txbmp->list);
3153 txbpl = (struct ulp_bde64 *) txbmp->virt;
3154 txbuffer = diag_cmd_data_alloc(phba,
3155 txbpl, full_size, 0);
3159 if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
3161 goto err_loopback_test_exit;
3163 if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
3165 goto err_loopback_test_exit;
3168 cmd = &cmdiocbq->iocb;
3169 if (phba->sli_rev < LPFC_SLI_REV4)
3170 rsp = &rspiocbq->iocb;
/* Step 7: first chunk carries the CT loopback header, then user data is
 * scattered across the chunk chain.
 */
3172 INIT_LIST_HEAD(&head);
3173 list_add_tail(&head, &txbuffer->dma.list);
3174 list_for_each_entry(curr, &head, list) {
3175 segment_len = ((struct lpfc_dmabufext *)curr)->size;
3176 if (current_offset == 0) {
3178 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
3179 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
3180 ctreq->RevisionId.bits.InId = 0;
3181 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
3182 ctreq->FsSubType = 0;
3183 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
3184 ctreq->CommandResponse.bits.Size = size;
3185 segment_offset = ELX_LOOPBACK_HEADER_SZ;
3189 BUG_ON(segment_offset >= segment_len);
3190 memcpy(curr->virt + segment_offset,
3191 ptr + current_offset,
3192 segment_len - segment_offset);
3194 current_offset += segment_len - segment_offset;
3195 BUG_ON(current_offset > size);
3199 /* Build the XMIT_SEQUENCE iocb */
3200 num_bde = (uint32_t)txbuffer->flag;
3202 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
3203 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
3204 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
3205 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
3207 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
3208 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
3209 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
3210 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
3212 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
3213 cmd->ulpBdeCount = 1;
3215 cmd->ulpClass = CLASS3;
/* SLI-3 targets the exchange obtained earlier; SLI-4 targets the rpi */
3217 if (phba->sli_rev < LPFC_SLI_REV4) {
3218 cmd->ulpContext = txxri;
3220 cmd->un.xseq64.bdl.ulpIoTag32 = 0;
3221 cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
3222 cmdiocbq->context3 = txbmp;
3223 cmdiocbq->sli4_xritag = NO_XRI;
3224 cmd->unsli3.rcvsli3.ox_id = 0xffff;
3226 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
3227 cmdiocbq->iocb_flag |= LPFC_IO_LOOPBACK;
3228 cmdiocbq->vport = phba->pport;
3229 cmdiocbq->iocb_cmpl = NULL;
3230 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3231 rspiocbq, (phba->fc_ratov * 2) +
/* rsp is only valid on SLI-3 (rspiocbq is NULL on SLI-4) */
3234 if ((iocb_stat != IOCB_SUCCESS) ||
3235 ((phba->sli_rev < LPFC_SLI_REV4) &&
3236 (rsp->ulpStatus != IOSTAT_SUCCESS))) {
3237 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3238 "3126 Failed loopback test issue iocb: "
3239 "iocb_stat:x%x\n", iocb_stat);
3241 goto err_loopback_test_exit;
/* Wait for the looped-back frame to arrive as an unsolicited CT event */
3245 time_left = wait_event_interruptible_timeout(
3246 evt->wq, !list_empty(&evt->events_to_see),
3247 msecs_to_jiffies(1000 *
3248 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT)));
3250 if (list_empty(&evt->events_to_see)) {
/* nonzero time_left with empty list means we were interrupted */
3251 rc = (time_left) ? -EINTR : -ETIMEDOUT;
3252 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3253 "3125 Not receiving unsolicited event, "
3256 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3257 list_move(evt->events_to_see.prev, &evt->events_to_get);
3258 evdat = list_entry(evt->events_to_get.prev,
3259 typeof(*evdat), node);
3260 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3261 rx_databuf = evdat->data;
/* Received frame must be exactly header + payload */
3262 if (evdat->len != full_size) {
3263 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3264 "1603 Loopback test did not receive expected "
3265 "data length. actual length 0x%x expected "
3267 evdat->len, full_size);
3269 } else if (rx_databuf == NULL)
3273 /* skip over elx loopback header */
3274 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3275 bsg_reply->reply_payload_rcv_len =
3276 sg_copy_from_buffer(job->reply_payload.sg_list,
3277 job->reply_payload.sg_cnt,
3279 bsg_reply->reply_payload_rcv_len = size;
3283 err_loopback_test_exit:
3284 lpfcdiag_loop_self_unreg(phba, rpi);
3286 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3287 lpfc_bsg_event_unref(evt); /* release ref */
3288 lpfc_bsg_event_unref(evt); /* delete */
3289 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* Timed-out iocbs may still complete; do not release them here */
3291 if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
3292 lpfc_sli_release_iocbq(phba, cmdiocbq);
3294 if (rspiocbq != NULL)
3295 lpfc_sli_release_iocbq(phba, rspiocbq);
3297 if (txbmp != NULL) {
3298 if (txbpl != NULL) {
3299 if (txbuffer != NULL)
3300 diag_cmd_data_free(phba, txbuffer);
3301 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3308 /* make error code available to userspace */
3309 bsg_reply->result = rc;
3310 job->dd_data = NULL;
3311 /* complete the job back to userspace if no error */
3312 if (rc == IOCB_SUCCESS)
3313 bsg_job_done(job, bsg_reply->result,
3314 bsg_reply->reply_payload_rcv_len);
3319 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3320 * @job: GET_DFC_REV fc_bsg_job
3323 lpfc_bsg_get_dfc_rev(struct bsg_job *job)
/* Return the management interface major/minor revision to userspace in
 * the vendor reply buffer.
 * NOTE(review): listing elided — rc declaration, error assignments,
 * braces and the final return are not shown.
 */
3325 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
3326 struct fc_bsg_reply *bsg_reply = job->reply;
3327 struct lpfc_hba *phba = vport->phba;
3328 struct get_mgmt_rev_reply *event_reply;
/* Reject requests smaller than the expected vendor command */
3331 if (job->request_len <
3332 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3333 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3334 "2740 Received GET_DFC_REV request below "
3340 event_reply = (struct get_mgmt_rev_reply *)
3341 bsg_reply->reply_data.vendor_reply.vendor_rsp;
/* Also reject if the reply buffer cannot hold the revision struct */
3343 if (job->reply_len <
3344 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
3345 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3346 "2741 Received GET_DFC_REV reply below "
3352 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3353 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3355 bsg_reply->result = rc;
3357 bsg_job_done(job, bsg_reply->result,
3358 bsg_reply->reply_payload_rcv_len);
3363 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3364 * @phba: Pointer to HBA context object.
3365 * @pmboxq: Pointer to mailbox command.
3367 * This is completion handler function for mailbox commands issued from
3368 * lpfc_bsg_issue_mbox function. This function is called by the
3369 * mailbox event handler function with no lock held. This function
3370 * will wake up thread waiting on the wait queue pointed by context1
3374 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Mailbox completion handler for lpfc_bsg_issue_mbox: copy the completed
 * mailbox back to the job's buffer, free driver resources, and complete
 * the bsg job unless it has already been aborted.
 * NOTE(review): listing elided — braces and the if (job) tests around the
 * copy/complete sections are not shown.
 */
3376 struct bsg_job_data *dd_data;
3377 struct fc_bsg_reply *bsg_reply;
3378 struct bsg_job *job;
3380 unsigned long flags;
3381 uint8_t *pmb, *pmb_buf;
3383 dd_data = pmboxq->context1;
3386 * The outgoing buffer is readily referred from the dma buffer,
3387 * just need to get header part from mailboxq structure.
3389 pmb = (uint8_t *)&pmboxq->u.mb;
3390 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3391 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3393 /* Determine if job has been aborted */
3395 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3396 job = dd_data->set_job;
3398 /* Prevent timeout handling from trying to abort job */
/* Clearing dd_data under ct_ev_lock races-off the timeout path */
3399 job->dd_data = NULL;
3401 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3403 /* Copy the mailbox data to the job if it is still active */
3406 bsg_reply = job->reply;
3407 size = job->reply_payload.payload_len;
3408 bsg_reply->reply_payload_rcv_len =
3409 sg_copy_from_buffer(job->reply_payload.sg_list,
3410 job->reply_payload.sg_cnt,
/* Driver-side resources are released regardless of job state */
3414 dd_data->set_job = NULL;
3415 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3416 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3419 /* Complete the job if the job is still active */
3422 bsg_reply->result = 0;
3423 bsg_job_done(job, bsg_reply->result,
3424 bsg_reply->reply_payload_rcv_len);
3430 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3431 * @phba: Pointer to HBA context object.
3432 * @mb: Pointer to a mailbox object.
3433 * @vport: Pointer to a vport object.
3435 * Some commands require the port to be offline, some may not be called from
3438 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3439 MAILBOX_t *mb, struct lpfc_vport *vport)
/* Gate which mailbox commands userspace may pass through: some require
 * the port offline, some are allowed always, unknown commands are
 * rejected.
 * NOTE(review): listing elided — several case labels, break/return
 * statements and the closing brace are not shown; fallthrough structure
 * cannot be fully judged from this view.
 */
3441 /* return negative error values for bsg job */
3442 switch (mb->mbxCommand) {
/* These commands are only legal while the port is offline */
3446 case MBX_CONFIG_LINK:
3447 case MBX_CONFIG_RING:
3448 case MBX_RESET_RING:
3449 case MBX_UNREG_LOGIN:
3451 case MBX_DUMP_CONTEXT:
3455 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3456 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3457 "2743 Command 0x%x is illegal in on-line "
/* Commands below are permitted while on-line */
3463 case MBX_WRITE_VPARMS:
3466 case MBX_READ_CONFIG:
3467 case MBX_READ_RCONFIG:
3468 case MBX_READ_STATUS:
3471 case MBX_READ_LNK_STAT:
3472 case MBX_DUMP_MEMORY:
3474 case MBX_UPDATE_CFG:
3475 case MBX_KILL_BOARD:
3476 case MBX_READ_TOPOLOGY:
3478 case MBX_LOAD_EXP_ROM:
3480 case MBX_DEL_LD_ENTRY:
3483 case MBX_SLI4_CONFIG:
3484 case MBX_READ_EVENT_LOG:
3485 case MBX_READ_EVENT_LOG_STATUS:
3486 case MBX_WRITE_EVENT_LOG:
3487 case MBX_PORT_CAPABILITIES:
3488 case MBX_PORT_IOV_CONTROL:
3489 case MBX_RUN_BIU_DIAG64:
/* SET_VARIABLE has driver-visible side effects handled inline */
3491 case MBX_SET_VARIABLE:
3492 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3493 "1226 mbox: set_variable 0x%x, 0x%x\n",
3495 mb->un.varWords[1]);
3496 if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3497 && (mb->un.varWords[1] == 1)) {
3498 phba->wait_4_mlo_maint_flg = 1;
3499 } else if (mb->un.varWords[0] == SETVAR_MLORST) {
/* MLO reset drops loopback mode and forces point-to-point topology */
3500 spin_lock_irq(&phba->hbalock);
3501 phba->link_flag &= ~LS_LOOPBACK_MODE;
3502 spin_unlock_irq(&phba->hbalock);
3503 phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3506 case MBX_READ_SPARM64:
3508 case MBX_REG_LOGIN64:
3509 case MBX_CONFIG_PORT:
3510 case MBX_RUN_BIU_DIAG:
/* Anything not listed is refused */
3512 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3513 "2742 Unknown Command 0x%x\n",
3522 * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session
3523 * @phba: Pointer to HBA context object.
3525 * This is routine clean up and reset BSG handling of multi-buffer mbox
3529 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
/* Tear down the multi-buffer mailbox pass-through session: free all dma
 * buffers, zero the context, and re-init its list head.  A no-op when no
 * session is in progress.
 */
3531 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3534 /* free all memory, including dma buffers */
3535 lpfc_bsg_dma_page_list_free(phba,
3536 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3537 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3538 /* multi-buffer write mailbox command pass-through complete */
/* memset resets state to LPFC_BSG_MBOX_IDLE (0) and clears all fields;
 * the list head must then be re-initialized because memset destroyed it.
 */
3539 memset((char *)&phba->mbox_ext_buf_ctx, 0,
3540 sizeof(struct lpfc_mbox_ext_buf_ctx));
3541 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3547 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3548 * @phba: Pointer to HBA context object.
3549 * @pmboxq: Pointer to mailbox command.
3551 * This is routine handles BSG job for mailbox commands completions with
3552 * multiple external buffers.
3554 static struct bsg_job *
3555 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Common completion-side handling for multi-buffer mailbox commands:
 * copy the mailbox (and any non-embedded extended data, byte-swapped)
 * back to the job, mark the session DONE, and return the job pointer (or
 * presumably NULL if it was aborted — NOTE(review): returns are elided,
 * confirm).
 */
3557 struct bsg_job_data *dd_data;
3558 struct bsg_job *job;
3559 struct fc_bsg_reply *bsg_reply;
3560 uint8_t *pmb, *pmb_buf;
3561 unsigned long flags;
3564 struct lpfc_dmabuf *dmabuf;
3565 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3568 dd_data = pmboxq->context1;
3570 /* Determine if job has been aborted */
3571 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3572 job = dd_data->set_job;
3574 bsg_reply = job->reply;
3575 /* Prevent timeout handling from trying to abort job */
3576 job->dd_data = NULL;
3578 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3581 * The outgoing buffer is readily referred from the dma buffer,
3582 * just need to get header part from mailboxq structure.
3585 pmb = (uint8_t *)&pmboxq->u.mb;
3586 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3587 /* Copy the byte swapped response mailbox back to the user */
3588 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3589 /* if there is any non-embedded extended data copy that too */
3590 dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3591 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
/* Non-embedded SLI_CONFIG: extended data follows the mailbox in the dma
 * buffer and must be byte-swapped in place before the user sees it.
 */
3592 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3593 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3594 pmbx = (uint8_t *)dmabuf->virt;
3595 /* byte swap the extended data following the mailbox command */
3596 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3597 &pmbx[sizeof(MAILBOX_t)],
3598 sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3601 /* Complete the job if the job is still active */
3604 size = job->reply_payload.payload_len;
3605 bsg_reply->reply_payload_rcv_len =
3606 sg_copy_from_buffer(job->reply_payload.sg_list,
3607 job->reply_payload.sg_cnt,
3610 /* result for successful */
3611 bsg_reply->result = 0;
3613 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3614 "2937 SLI_CONFIG ext-buffer maibox command "
3615 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3616 phba->mbox_ext_buf_ctx.nembType,
3617 phba->mbox_ext_buf_ctx.mboxType, size);
3618 lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3619 phba->mbox_ext_buf_ctx.nembType,
3620 phba->mbox_ext_buf_ctx.mboxType,
3621 dma_ebuf, sta_pos_addr,
3622 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3624 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3625 "2938 SLI_CONFIG ext-buffer maibox "
3626 "command (x%x/x%x) failure, rc:x%x\n",
3627 phba->mbox_ext_buf_ctx.nembType,
3628 phba->mbox_ext_buf_ctx.mboxType, rc);
/* Session transitions to DONE; caller decides whether to reset it */
3633 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3639 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3640 * @phba: Pointer to HBA context object.
3641 * @pmboxq: Pointer to mailbox command.
3643 * This is completion handler function for mailbox read commands with multiple
3647 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Completion handler for multi-buffer READ mailbox commands: delegate
 * common handling, reset the session on error or when only one buffer
 * was involved, free the mailbox, and complete any still-active job.
 */
3649 struct bsg_job *job;
3650 struct fc_bsg_reply *bsg_reply;
3652 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3654 /* handle the BSG job with mailbox command */
/* Presumably 'job == NULL' means aborted, so force an error status —
 * NOTE(review): the guard around this line is elided, confirm.
 */
3656 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3658 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3659 "2939 SLI_CONFIG ext-buffer rd maibox command "
3660 "complete, ctxState:x%x, mbxStatus:x%x\n",
3661 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
/* Reads keep the session open across buffers unless it failed or there
 * was only a single buffer to move.
 */
3663 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3664 lpfc_bsg_mbox_ext_session_reset(phba);
3666 /* free base driver mailbox structure memory */
3667 mempool_free(pmboxq, phba->mbox_mem_pool);
3669 /* if the job is still active, call job done */
3671 bsg_reply = job->reply;
3672 bsg_job_done(job, bsg_reply->result,
3673 bsg_reply->reply_payload_rcv_len);
3679 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3680 * @phba: Pointer to HBA context object.
3681 * @pmboxq: Pointer to mailbox command.
3683 * This is completion handler function for mailbox write commands with multiple
3687 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
/* Completion handler for multi-buffer WRITE mailbox commands: delegate
 * common handling, then unconditionally free the mailbox and reset the
 * session (unlike the read path, writes are complete at this point),
 * and finish any still-active job.
 */
3689 struct bsg_job *job;
3690 struct fc_bsg_reply *bsg_reply;
3692 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3694 /* handle the BSG job with the mailbox command */
/* Presumably guarded by 'job == NULL' (aborted) — NOTE(review): the
 * guard is elided from this listing, confirm.
 */
3696 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3698 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3699 "2940 SLI_CONFIG ext-buffer wr maibox command "
3700 "complete, ctxState:x%x, mbxStatus:x%x\n",
3701 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3703 /* free all memory, including dma buffers */
3704 mempool_free(pmboxq, phba->mbox_mem_pool);
3705 lpfc_bsg_mbox_ext_session_reset(phba);
3707 /* if the job is still active, call job done */
3709 bsg_reply = job->reply;
3710 bsg_job_done(job, bsg_reply->result,
3711 bsg_reply->reply_payload_rcv_len);
3718 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3719 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3720 struct lpfc_dmabuf *ext_dmabuf)
/* Patch the physical address of external buffer @index into the
 * SLI_CONFIG mailbox descriptor: mse entries for nemb_mse, hbd entries
 * otherwise.  When @ext_dmabuf is NULL the address points back into
 * @mbx_dmabuf itself (embedded case) — NOTE(review): the if/else split
 * between the two address sources is elided from this listing, confirm.
 */
3722 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3724 /* pointer to the start of mailbox command */
3725 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3727 if (nemb_tp == nemb_mse) {
/* mse descriptor addressed inside the mailbox dma buffer */
3729 sli_cfg_mbx->un.sli_config_emb0_subsys.
3731 putPaddrHigh(mbx_dmabuf->phys +
3733 sli_cfg_mbx->un.sli_config_emb0_subsys.
3735 putPaddrLow(mbx_dmabuf->phys +
3737 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3738 "2943 SLI_CONFIG(mse)[%d], "
3739 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3741 sli_cfg_mbx->un.sli_config_emb0_subsys.
3743 sli_cfg_mbx->un.sli_config_emb0_subsys.
3745 sli_cfg_mbx->un.sli_config_emb0_subsys.
/* mse descriptor addressed at the separate external dma buffer */
3748 sli_cfg_mbx->un.sli_config_emb0_subsys.
3750 putPaddrHigh(ext_dmabuf->phys);
3751 sli_cfg_mbx->un.sli_config_emb0_subsys.
3753 putPaddrLow(ext_dmabuf->phys);
3754 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3755 "2944 SLI_CONFIG(mse)[%d], "
3756 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3758 sli_cfg_mbx->un.sli_config_emb0_subsys.
3760 sli_cfg_mbx->un.sli_config_emb0_subsys.
3762 sli_cfg_mbx->un.sli_config_emb0_subsys.
/* hbd descriptor addressed inside the mailbox dma buffer */
3767 sli_cfg_mbx->un.sli_config_emb1_subsys.
3769 putPaddrHigh(mbx_dmabuf->phys +
3771 sli_cfg_mbx->un.sli_config_emb1_subsys.
3773 putPaddrLow(mbx_dmabuf->phys +
3775 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3776 "3007 SLI_CONFIG(hbd)[%d], "
3777 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3779 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3781 sli_config_emb1_subsys.hbd[index]),
3782 sli_cfg_mbx->un.sli_config_emb1_subsys.
3784 sli_cfg_mbx->un.sli_config_emb1_subsys.
/* hbd descriptor addressed at the separate external dma buffer */
3788 sli_cfg_mbx->un.sli_config_emb1_subsys.
3790 putPaddrHigh(ext_dmabuf->phys);
3791 sli_cfg_mbx->un.sli_config_emb1_subsys.
3793 putPaddrLow(ext_dmabuf->phys);
3794 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3795 "3008 SLI_CONFIG(hbd)[%d], "
3796 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3798 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3800 sli_config_emb1_subsys.hbd[index]),
3801 sli_cfg_mbx->un.sli_config_emb1_subsys.
3803 sli_cfg_mbx->un.sli_config_emb1_subsys.
3811 * lpfc_bsg_sli_cfg_read_cmd_ext - sli_config non-embedded mailbox cmd read
3812 * @phba: Pointer to HBA context object.
3813 * @job: Pointer to the BSG job carrying the mailbox request.
3814 * @nemb_tp: Enumerate of non-embedded mailbox command type.
3815 * @dmabuf: Pointer to the DMA buffer holding the SLI_CONFIG command.
3817 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3818 * non-embedded external buffers.
 *
 * Return: SLI_CONFIG_HANDLED when the command was issued (MBX_SUCCESS or
 * MBX_BUSY); on failure resources are released and the multi-buffer session
 * state is reset to LPFC_BSG_MBOX_IDLE.
3821 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
3822 enum nemb_type nemb_tp,
3823 struct lpfc_dmabuf *dmabuf)
3825 struct fc_bsg_request *bsg_request = job->request;
3826 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3827 struct dfc_mbox_req *mbox_req;
3828 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3829 uint32_t ext_buf_cnt, ext_buf_index;
3830 struct lpfc_dmabuf *ext_dmabuf = NULL;
3831 struct bsg_job_data *dd_data = NULL;
3832 LPFC_MBOXQ_t *pmboxq = NULL;
3838 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
3840 /* pointer to the start of mailbox command */
3841 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
/* determine and range-check the external buffer count per buffer type */
3843 if (nemb_tp == nemb_mse) {
3844 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3845 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3846 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3847 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3848 "2945 Handled SLI_CONFIG(mse) rd, "
3849 "ext_buf_cnt(%d) out of range(%d)\n",
3851 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3855 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3856 "2941 Handled SLI_CONFIG(mse) rd, "
3857 "ext_buf_cnt:%d\n", ext_buf_cnt);
3859 /* sanity check on interface type for support */
3860 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3861 LPFC_SLI_INTF_IF_TYPE_2) {
3865 /* nemb_tp == nemb_hbd */
3866 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3867 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3868 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3869 "2946 Handled SLI_CONFIG(hbd) rd, "
3870 "ext_buf_cnt(%d) out of range(%d)\n",
3872 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3876 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3877 "2942 Handled SLI_CONFIG(hbd) rd, "
3878 "ext_buf_cnt:%d\n", ext_buf_cnt);
3881 /* before dma descriptor setup */
3882 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3883 sta_pre_addr, dmabuf, ext_buf_cnt);
3885 /* reject non-embedded mailbox command with none external buffer */
3886 if (ext_buf_cnt == 0) {
3889 } else if (ext_buf_cnt > 1) {
3890 /* additional external read buffers */
3891 for (i = 1; i < ext_buf_cnt; i++) {
3892 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3897 list_add_tail(&ext_dmabuf->list,
3898 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3902 /* bsg tracking structure */
3903 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3909 /* mailbox command structure for base driver */
3910 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3915 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3917 /* for the first external buffer */
3918 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3920 /* for the rest of external buffer descriptors if any */
3921 if (ext_buf_cnt > 1) {
3923 list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3924 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3925 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3926 ext_buf_index, dmabuf,
3932 /* after dma descriptor setup */
3933 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3934 sta_pos_addr, dmabuf, ext_buf_cnt);
3936 /* construct base driver mbox command */
3937 pmb = &pmboxq->u.mb;
3938 pmbx = (uint8_t *)dmabuf->virt;
3939 memcpy(pmb, pmbx, sizeof(*pmb));
3940 pmb->mbxOwner = OWN_HOST;
3941 pmboxq->vport = phba->pport;
3943 /* multi-buffer handling context */
3944 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3945 phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3946 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3947 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3948 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3949 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3951 /* callback for multi-buffer read mailbox command */
3952 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3954 /* context fields to callback function */
3955 pmboxq->context1 = dd_data;
3956 dd_data->type = TYPE_MBOX;
3957 dd_data->set_job = job;
3958 dd_data->context_un.mbox.pmboxq = pmboxq;
3959 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3960 job->dd_data = dd_data;
3963 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3966 * Non-embedded mailbox subcommand data gets byte swapped here because
3967 * the lower level driver code only does the first 64 mailbox words.
3969 if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
3970 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
3971 (nemb_tp == nemb_mse))
3972 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3973 &pmbx[sizeof(MAILBOX_t)],
3974 sli_cfg_mbx->un.sli_config_emb0_subsys.
3977 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3978 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3979 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3980 "2947 Issued SLI_CONFIG ext-buffer "
3981 "maibox command, rc:x%x\n", rc);
3982 return SLI_CONFIG_HANDLED;
3984 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3985 "2948 Failed to issue SLI_CONFIG ext-buffer "
3986 "maibox command, rc:x%x\n", rc);
/* error path: release mailbox and any allocated external buffers */
3991 mempool_free(pmboxq, phba->mbox_mem_pool);
3992 lpfc_bsg_dma_page_list_free(phba,
3993 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3995 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4000 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
4001 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job carrying the mailbox request.
 * @nemb_tp: Enumerate of non-embedded mailbox command type.
4003 * @dmabuf: Pointer to the DMA buffer holding the SLI_CONFIG command.
4005 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
4006 * non-embedded external buffers. With a single external buffer the command
 * is issued immediately; with more than one the session waits for the
 * remaining buffers to arrive via subsequent BSG requests.
 *
 * Return: SLI_CONFIG_HANDLED when the command was issued or the session is
 * waiting for further external buffers.
4009 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct bsg_job *job,
4010 enum nemb_type nemb_tp,
4011 struct lpfc_dmabuf *dmabuf)
4013 struct fc_bsg_request *bsg_request = job->request;
4014 struct fc_bsg_reply *bsg_reply = job->reply;
4015 struct dfc_mbox_req *mbox_req;
4016 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4017 uint32_t ext_buf_cnt;
4018 struct bsg_job_data *dd_data = NULL;
4019 LPFC_MBOXQ_t *pmboxq = NULL;
4022 int rc = SLI_CONFIG_NOT_HANDLED, i;
4025 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4027 /* pointer to the start of mailbox command */
4028 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
/* determine and range-check the external buffer count per buffer type */
4030 if (nemb_tp == nemb_mse) {
4031 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
4032 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
4033 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
4034 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4035 "2953 Failed SLI_CONFIG(mse) wr, "
4036 "ext_buf_cnt(%d) out of range(%d)\n",
4038 LPFC_MBX_SLI_CONFIG_MAX_MSE);
4041 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4042 "2949 Handled SLI_CONFIG(mse) wr, "
4043 "ext_buf_cnt:%d\n", ext_buf_cnt);
4045 /* sanity check on interface type for support */
4046 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
4047 LPFC_SLI_INTF_IF_TYPE_2)
4049 /* nemb_tp == nemb_hbd */
4050 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
4051 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
4052 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4053 "2954 Failed SLI_CONFIG(hbd) wr, "
4054 "ext_buf_cnt(%d) out of range(%d)\n",
4056 LPFC_MBX_SLI_CONFIG_MAX_HBD);
4059 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4060 "2950 Handled SLI_CONFIG(hbd) wr, "
4061 "ext_buf_cnt:%d\n", ext_buf_cnt);
4064 /* before dma buffer descriptor setup */
4065 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4066 sta_pre_addr, dmabuf, ext_buf_cnt);
4068 if (ext_buf_cnt == 0)
4071 /* for the first external buffer */
4072 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
4074 /* after dma descriptor setup */
4075 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
4076 sta_pos_addr, dmabuf, ext_buf_cnt);
4078 /* log for looking forward */
4079 for (i = 1; i < ext_buf_cnt; i++) {
4080 if (nemb_tp == nemb_mse)
4081 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4082 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
4083 i, sli_cfg_mbx->un.sli_config_emb0_subsys.
4086 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4087 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
4088 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4089 &sli_cfg_mbx->un.sli_config_emb1_subsys.
4093 /* multi-buffer handling context */
4094 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
4095 phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
4096 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
4097 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
4098 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
4099 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
/* single external buffer: everything is in hand, issue immediately */
4101 if (ext_buf_cnt == 1) {
4102 /* bsg tracking structure */
4103 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4109 /* mailbox command structure for base driver */
4110 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4115 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4116 pmb = &pmboxq->u.mb;
4117 mbx = (uint8_t *)dmabuf->virt;
4118 memcpy(pmb, mbx, sizeof(*pmb));
4119 pmb->mbxOwner = OWN_HOST;
4120 pmboxq->vport = phba->pport;
4122 /* callback for multi-buffer write mailbox command */
4123 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4125 /* context fields to callback function */
4126 pmboxq->context1 = dd_data;
4127 dd_data->type = TYPE_MBOX;
4128 dd_data->set_job = job;
4129 dd_data->context_un.mbox.pmboxq = pmboxq;
4130 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
4131 job->dd_data = dd_data;
4135 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4136 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4137 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4138 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4139 "2955 Issued SLI_CONFIG ext-buffer "
4140 "maibox command, rc:x%x\n", rc);
4141 return SLI_CONFIG_HANDLED;
4143 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4144 "2956 Failed to issue SLI_CONFIG ext-buffer "
4145 "maibox command, rc:x%x\n", rc);
4150 /* wait for additional external buffers */
4152 bsg_reply->result = 0;
4153 bsg_job_done(job, bsg_reply->result,
4154 bsg_reply->reply_payload_rcv_len);
4155 return SLI_CONFIG_HANDLED;
4159 mempool_free(pmboxq, phba->mbox_mem_pool);
4166 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
4167 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job carrying the mailbox request.
4169 * @dmabuf: Pointer to the DMA buffer holding the SLI_CONFIG command.
4171 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
4172 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
4173 * with embedded subsystem 0x1 and opcodes with external HBDs. It dispatches
 * per subsystem/opcode to the read or write command path.
 *
 * Return: SLI_CONFIG_HANDLED when the opcode was dispatched and handled,
 * SLI_CONFIG_NOT_HANDLED otherwise (session state is then reset to IDLE).
4176 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4177 struct lpfc_dmabuf *dmabuf)
4179 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4182 int rc = SLI_CONFIG_NOT_HANDLED;
4184 /* state change on new multi-buffer pass-through mailbox command */
4185 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
4187 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
/* non-embedded (emb0) payload: mse buffers */
4189 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
4190 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
4191 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
4192 &sli_cfg_mbx->un.sli_config_emb0_subsys);
4193 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
4194 &sli_cfg_mbx->un.sli_config_emb0_subsys);
4195 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
4197 case FCOE_OPCODE_READ_FCF:
4198 case FCOE_OPCODE_GET_DPORT_RESULTS:
4199 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4200 "2957 Handled SLI_CONFIG "
4201 "subsys_fcoe, opcode:x%x\n",
4203 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4206 case FCOE_OPCODE_ADD_FCF:
4207 case FCOE_OPCODE_SET_DPORT_MODE:
4208 case LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE:
4209 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4210 "2958 Handled SLI_CONFIG "
4211 "subsys_fcoe, opcode:x%x\n",
4213 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4217 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4218 "2959 Reject SLI_CONFIG "
4219 "subsys_fcoe, opcode:x%x\n",
4224 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4226 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4227 case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4228 case COMN_OPCODE_GET_PROFILE_CONFIG:
4229 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4230 "3106 Handled SLI_CONFIG "
4231 "subsys_comn, opcode:x%x\n",
4233 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4237 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4238 "3107 Reject SLI_CONFIG "
4239 "subsys_comn, opcode:x%x\n",
4245 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4246 "2977 Reject SLI_CONFIG "
4247 "subsys:x%d, opcode:x%x\n",
/* embedded (emb1) payload: hbd buffers */
4252 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4253 &sli_cfg_mbx->un.sli_config_emb1_subsys);
4254 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4255 &sli_cfg_mbx->un.sli_config_emb1_subsys);
4256 if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4258 case COMN_OPCODE_READ_OBJECT:
4259 case COMN_OPCODE_READ_OBJECT_LIST:
4260 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4261 "2960 Handled SLI_CONFIG "
4262 "subsys_comn, opcode:x%x\n",
4264 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4267 case COMN_OPCODE_WRITE_OBJECT:
4268 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4269 "2961 Handled SLI_CONFIG "
4270 "subsys_comn, opcode:x%x\n",
4272 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4276 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4277 "2962 Not handled SLI_CONFIG "
4278 "subsys_comn, opcode:x%x\n",
4280 rc = SLI_CONFIG_NOT_HANDLED;
4284 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4285 "2978 Not handled SLI_CONFIG "
4286 "subsys:x%d, opcode:x%x\n",
4288 rc = SLI_CONFIG_NOT_HANDLED;
4292 /* state reset on not handled new multi-buffer mailbox command */
4293 if (rc != SLI_CONFIG_HANDLED)
4294 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4300 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
4301 * @phba: Pointer to HBA context object.
4303 * This routine is for requesting to abort a pass-through mailbox command with
4304 * multiple external buffers due to error condition. If the command is already
 * with the port it is flagged for abort before the session is reset.
4307 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4309 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4310 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4312 lpfc_bsg_mbox_ext_session_reset(phba);
4317 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4318 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job to receive the buffer contents.
4321 * This routine extracts the next mailbox read external buffer back to
4322 * user space through BSG. When the last buffer of the session has been
 * delivered, the multi-buffer session state is reset.
 *
 * Return: SLI_CONFIG_HANDLED on success.
4325 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct bsg_job *job)
4327 struct fc_bsg_reply *bsg_reply = job->reply;
4328 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4329 struct lpfc_dmabuf *dmabuf;
/* seqNum tracks which external buffer of the session comes next */
4334 index = phba->mbox_ext_buf_ctx.seqNum;
4335 phba->mbox_ext_buf_ctx.seqNum++;
4337 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4338 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
/* buffer length comes from the matching mse/hbd descriptor */
4340 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4341 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4342 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4343 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4344 "2963 SLI_CONFIG (mse) ext-buffer rd get "
4345 "buffer[%d], size:%d\n", index, size);
4347 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4348 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4349 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4350 "2964 SLI_CONFIG (hbd) ext-buffer rd get "
4351 "buffer[%d], size:%d\n", index, size);
4353 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4355 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4356 struct lpfc_dmabuf, list);
4357 list_del_init(&dmabuf->list);
4359 /* after dma buffer descriptor setup */
4360 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4361 mbox_rd, dma_ebuf, sta_pos_addr,
/* copy the DMA buffer contents into the job's reply scatterlist */
4364 pbuf = (uint8_t *)dmabuf->virt;
4365 bsg_reply->reply_payload_rcv_len =
4366 sg_copy_from_buffer(job->reply_payload.sg_list,
4367 job->reply_payload.sg_cnt,
4370 lpfc_bsg_dma_page_free(phba, dmabuf);
4372 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4373 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4374 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4375 "command session done\n");
4376 lpfc_bsg_mbox_ext_session_reset(phba);
4379 bsg_reply->result = 0;
4380 bsg_job_done(job, bsg_reply->result,
4381 bsg_reply->reply_payload_rcv_len);
4383 return SLI_CONFIG_HANDLED;
4387 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4388 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job supplying the buffer contents.
4389 * @dmabuf: Pointer to the DMA buffer to fill from user space.
4391 * This routine sets up the next mailbox write external buffer obtained
4392 * from user space through BSG. When the final buffer of the session has
 * arrived, the complete SLI_CONFIG mailbox command is issued to the port.
 *
 * Return: SLI_CONFIG_HANDLED when the buffer was accepted (and, for the
 * final buffer, the mailbox command was issued).
4395 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job,
4396 struct lpfc_dmabuf *dmabuf)
4398 struct fc_bsg_reply *bsg_reply = job->reply;
4399 struct bsg_job_data *dd_data = NULL;
4400 LPFC_MBOXQ_t *pmboxq = NULL;
4402 enum nemb_type nemb_tp;
/* seqNum tracks which external buffer of the session comes next */
4408 index = phba->mbox_ext_buf_ctx.seqNum;
4409 phba->mbox_ext_buf_ctx.seqNum++;
4410 nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4412 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
/* copy the user payload from the job's request scatterlist into DMA memory */
4418 pbuf = (uint8_t *)dmabuf->virt;
4419 size = job->request_payload.payload_len;
4420 sg_copy_to_buffer(job->request_payload.sg_list,
4421 job->request_payload.sg_cnt,
4424 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4425 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4426 "2966 SLI_CONFIG (mse) ext-buffer wr set "
4427 "buffer[%d], size:%d\n",
4428 phba->mbox_ext_buf_ctx.seqNum, size);
4431 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4432 "2967 SLI_CONFIG (hbd) ext-buffer wr set "
4433 "buffer[%d], size:%d\n",
4434 phba->mbox_ext_buf_ctx.seqNum, size);
4438 /* set up external buffer descriptor and add to external buffer list */
4439 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4440 phba->mbox_ext_buf_ctx.mbx_dmabuf,
4442 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4444 /* after write dma buffer */
4445 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4446 mbox_wr, dma_ebuf, sta_pos_addr,
/* last buffer received: build and issue the mailbox command to the port */
4449 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4450 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4451 "2968 SLI_CONFIG ext-buffer wr all %d "
4452 "ebuffers received\n",
4453 phba->mbox_ext_buf_ctx.numBuf);
4454 /* mailbox command structure for base driver */
4455 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4460 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4461 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4462 pmb = &pmboxq->u.mb;
4463 memcpy(pmb, pbuf, sizeof(*pmb));
4464 pmb->mbxOwner = OWN_HOST;
4465 pmboxq->vport = phba->pport;
4467 /* callback for multi-buffer write mailbox command */
4468 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4470 /* context fields to callback function */
4471 pmboxq->context1 = dd_data;
4472 dd_data->type = TYPE_MBOX;
4473 dd_data->set_job = job;
4474 dd_data->context_un.mbox.pmboxq = pmboxq;
4475 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4476 job->dd_data = dd_data;
4479 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4481 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4482 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4483 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4484 "2969 Issued SLI_CONFIG ext-buffer "
4485 "maibox command, rc:x%x\n", rc);
4486 return SLI_CONFIG_HANDLED;
4488 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4489 "2970 Failed to issue SLI_CONFIG ext-buffer "
4490 "maibox command, rc:x%x\n", rc);
4495 /* wait for additional external buffers */
4496 bsg_reply->result = 0;
4497 bsg_job_done(job, bsg_reply->result,
4498 bsg_reply->reply_payload_rcv_len);
4499 return SLI_CONFIG_HANDLED;
4502 lpfc_bsg_dma_page_free(phba, dmabuf);
4509 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4510 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job carrying the external buffer request.
4512 * @dmabuf: Pointer to the external buffer's DMA buffer descriptor.
4514 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
4515 * command with multiple non-embedded external buffers. It validates the
 * session state for the current direction (read or write), aborting the
 * session on a state mismatch, and then hands off to the read-get or
 * write-set helper.
4518 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct bsg_job *job,
4519 struct lpfc_dmabuf *dmabuf)
4523 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4524 "2971 SLI_CONFIG buffer (type:x%x)\n",
4525 phba->mbox_ext_buf_ctx.mboxType);
4527 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
/* read: mailbox must already have completed (DONE) before buffers go out */
4528 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4529 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4530 "2972 SLI_CONFIG rd buffer state "
4532 phba->mbox_ext_buf_ctx.state);
4533 lpfc_bsg_mbox_ext_abort(phba);
4536 rc = lpfc_bsg_read_ebuf_get(phba, job);
4537 if (rc == SLI_CONFIG_HANDLED)
4538 lpfc_bsg_dma_page_free(phba, dmabuf);
4539 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
/* write: session must still be collecting buffers on the host side */
4540 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4541 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4542 "2973 SLI_CONFIG wr buffer state "
4544 phba->mbox_ext_buf_ctx.state);
4545 lpfc_bsg_mbox_ext_abort(phba);
4548 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4554 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4555 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job carrying the mailbox request.
4557 * @dmabuf: Pointer to the DMA buffer holding the command or external buffer.
4559 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4560 * (0x9B) mailbox commands and external buffers: the first request of a
 * session (extSeqNum == 1 while IDLE) starts the mailbox handling; later
 * requests deliver additional external buffers after broken-pipe checks
 * against the saved session tag and sequence number.
 *
 * Return: SLI_CONFIG_NOT_HANDLED for a plain (single/no external buffer)
 * command, otherwise the result of the mailbox/ebuf handler.
4563 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct bsg_job *job,
4564 struct lpfc_dmabuf *dmabuf)
4566 struct fc_bsg_request *bsg_request = job->request;
4567 struct dfc_mbox_req *mbox_req;
4568 int rc = SLI_CONFIG_NOT_HANDLED;
4571 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4573 /* mbox command with/without single external buffer */
4574 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4577 /* mbox command and first external buffer */
4578 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4579 if (mbox_req->extSeqNum == 1) {
4580 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4581 "2974 SLI_CONFIG mailbox: tag:%d, "
4582 "seq:%d\n", mbox_req->extMboxTag,
4583 mbox_req->extSeqNum);
4584 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4587 goto sli_cfg_ext_error;
4591 * handle additional external buffers
4594 /* check broken pipe conditions */
4595 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4596 goto sli_cfg_ext_error;
4597 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4598 goto sli_cfg_ext_error;
4599 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4600 goto sli_cfg_ext_error;
4602 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4603 "2975 SLI_CONFIG mailbox external buffer: "
4604 "extSta:x%x, tag:%d, seq:%d\n",
4605 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4606 mbox_req->extSeqNum);
4607 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4611 /* all other cases, broken pipe */
4612 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4613 "2976 SLI_CONFIG mailbox broken pipe: "
4614 "ctxSta:x%x, ctxNumBuf:%d "
4615 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4616 phba->mbox_ext_buf_ctx.state,
4617 phba->mbox_ext_buf_ctx.numBuf,
4618 phba->mbox_ext_buf_ctx.mbxTag,
4619 phba->mbox_ext_buf_ctx.seqNum,
4620 mbox_req->extMboxTag, mbox_req->extSeqNum);
4622 lpfc_bsg_mbox_ext_session_reset(phba);
4628 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4629 * @phba: Pointer to HBA context object.
 * @job: Pointer to the BSG job carrying the mailbox request.
4631 * @vport: Pointer to a vport object.
4633 * Allocate a tracking object, mailbox command memory, get a mailbox
4634 * from the mailbox pool, copy the caller mailbox command.
4636 * If the port is offline or SLI is not active, poll for the command (port is
4637 * being reset) and complete the job inline, otherwise issue the mailbox
4638 * command and let our completion handler finish the command.
 *
 * Return: 1 when the mailbox command was issued asynchronously (job will be
 * completed by the mbox_cmpl handler); otherwise the job is completed here
 * and the final rc is returned via the common exit path.
4641 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct bsg_job *job,
4642 struct lpfc_vport *vport)
4644 struct fc_bsg_request *bsg_request = job->request;
4645 struct fc_bsg_reply *bsg_reply = job->reply;
4646 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4647 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4648 /* a 4k buffer to hold the mb and extended data from/to the bsg */
4649 uint8_t *pmbx = NULL;
4650 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4651 struct lpfc_dmabuf *dmabuf = NULL;
4652 struct dfc_mbox_req *mbox_req;
4653 struct READ_EVENT_LOG_VAR *rdEventLog;
4654 uint32_t transmit_length, receive_length, mode;
4655 struct lpfc_mbx_sli4_config *sli4_config;
4656 struct lpfc_mbx_nembed_cmd *nembed_sge;
4657 struct ulp_bde64 *bde;
4658 uint8_t *ext = NULL;
4663 /* in case no data is transferred */
4664 bsg_reply->reply_payload_rcv_len = 0;
4666 /* sanity check to protect driver */
4667 if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4668 job->request_payload.payload_len > BSG_MBOX_SIZE) {
4674 * Don't allow mailbox commands to be sent when blocked or when in
4675 * the middle of discovery
4677 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4683 (struct dfc_mbox_req *)bsg_request->rqst_data.h_vendor.vendor_cmd;
4685 /* check if requested extended data lengths are valid */
4686 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4687 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4692 dmabuf = lpfc_bsg_dma_page_alloc(phba);
4693 if (!dmabuf || !dmabuf->virt) {
4698 /* Get the mailbox command or external buffer from BSG */
4699 pmbx = (uint8_t *)dmabuf->virt;
4700 size = job->request_payload.payload_len;
4701 sg_copy_to_buffer(job->request_payload.sg_list,
4702 job->request_payload.sg_cnt, pmbx, size);
4704 /* Handle possible SLI_CONFIG with non-embedded payloads */
4705 if (phba->sli_rev == LPFC_SLI_REV4) {
4706 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4707 if (rc == SLI_CONFIG_HANDLED)
4711 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
/* verify the application may issue this particular mailbox command */
4714 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4716 goto job_done; /* must be negative */
4718 /* allocate our bsg tracking structure */
4719 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4721 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4722 "2727 Failed allocation of dd_data\n");
4727 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4732 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4734 pmb = &pmboxq->u.mb;
4735 memcpy(pmb, pmbx, sizeof(*pmb));
4736 pmb->mbxOwner = OWN_HOST;
4737 pmboxq->vport = vport;
4739 /* If HBA encountered an error attention, allow only DUMP
4740 * or RESTART mailbox commands until the HBA is restarted.
4742 if (phba->pport->stopped &&
4743 pmb->mbxCommand != MBX_DUMP_MEMORY &&
4744 pmb->mbxCommand != MBX_RESTART &&
4745 pmb->mbxCommand != MBX_WRITE_VPARMS &&
4746 pmb->mbxCommand != MBX_WRITE_WWN)
4747 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4748 "2797 mbox: Issued mailbox cmd "
4749 "0x%x while in stopped state.\n",
4752 /* extended mailbox commands will need an extended buffer */
4753 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4755 ext = from + sizeof(MAILBOX_t);
4756 pmboxq->context2 = ext;
4757 pmboxq->in_ext_byte_len =
4758 mbox_req->inExtWLen * sizeof(uint32_t);
4759 pmboxq->out_ext_byte_len =
4760 mbox_req->outExtWLen * sizeof(uint32_t);
4761 pmboxq->mbox_offset_word = mbox_req->mbOffset;
4764 /* biu diag will need a kernel buffer to transfer the data
4765 * allocate our own buffer and setup the mailbox command to
4768 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4769 transmit_length = pmb->un.varWords[1];
4770 receive_length = pmb->un.varWords[4];
4771 /* transmit length cannot be greater than receive length or
4772 * mailbox extension size
4774 if ((transmit_length > receive_length) ||
4775 (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
/* point xmit/rcv BDEs at the space just past the mailbox in our page */
4779 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4780 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4781 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4782 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4784 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4785 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4786 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4787 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4788 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4789 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4790 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4791 rdEventLog = &pmb->un.varRdEventLog;
4792 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4793 mode = bf_get(lpfc_event_log, rdEventLog);
4795 /* receive length cannot be greater than mailbox
4798 if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4803 /* mode zero uses a bde like biu diags command */
4805 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4806 + sizeof(MAILBOX_t));
4807 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4808 + sizeof(MAILBOX_t));
4810 } else if (phba->sli_rev == LPFC_SLI_REV4) {
4811 /* Let type 4 (well known data) through because the data is
4812 * returned in varwords[4-8]
4813 * otherwise check the receive length and fetch the buffer addr
4815 if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4816 (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4817 /* rebuild the command for sli4 using our own buffers
4818 * like we do for biu diags
4820 receive_length = pmb->un.varWords[2];
4821 /* receive length cannot be greater than mailbox
4824 if (receive_length == 0) {
4828 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4829 + sizeof(MAILBOX_t));
4830 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4831 + sizeof(MAILBOX_t));
4832 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4833 pmb->un.varUpdateCfg.co) {
4834 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4836 /* bde size cannot be greater than mailbox ext size */
4837 if (bde->tus.f.bdeSize >
4838 BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4842 bde->addrHigh = putPaddrHigh(dmabuf->phys
4843 + sizeof(MAILBOX_t));
4844 bde->addrLow = putPaddrLow(dmabuf->phys
4845 + sizeof(MAILBOX_t));
4846 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4847 /* Handling non-embedded SLI_CONFIG mailbox command */
4848 sli4_config = &pmboxq->u.mqe.un.sli4_config;
4849 if (!bf_get(lpfc_mbox_hdr_emb,
4850 &sli4_config->header.cfg_mhdr)) {
4851 /* rebuild the command for sli4 using our
4852 * own buffers like we do for biu diags
4854 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4855 &pmb->un.varWords[0];
4856 receive_length = nembed_sge->sge[0].length;
4858 /* receive length cannot be greater than
4859 * mailbox extension size
4861 if ((receive_length == 0) ||
4863 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4868 nembed_sge->sge[0].pa_hi =
4869 putPaddrHigh(dmabuf->phys
4870 + sizeof(MAILBOX_t));
4871 nembed_sge->sge[0].pa_lo =
4872 putPaddrLow(dmabuf->phys
4873 + sizeof(MAILBOX_t));
4878 dd_data->context_un.mbox.dmabuffers = dmabuf;
4880 /* setup wake call as IOCB callback */
4881 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4883 /* setup context field to pass wait_queue pointer to wake function */
4884 pmboxq->context1 = dd_data;
4885 dd_data->type = TYPE_MBOX;
4886 dd_data->set_job = job;
4887 dd_data->context_un.mbox.pmboxq = pmboxq;
4888 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4889 dd_data->context_un.mbox.ext = ext;
4890 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4891 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4892 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4893 job->dd_data = dd_data;
/* port offline or SLI inactive: must poll and complete the job inline */
4895 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4896 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4897 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4898 if (rc != MBX_SUCCESS) {
4899 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4903 /* job finished, copy the data */
4904 memcpy(pmbx, pmb, sizeof(*pmb));
4905 bsg_reply->reply_payload_rcv_len =
4906 sg_copy_from_buffer(job->reply_payload.sg_list,
4907 job->reply_payload.sg_cnt,
4909 /* not waiting mbox already done */
4914 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4915 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4916 return 1; /* job started */
4919 /* common exit for error or job completed inline */
4921 mempool_free(pmboxq, phba->mbox_mem_pool);
4922 lpfc_bsg_dma_page_free(phba, dmabuf);
4930 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4931 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
4934 lpfc_bsg_mbox_cmd(struct bsg_job *job)
4936 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
4937 struct fc_bsg_request *bsg_request = job->request;
4938 struct fc_bsg_reply *bsg_reply = job->reply;
4939 struct lpfc_hba *phba = vport->phba;
4940 struct dfc_mbox_req *mbox_req;
4943 /* mix-and-match backward compatibility */
4944 bsg_reply->reply_payload_rcv_len = 0;
/* Older management applications send a dfc_mbox_req that is smaller than
 * the current definition (it lacks the extMboxTag/extSeqNum fields).
 * Detect that from the request length and zero the missing fields so the
 * rest of the driver sees defined values.
 */
4945 if (job->request_len <
4946 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4947 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4948 "2737 Mix-and-match backward compatibility "
4949 "between MBOX_REQ old size:%d and "
4950 "new request size:%d\n",
4951 (int)(job->request_len -
4952 sizeof(struct fc_bsg_request)),
4953 (int)sizeof(struct dfc_mbox_req));
4954 mbox_req = (struct dfc_mbox_req *)
4955 bsg_request->rqst_data.h_vendor.vendor_cmd;
4956 mbox_req->extMboxTag = 0;
4957 mbox_req->extSeqNum = 0;
/* Hand the mailbox off to the issue path.  A return of 1 means the job
 * was started and will be completed later by the mailbox completion
 * handler (see lpfc_bsg_issue_mbox); any other value is handled below.
 */
4960 rc = lpfc_bsg_issue_mbox(phba, job, vport);
/* Job completed inline (e.g. polled mode): report success to bsg now. */
4964 bsg_reply->result = 0;
4965 job->dd_data = NULL;
4966 bsg_job_done(job, bsg_reply->result,
4967 bsg_reply->reply_payload_rcv_len);
4969 /* job submitted, will complete later*/
4970 rc = 0; /* return zero, no error */
4972 /* some error occurred */
4973 bsg_reply->result = rc;
4974 job->dd_data = NULL;
4981 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
4982 * @phba: Pointer to HBA context object.
4983 * @cmdiocbq: Pointer to command iocb.
4984 * @rspiocbq: Pointer to response iocb.
4986 * This function is the completion handler for iocbs issued using
4987 * lpfc_menlo_cmd function. This function is called by the
4988 * ring event handler function without any lock held. This function
4989 * can be called from both worker thread context and interrupt
4990 * context. This function also can be called from another thread which
4991 * cleans up the SLI layer objects.
4992 * This function copies the contents of the response iocb to the
4993 * response iocb memory object provided by the caller of
4994 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
4995 * sleeps for the iocb completion.
4998 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4999 struct lpfc_iocbq *cmdiocbq,
5000 struct lpfc_iocbq *rspiocbq)
5002 struct bsg_job_data *dd_data;
5003 struct bsg_job *job;
5004 struct fc_bsg_reply *bsg_reply;
5006 struct lpfc_dmabuf *bmp, *cmp, *rmp;
5007 struct lpfc_bsg_menlo *menlo;
5008 unsigned long flags;
5009 struct menlo_response *menlo_resp;
5010 unsigned int rsp_size;
/* Recover the tracking state that lpfc_menlo_cmd stashed in the
 * command iocb's context fields: dd_data (job tracking), cmp (command
 * payload buffer list) and bmp (the BPL descriptor buffer).
 */
5013 dd_data = cmdiocbq->context1;
5014 cmp = cmdiocbq->context2;
5015 bmp = cmdiocbq->context3;
5016 menlo = &dd_data->context_un.menlo;
5018 rsp = &rspiocbq->iocb;
5020 /* Determine if job has been aborted */
5021 spin_lock_irqsave(&phba->ct_ev_lock, flags);
5022 job = dd_data->set_job;
5024 bsg_reply = job->reply;
5025 /* Prevent timeout handling from trying to abort job */
5026 job->dd_data = NULL;
5028 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5030 /* Copy the job data or set the failing status for the job */
5033 /* always return the xri, this would be used in the case
5034 * of a menlo download to allow the data to be sent as a
5035 * continuation of the exchange.
5038 menlo_resp = (struct menlo_response *)
5039 bsg_reply->reply_data.vendor_reply.vendor_rsp;
5040 menlo_resp->xri = rsp->ulpContext;
/* Map firmware status to an errno-style result.  For LOCAL_REJECT the
 * low bits of ulpWord[4] carry the specific IOERR_* reason code.
 */
5041 if (rsp->ulpStatus) {
5042 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
5043 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
5044 case IOERR_SEQUENCE_TIMEOUT:
5047 case IOERR_INVALID_RPI:
/* Success: copy the received data from the reply buffer list (rmp) into
 * the job's reply payload scatter/gather list.
 */
5058 rsp_size = rsp->un.genreq64.bdl.bdeSize;
5059 bsg_reply->reply_payload_rcv_len =
5060 lpfc_bsg_copy_data(rmp, &job->reply_payload,
/* Release all resources acquired in lpfc_menlo_cmd, in reverse order. */
5066 lpfc_sli_release_iocbq(phba, cmdiocbq);
5067 lpfc_free_bsg_buffers(phba, cmp);
5068 lpfc_free_bsg_buffers(phba, rmp);
5069 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5073 /* Complete the job if active */
5076 bsg_reply->result = rc;
5077 bsg_job_done(job, bsg_reply->result,
5078 bsg_reply->reply_payload_rcv_len);
5085 * lpfc_menlo_cmd - send an ioctl for menlo hardware
5086 * @job: fc_bsg_job to handle
5088 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
5089 * all the command completions will return the xri for the command.
5090 * For menlo data requests a gen request 64 CX is used to continue the exchange
5091 * supplied in the menlo request header xri field.
5094 lpfc_menlo_cmd(struct bsg_job *job)
5096 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5097 struct fc_bsg_request *bsg_request = job->request;
5098 struct fc_bsg_reply *bsg_reply = job->reply;
5099 struct lpfc_hba *phba = vport->phba;
5100 struct lpfc_iocbq *cmdiocbq;
5103 struct menlo_command *menlo_cmd;
5104 struct lpfc_dmabuf *bmp = NULL, *cmp = NULL, *rmp = NULL;
5107 struct bsg_job_data *dd_data;
5108 struct ulp_bde64 *bpl = NULL;
5110 /* in case no data is returned return just the return code */
5111 bsg_reply->reply_payload_rcv_len = 0;
/* Validate the user-supplied request and reply areas are big enough
 * before dereferencing anything inside them.
 */
5113 if (job->request_len <
5114 sizeof(struct fc_bsg_request) +
5115 sizeof(struct menlo_command)) {
5116 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5117 "2784 Received MENLO_CMD request below "
5123 if (job->reply_len <
5124 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
5125 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5126 "2785 Received MENLO_CMD reply below "
/* Menlo commands are only valid on adapters that advertise support. */
5132 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
5133 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5134 "2786 Adapter does not support menlo "
5140 menlo_cmd = (struct menlo_command *)
5141 bsg_request->rqst_data.h_vendor.vendor_cmd;
5143 /* allocate our bsg tracking structure */
5144 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
5146 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5147 "2787 Failed allocation of dd_data\n");
/* bmp holds the buffer pointer list (BPL) that describes the DMA
 * segments for both the command and the reply payloads.
 */
5152 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5158 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
5164 INIT_LIST_HEAD(&bmp->list);
/* Split the BPL entries between the command buffers (cmp) and the
 * reply buffers (rmp): request segments first, reply segments after.
 */
5166 bpl = (struct ulp_bde64 *)bmp->virt;
5167 request_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64);
5168 cmp = lpfc_alloc_bsg_buffers(phba, job->request_payload.payload_len,
5169 1, bpl, &request_nseg);
/* Copy the user's command payload into the newly allocated buffers. */
5174 lpfc_bsg_copy_data(cmp, &job->request_payload,
5175 job->request_payload.payload_len, 1);
5177 bpl += request_nseg;
5178 reply_nseg = LPFC_BPL_SIZE/sizeof(struct ulp_bde64) - request_nseg;
5179 rmp = lpfc_alloc_bsg_buffers(phba, job->reply_payload.payload_len, 0,
5186 cmdiocbq = lpfc_sli_get_iocbq(phba);
/* Build a GEN_REQUEST64 iocb whose BDL points at the BPL above. */
5192 cmd = &cmdiocbq->iocb;
5193 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
5194 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
5195 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
5196 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
5197 cmd->un.genreq64.bdl.bdeSize =
5198 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
5199 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
5200 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
5201 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
5202 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
5203 cmd->ulpBdeCount = 1;
5204 cmd->ulpClass = CLASS3;
5205 cmd->ulpOwner = OWN_CHIP;
5206 cmd->ulpLe = 1; /* Limited Edition */
5207 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
5208 cmdiocbq->vport = phba->pport;
5209 /* We want the firmware to timeout before we do */
5210 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
5211 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
/* Stash tracking state for the completion handler to recover. */
5212 cmdiocbq->context1 = dd_data;
5213 cmdiocbq->context2 = cmp;
5214 cmdiocbq->context3 = bmp;
/* MENLO_CMD opens a new exchange (CR); MENLO_DATA continues the
 * exchange identified by the xri the caller supplied (CX).
 */
5215 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
5216 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
5217 cmd->ulpPU = MENLO_PU; /* 3 */
5218 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
5219 cmd->ulpContext = MENLO_CONTEXT; /* 0 */
5221 cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
5223 cmd->un.ulpWord[4] = 0;
5224 cmd->ulpContext = menlo_cmd->xri;
5227 dd_data->type = TYPE_MENLO;
5228 dd_data->set_job = job;
5229 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
5230 dd_data->context_un.menlo.rmp = rmp;
5231 job->dd_data = dd_data;
5233 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
5235 if (rc == IOCB_SUCCESS)
5236 return 0; /* done for now */
/* Issue failed: unwind every resource acquired above, in reverse. */
5238 lpfc_sli_release_iocbq(phba, cmdiocbq);
5241 lpfc_free_bsg_buffers(phba, rmp);
5243 lpfc_free_bsg_buffers(phba, cmp);
5246 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5251 /* make error code available to userspace */
5252 bsg_reply->result = rc;
5253 job->dd_data = NULL;
/* lpfc_forced_link_speed - report whether the HBA supports forced link speed
 *
 * Validates the request/reply sizes, then fills the vendor reply with
 * LPFC_FORCED_LINK_SPEED_SUPPORTED or _NOT_SUPPORTED based on the
 * HBA_FORCED_LINK_SPEED bit in phba->hba_flag.
 */
5258 lpfc_forced_link_speed(struct bsg_job *job)
5260 struct Scsi_Host *shost = fc_bsg_to_shost(job);
5261 struct lpfc_vport *vport = shost_priv(shost);
5262 struct lpfc_hba *phba = vport->phba;
5263 struct fc_bsg_reply *bsg_reply = job->reply;
5264 struct forced_link_speed_support_reply *forced_reply;
/* Reject undersized user requests before looking at the payload. */
5267 if (job->request_len <
5268 sizeof(struct fc_bsg_request) +
5269 sizeof(struct get_forced_link_speed_support)) {
5270 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5271 "0048 Received FORCED_LINK_SPEED request "
5272 "below minimum size\n");
5277 forced_reply = (struct forced_link_speed_support_reply *)
5278 bsg_reply->reply_data.vendor_reply.vendor_rsp;
/* The reply area must be large enough to hold our answer. */
5280 if (job->reply_len <
5281 sizeof(struct fc_bsg_request) +
5282 sizeof(struct forced_link_speed_support_reply)) {
5283 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
5284 "0049 Received FORCED_LINK_SPEED reply below "
5290 forced_reply->supported = (phba->hba_flag & HBA_FORCED_LINK_SPEED)
5291 ? LPFC_FORCED_LINK_SPEED_SUPPORTED
5292 : LPFC_FORCED_LINK_SPEED_NOT_SUPPORTED;
5294 bsg_reply->result = rc;
5296 bsg_job_done(job, bsg_reply->result,
5297 bsg_reply->reply_payload_rcv_len);
5302 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5303 * @job: fc_bsg_job to handle
5306 lpfc_bsg_hst_vendor(struct bsg_job *job)
5308 struct fc_bsg_request *bsg_request = job->request;
5309 struct fc_bsg_reply *bsg_reply = job->reply;
/* The vendor opcode is carried in the first word of vendor_cmd. */
5310 int command = bsg_request->rqst_data.h_vendor.vendor_cmd[0];
/* Dispatch to the per-command handler; each handler owns job
 * completion on its own success path.
 */
5314 case LPFC_BSG_VENDOR_SET_CT_EVENT:
5315 rc = lpfc_bsg_hba_set_event(job);
5317 case LPFC_BSG_VENDOR_GET_CT_EVENT:
5318 rc = lpfc_bsg_hba_get_event(job);
5320 case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5321 rc = lpfc_bsg_send_mgmt_rsp(job);
5323 case LPFC_BSG_VENDOR_DIAG_MODE:
5324 rc = lpfc_bsg_diag_loopback_mode(job);
5326 case LPFC_BSG_VENDOR_DIAG_MODE_END:
5327 rc = lpfc_sli4_bsg_diag_mode_end(job);
5329 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5330 rc = lpfc_bsg_diag_loopback_run(job);
5332 case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5333 rc = lpfc_sli4_bsg_link_diag_test(job);
5335 case LPFC_BSG_VENDOR_GET_MGMT_REV:
5336 rc = lpfc_bsg_get_dfc_rev(job);
5338 case LPFC_BSG_VENDOR_MBOX:
5339 rc = lpfc_bsg_mbox_cmd(job);
5341 case LPFC_BSG_VENDOR_MENLO_CMD:
5342 case LPFC_BSG_VENDOR_MENLO_DATA:
5343 rc = lpfc_menlo_cmd(job);
5345 case LPFC_BSG_VENDOR_FORCED_LINK_SPEED:
5346 rc = lpfc_forced_link_speed(job);
/* Unrecognized vendor command: report the error to userspace. */
5350 bsg_reply->reply_payload_rcv_len = 0;
5351 /* make error code available to userspace */
5352 bsg_reply->result = rc;
5360 * lpfc_bsg_request - handle a bsg request from the FC transport
5361 * @job: fc_bsg_job to handle
5364 lpfc_bsg_request(struct bsg_job *job)
5366 struct fc_bsg_request *bsg_request = job->request;
5367 struct fc_bsg_reply *bsg_reply = job->reply;
/* Route the job based on the FC transport message code. */
5371 msgcode = bsg_request->msgcode;
5373 case FC_BSG_HST_VENDOR:
5374 rc = lpfc_bsg_hst_vendor(job);
5376 case FC_BSG_RPT_ELS:
5377 rc = lpfc_bsg_rport_els(job);
5380 rc = lpfc_bsg_send_mgmt_cmd(job);
/* Unsupported msgcode: report the error back to userspace. */
5384 bsg_reply->reply_payload_rcv_len = 0;
5385 /* make error code available to userspace */
5386 bsg_reply->result = rc;
5394 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
5395 * @job: fc_bsg_job that has timed out
5397 * This function just aborts the job's IOCB. The aborted IOCB will return to
5398 * the waiting function which will handle passing the error back to userspace
5401 lpfc_bsg_timeout(struct bsg_job *job)
5403 struct lpfc_vport *vport = shost_priv(fc_bsg_to_shost(job));
5404 struct lpfc_hba *phba = vport->phba;
5405 struct lpfc_iocbq *cmdiocb;
5406 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5407 struct bsg_job_data *dd_data;
5408 unsigned long flags;
5410 LIST_HEAD(completions);
5411 struct lpfc_iocbq *check_iocb, *next_iocb;
5413 /* if job's driver data is NULL, the command completed or is in the
5414 * the process of completing. In this case, return status to request
5415 * so the timeout is retried. This avoids double completion issues
5416 * and the request will be pulled off the timer queue when the
5417 * command's completion handler executes. Otherwise, prevent the
5418 * command's completion handler from executing the job done callback
5419 * and continue processing to abort the outstanding the command.
5422 spin_lock_irqsave(&phba->ct_ev_lock, flags);
5423 dd_data = (struct bsg_job_data *)job->dd_data;
5425 dd_data->set_job = NULL;
5426 job->dd_data = NULL;
5428 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5432 switch (dd_data->type) {
5434 /* Check to see if IOCB was issued to the port or not. If not,
5435 * remove it from the txq queue and call cancel iocbs.
5436 * Otherwise, call abort iotag
5438 cmdiocb = dd_data->context_un.iocb.cmdiocbq;
5439 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5441 spin_lock_irqsave(&phba->hbalock, flags);
5442 /* make sure the I/O abort window is still open */
5443 if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) {
5444 spin_unlock_irqrestore(&phba->hbalock, flags);
5447 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5449 if (check_iocb == cmdiocb) {
5450 list_move_tail(&check_iocb->list, &completions);
5454 if (list_empty(&completions))
5455 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5456 spin_unlock_irqrestore(&phba->hbalock, flags);
5457 if (!list_empty(&completions)) {
5458 lpfc_sli_cancel_iocbs(phba, &completions,
5459 IOSTAT_LOCAL_REJECT,
5465 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5469 /* Update the ext buf ctx state if needed */
5471 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5472 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5473 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5476 /* Check to see if IOCB was issued to the port or not. If not,
5477 * remove it from the txq queue and call cancel iocbs.
5478 * Otherwise, call abort iotag.
5480 cmdiocb = dd_data->context_un.menlo.cmdiocbq;
5481 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5483 spin_lock_irqsave(&phba->hbalock, flags);
5484 list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq,
5486 if (check_iocb == cmdiocb) {
5487 list_move_tail(&check_iocb->list, &completions);
5491 if (list_empty(&completions))
5492 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5493 spin_unlock_irqrestore(&phba->hbalock, flags);
5494 if (!list_empty(&completions)) {
5495 lpfc_sli_cancel_iocbs(phba, &completions,
5496 IOSTAT_LOCAL_REJECT,
5501 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5505 /* scsi transport fc fc_bsg_job_timeout expects a zero return code,
5506 * otherwise an error message will be displayed on the console
5507 * so always return success (zero)