1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2011 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/list.h>
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 #include <scsi/scsi_bsg_fc.h>
32 #include <scsi/fc/fc_fs.h>
37 #include "lpfc_sli4.h"
40 #include "lpfc_disc.h"
41 #include "lpfc_scsi.h"
43 #include "lpfc_logmsg.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_vport.h"
46 #include "lpfc_version.h"
/* Per-registration CT event waiter.  Instances live on the
 * phba->ct_ev_waiters list and are reference-counted via the embedded
 * kref (see lpfc_bsg_event_ref/lpfc_bsg_event_unref below).
 */
48 struct lpfc_bsg_event {
49 struct list_head node;
53 /* Event type and waiter identifiers */
58 /* next two flags are here for the auto-delete logic */
59 unsigned long wait_time_stamp; /* jiffies of last use by a waiter */
62 /* seen and not seen events */
63 struct list_head events_to_get;
64 struct list_head events_to_see;
66 /* job waiting for this event to finish */
67 struct fc_bsg_job *set_job;
/* Tracking state for a bsg-issued CT/ELS iocb; stored in the job's
 * bsg_job_data so the completion and timeout handlers can find the
 * resources to release.
 */
70 struct lpfc_bsg_iocb {
71 struct lpfc_iocbq *cmdiocbq;
72 struct lpfc_iocbq *rspiocbq;
73 struct lpfc_dmabuf *bmp; /* buffer-pointer-list DMA buffer */
74 struct lpfc_nodelist *ndlp;
76 /* job waiting for this iocb to finish */
77 struct fc_bsg_job *set_job;
/* Tracking state for a bsg-issued mailbox command. */
80 struct lpfc_bsg_mbox {
83 struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
84 uint8_t *ext; /* extended mailbox data */
85 uint32_t mbOffset; /* from app */
86 uint32_t inExtWLen; /* from app */
87 uint32_t outExtWLen; /* from app */
89 /* job waiting for this mbox command to finish */
90 struct fc_bsg_job *set_job;
93 #define MENLO_DID 0x0000FC0E
/* Tracking state for a bsg-issued Menlo management iocb
 * (Menlo devices are addressed by the well-known MENLO_DID above).
 */
95 struct lpfc_bsg_menlo {
96 struct lpfc_iocbq *cmdiocbq;
97 struct lpfc_iocbq *rspiocbq;
98 struct lpfc_dmabuf *bmp;
100 /* job waiting for this iocb to finish */
101 struct fc_bsg_job *set_job;
/* Per-job driver data hung off fc_bsg_job->dd_data; identifies which
 * kind of request is in flight and holds its tracking state.
 * NOTE(review): code below accesses these as dd_data->context_un.evt /
 * .iocb / .mbox / .menlo, so evt..menlo are presumably members of a
 * union named context_un; the union declaration is not visible here —
 * confirm against the full file.
 */
108 struct bsg_job_data {
111 struct lpfc_bsg_event *evt;
112 struct lpfc_bsg_iocb iocb;
113 struct lpfc_bsg_mbox mbox;
114 struct lpfc_bsg_menlo menlo;
119 struct list_head node;
126 #define BUF_SZ_4K 4096
127 #define SLI_CT_ELX_LOOPBACK 0x10
/* Vendor-unique CT command codes carried under SLI_CT_ELX_LOOPBACK.
 * ELX_LOOPBACK_DATA is also referenced by lpfc_bsg_ct_unsol_event
 * below; further enumerators are not visible in this view.
 */
129 enum ELX_LOOPBACK_CMD {
130 ELX_LOOPBACK_XRI_SETUP,
134 #define ELX_LOOPBACK_HEADER_SZ \
135 (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
/* DMA buffer extension: embeds struct lpfc_dmabuf as its first member
 * so list operations on dma.list can walk chains of these directly
 * (see diag_cmd_data_free).
 */
137 struct lpfc_dmabufext {
138 struct lpfc_dmabuf dma;
144 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
145 * @phba: Pointer to HBA context object.
146 * @cmdiocbq: Pointer to command iocb.
147 * @rspiocbq: Pointer to response iocb.
149 * This function is the completion handler for iocbs issued using
150 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
151 * ring event handler function without any lock held. This function
152 * can be called from both worker thread context and interrupt
153 * context. This function also can be called from another thread which
154 * cleans up the SLI layer objects.
155 * This function copies the contents of the response iocb to the
156 * response iocb memory object provided by the caller of
157 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
158 * sleeps for the iocb completion.
161 lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
162 struct lpfc_iocbq *cmdiocbq,
163 struct lpfc_iocbq *rspiocbq)
165 struct bsg_job_data *dd_data;
166 struct fc_bsg_job *job;
168 struct lpfc_dmabuf *bmp;
169 struct lpfc_nodelist *ndlp;
170 struct lpfc_bsg_iocb *iocb;
/* ct_ev_lock serializes completion against the bsg timeout handler */
174 spin_lock_irqsave(&phba->ct_ev_lock, flags);
175 dd_data = cmdiocbq->context2;
177 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
178 lpfc_sli_release_iocbq(phba, cmdiocbq);
182 iocb = &dd_data->context_un.iocb;
184 job->dd_data = NULL; /* so timeout handler does not reply */
187 rsp = &rspiocbq->iocb;
188 ndlp = cmdiocbq->context1;
/* undo the DMA mappings set up by lpfc_bsg_send_mgmt_cmd */
190 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
191 job->request_payload.sg_cnt, DMA_TO_DEVICE);
192 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
193 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
/* non-zero ulpStatus: classify the LOCAL_REJECT sub-status in word 4 */
195 if (rsp->ulpStatus) {
196 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
197 switch (rsp->un.ulpWord[4] & 0xff) {
198 case IOERR_SEQUENCE_TIMEOUT:
201 case IOERR_INVALID_RPI:
/* success: report how much response data the HBA returned */
211 job->reply->reply_payload_rcv_len =
212 rsp->un.genreq64.bdl.bdeSize;
/* free the BPL buffer and the command iocb */
214 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
215 lpfc_sli_release_iocbq(phba, cmdiocbq);
219 /* make error code available to userspace */
220 job->reply->result = rc;
221 /* complete the job back to userspace */
223 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
228 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
229 * @job: fc_bsg_job to handle
232 lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
234 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
235 struct lpfc_hba *phba = vport->phba;
236 struct lpfc_rport_data *rdata = job->rport->dd_data;
237 struct lpfc_nodelist *ndlp = rdata->pnode;
238 struct ulp_bde64 *bpl = NULL;
240 struct lpfc_iocbq *cmdiocbq = NULL;
242 struct lpfc_dmabuf *bmp = NULL;
245 struct scatterlist *sgel = NULL;
248 struct bsg_job_data *dd_data;
253 /* in case no data is transferred */
254 job->reply->reply_payload_rcv_len = 0;
256 /* allocate our bsg tracking structure */
257 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
259 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
260 "2733 Failed allocation of dd_data\n");
/* hold a node reference for the lifetime of the iocb */
265 if (!lpfc_nlp_get(ndlp)) {
270 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
/* don't issue while an ELS send is already pending on this node */
276 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
281 cmdiocbq = lpfc_sli_get_iocbq(phba);
287 cmd = &cmdiocbq->iocb;
288 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
294 INIT_LIST_HEAD(&bmp->list);
295 bpl = (struct ulp_bde64 *) bmp->virt;
/* map the request payload and describe it with 64-bit BDEs */
296 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
297 job->request_payload.sg_cnt, DMA_TO_DEVICE);
298 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
299 busaddr = sg_dma_address(sgel);
300 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
301 bpl->tus.f.bdeSize = sg_dma_len(sgel);
302 bpl->tus.w = cpu_to_le32(bpl->tus.w);
303 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
304 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
/* map the reply payload; BDE_64I marks these as receive buffers */
308 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
309 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
310 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
311 busaddr = sg_dma_address(sgel);
312 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
313 bpl->tus.f.bdeSize = sg_dma_len(sgel);
314 bpl->tus.w = cpu_to_le32(bpl->tus.w);
315 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
316 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
/* build the GEN_REQUEST64 iocb pointing at the BPL just built */
320 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
321 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
322 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
323 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
324 cmd->un.genreq64.bdl.bdeSize =
325 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
326 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
327 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
328 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
329 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
330 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
331 cmd->ulpBdeCount = 1;
333 cmd->ulpClass = CLASS3;
/* SLI4 uses the translated physical RPI, SLI3 the logical one */
334 cmd->ulpContext = ndlp->nlp_rpi;
335 if (phba->sli_rev == LPFC_SLI_REV4)
336 cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
337 cmd->ulpOwner = OWN_CHIP;
338 cmdiocbq->vport = phba->pport;
339 cmdiocbq->context3 = bmp;
340 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
341 timeout = phba->fc_ratov * 2;
342 cmd->ulpTimeout = timeout;
/* wire up the completion handler and tracking state */
344 cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
345 cmdiocbq->context1 = ndlp;
346 cmdiocbq->context2 = dd_data;
347 dd_data->type = TYPE_IOCB;
348 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
349 dd_data->context_un.iocb.set_job = job;
350 dd_data->context_un.iocb.bmp = bmp;
/* re-enable ring interrupts if FCP polling mode disabled them */
352 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
353 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
357 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
358 writel(creg_val, phba->HCregaddr);
359 readl(phba->HCregaddr); /* flush */
362 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
363 if (iocb_stat == IOCB_SUCCESS)
364 return 0; /* done for now */
365 else if (iocb_stat == IOCB_BUSY)
371 /* iocb failed so cleanup */
372 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
373 job->request_payload.sg_cnt, DMA_TO_DEVICE);
374 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
375 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
377 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
380 lpfc_sli_release_iocbq(phba, cmdiocbq);
388 /* make error code available to userspace */
389 job->reply->result = rc;
395 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
396 * @phba: Pointer to HBA context object.
397 * @cmdiocbq: Pointer to command iocb.
398 * @rspiocbq: Pointer to response iocb.
400 * This function is the completion handler for iocbs issued using
401 * lpfc_bsg_rport_els function. This function is called by the
402 * ring event handler function without any lock held. This function
403 * can be called from both worker thread context and interrupt
404 * context. This function also can be called from another thread which
405 * cleans up the SLI layer objects.
406 * This function copies the contents of the response iocb to the
407 * response iocb memory object provided by the caller of
408 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
409 * sleeps for the iocb completion.
412 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
413 struct lpfc_iocbq *cmdiocbq,
414 struct lpfc_iocbq *rspiocbq)
416 struct bsg_job_data *dd_data;
417 struct fc_bsg_job *job;
419 struct lpfc_nodelist *ndlp;
420 struct lpfc_dmabuf *pbuflist = NULL;
421 struct fc_bsg_ctels_reply *els_reply;
/* ct_ev_lock serializes completion against the bsg timeout handler */
426 spin_lock_irqsave(&phba->ct_ev_lock, flags);
427 dd_data = cmdiocbq->context1;
428 /* normal completion and timeout crossed paths, already done */
430 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* mark woken and copy the hardware response into the waiter's iocb */
434 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
435 if (cmdiocbq->context2 && rspiocbq)
436 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
437 &rspiocbq->iocb, sizeof(IOCB_t));
439 job = dd_data->context_un.iocb.set_job;
440 cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
441 rspiocbq = dd_data->context_un.iocb.rspiocbq;
442 rsp = &rspiocbq->iocb;
443 ndlp = dd_data->context_un.iocb.ndlp;
/* undo the DMA mappings set up by lpfc_bsg_rport_els */
445 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
446 job->request_payload.sg_cnt, DMA_TO_DEVICE);
447 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
448 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
450 if (job->reply->result == -EAGAIN)
452 else if (rsp->ulpStatus == IOSTAT_SUCCESS)
453 job->reply->reply_payload_rcv_len =
454 rsp->un.elsreq64.bdl.bdeSize;
455 else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
456 job->reply->reply_payload_rcv_len =
457 sizeof(struct fc_bsg_ctels_reply);
458 /* LS_RJT data returned in word 4 */
459 rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
460 els_reply = &job->reply->reply_data.ctels_reply;
461 els_reply->status = FC_CTELS_STATUS_REJECT;
462 els_reply->rjt_data.action = rjt_data[3];
463 els_reply->rjt_data.reason_code = rjt_data[2];
464 els_reply->rjt_data.reason_explanation = rjt_data[1];
465 els_reply->rjt_data.vendor_unique = rjt_data[0];
/* free the BPL buffer and both iocbs now the exchange is done */
469 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
470 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
471 lpfc_sli_release_iocbq(phba, rspiocbq);
472 lpfc_sli_release_iocbq(phba, cmdiocbq);
475 /* make error code available to userspace */
476 job->reply->result = rc;
478 /* complete the job back to userspace */
480 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
485 * lpfc_bsg_rport_els - send an ELS command from a bsg request
486 * @job: fc_bsg_job to handle
489 lpfc_bsg_rport_els(struct fc_bsg_job *job)
491 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
492 struct lpfc_hba *phba = vport->phba;
493 struct lpfc_rport_data *rdata = job->rport->dd_data;
494 struct lpfc_nodelist *ndlp = rdata->pnode;
498 struct lpfc_iocbq *rspiocbq;
499 struct lpfc_iocbq *cmdiocbq;
502 struct lpfc_dmabuf *pcmd;
503 struct lpfc_dmabuf *prsp;
504 struct lpfc_dmabuf *pbuflist = NULL;
505 struct ulp_bde64 *bpl;
508 struct scatterlist *sgel = NULL;
511 struct bsg_job_data *dd_data;
515 /* in case no data is transferred */
516 job->reply->reply_payload_rcv_len = 0;
518 /* allocate our bsg tracking structure */
519 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
521 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
522 "2735 Failed allocation of dd_data\n");
/* hold a node reference for the lifetime of the exchange */
527 if (!lpfc_nlp_get(ndlp)) {
/* ELS opcode and payload sizes come straight from the bsg request */
532 elscmd = job->request->rqst_data.r_els.els_code;
533 cmdsize = job->request_payload.payload_len;
534 rspsize = job->reply_payload.payload_len;
535 rspiocbq = lpfc_sli_get_iocbq(phba);
542 rsp = &rspiocbq->iocb;
545 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
546 ndlp->nlp_DID, elscmd);
552 /* prep els iocb set context1 to the ndlp, context2 to the command
553 * dmabuf, context3 holds the data dmabuf
555 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
556 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
/* drop the driver-allocated cmd/rsp buffers; the app's payload
 * scatterlists mapped below will be used instead */
557 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
559 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
561 cmdiocbq->context2 = NULL;
/* reuse the BPL that lpfc_prep_els_iocb built in context3 */
563 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
564 bpl = (struct ulp_bde64 *) pbuflist->virt;
566 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
567 job->request_payload.sg_cnt, DMA_TO_DEVICE);
568 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
569 busaddr = sg_dma_address(sgel);
570 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
571 bpl->tus.f.bdeSize = sg_dma_len(sgel);
572 bpl->tus.w = cpu_to_le32(bpl->tus.w);
573 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
574 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
578 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
579 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
580 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
581 busaddr = sg_dma_address(sgel);
582 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
583 bpl->tus.f.bdeSize = sg_dma_len(sgel);
584 bpl->tus.w = cpu_to_le32(bpl->tus.w);
585 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
586 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
589 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
590 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
591 cmdiocbq->iocb.ulpContext = rpi;
592 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
593 cmdiocbq->context1 = NULL;
594 cmdiocbq->context2 = NULL;
/* wire up the completion handler and tracking state */
596 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
597 cmdiocbq->context1 = dd_data;
598 cmdiocbq->context2 = rspiocbq;
599 dd_data->type = TYPE_IOCB;
600 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
601 dd_data->context_un.iocb.rspiocbq = rspiocbq;
602 dd_data->context_un.iocb.set_job = job;
603 dd_data->context_un.iocb.bmp = NULL;
604 dd_data->context_un.iocb.ndlp = ndlp;
/* re-enable ring interrupts if FCP polling mode disabled them */
606 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
607 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
611 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
612 writel(creg_val, phba->HCregaddr);
613 readl(phba->HCregaddr); /* flush */
615 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
617 if (rc == IOCB_SUCCESS)
618 return 0; /* done for now */
619 else if (rc == IOCB_BUSY)
/* issue failed: unwind mappings, buffers and iocbs */
625 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
626 job->request_payload.sg_cnt, DMA_TO_DEVICE);
627 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
628 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
630 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
632 lpfc_sli_release_iocbq(phba, cmdiocbq);
635 lpfc_sli_release_iocbq(phba, rspiocbq);
641 /* make error code available to userspace */
642 job->reply->result = rc;
648 * lpfc_bsg_event_free - frees an allocated event structure
649 * @kref: Pointer to a kref.
651 * Called from kref_put. Back cast the kref into an event structure address.
652 * Free any events to get, delete associated nodes, free any events to see,
653 * free any data then free the event itself.
656 lpfc_bsg_event_free(struct kref *kref)
/* recover the event from its embedded kref (kref_put release hook) */
658 struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
660 struct event_data *ed;
/* unlink from phba->ct_ev_waiters */
662 list_del(&evt->node);
/* drain both event queues, freeing each queued event_data */
664 while (!list_empty(&evt->events_to_get)) {
665 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
671 while (!list_empty(&evt->events_to_see)) {
672 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
682 * lpfc_bsg_event_ref - increments the kref for an event
683 * @evt: Pointer to an event structure.
686 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
/* take a reference; paired with lpfc_bsg_event_unref */
688 kref_get(&evt->kref);
692 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
693 * @evt: Pointer to an event structure.
696 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
/* drop a reference; last put frees via lpfc_bsg_event_free */
698 kref_put(&evt->kref, lpfc_bsg_event_free);
702 * lpfc_bsg_event_new - allocate and initialize an event structure
703 * @ev_mask: Mask of events.
704 * @ev_reg_id: Event reg id.
705 * @ev_req_id: Event request id.
707 static struct lpfc_bsg_event *
708 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
/* zeroed allocation; remaining members are initialized below */
710 struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
715 INIT_LIST_HEAD(&evt->events_to_get);
716 INIT_LIST_HEAD(&evt->events_to_see);
717 evt->type_mask = ev_mask;
718 evt->req_id = ev_req_id;
719 evt->reg_id = ev_reg_id;
720 evt->wait_time_stamp = jiffies;
721 init_waitqueue_head(&evt->wq);
/* caller owns the initial reference */
722 kref_init(&evt->kref);
727 * diag_cmd_data_free - Frees an lpfc dma buffer extension
728 * @phba: Pointer to HBA context object.
729 * @mlist: Pointer to an lpfc dma buffer extension.
732 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
734 struct lpfc_dmabufext *mlast;
735 struct pci_dev *pcidev;
736 struct list_head head, *curr, *next;
/* bail out when there is nothing to free, or when the link is down
 * while in loopback mode — NOTE(review): early-exit body is not
 * visible here; confirm intent against the full file */
738 if ((!mlist) || (!lpfc_is_link_up(phba) &&
739 (phba->link_flag & LS_LOOPBACK_MODE))) {
743 pcidev = phba->pcidev;
/* splice the caller's chain onto a local head for safe traversal */
744 list_add_tail(&head, &mlist->dma.list);
746 list_for_each_safe(curr, next, &head) {
747 mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
749 dma_free_coherent(&pcidev->dev,
759 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
764 * This function is called when an unsolicited CT command is received. It
765 * forwards the event to any processes registered to receive CT events.
768 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
769 struct lpfc_iocbq *piocbq)
771 uint32_t evt_req_id = 0;
774 struct lpfc_dmabuf *dmabuf = NULL;
775 struct lpfc_bsg_event *evt;
776 struct event_data *evt_dat = NULL;
777 struct lpfc_iocbq *iocbq;
779 struct list_head head;
780 struct ulp_bde64 *bde;
783 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
784 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
785 struct lpfc_hbq_entry *hbqe;
786 struct lpfc_sli_ct_request *ct_req;
787 struct fc_bsg_job *job = NULL;
/* view the whole unsolicited sequence (piocbq chain) as list "head" */
791 INIT_LIST_HEAD(&head);
792 list_add_tail(&head, &piocbq->list);
/* sanity: must carry at least one non-empty BDE */
794 if (piocbq->iocb.ulpBdeCount == 0 ||
795 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
796 goto error_ct_unsol_exit;
/* ignore events while the HBA is in error or SLI is inactive */
798 if (phba->link_state == LPFC_HBA_ERROR ||
799 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
800 goto error_ct_unsol_exit;
802 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
/* non-HBQ path: look up the posted receive buffer by DMA address */
805 dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
806 piocbq->iocb.un.cont64[0].addrLow);
807 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
810 goto error_ct_unsol_exit;
/* the CT request's FsType identifies which waiters want this event */
811 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
812 evt_req_id = ct_req->FsType;
813 cmd = ct_req->CommandResponse.bits.CmdRsp;
814 len = ct_req->CommandResponse.bits.Size;
815 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
816 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
/* deliver a copy of the event to every matching registered waiter */
818 spin_lock_irqsave(&phba->ct_ev_lock, flags);
819 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
820 if (!(evt->type_mask & FC_REG_CT_EVENT) ||
821 evt->req_id != evt_req_id)
824 lpfc_bsg_event_ref(evt);
825 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
826 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
827 if (evt_dat == NULL) {
828 spin_lock_irqsave(&phba->ct_ev_lock, flags);
829 lpfc_bsg_event_unref(evt);
830 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
831 "2614 Memory allocation failed for "
836 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
837 /* take accumulated byte count from the last iocbq */
838 iocbq = list_entry(head.prev, typeof(*iocbq), list);
839 evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
/* non-HBQ: sum the BDE sizes across every iocbq in the chain */
841 list_for_each_entry(iocbq, &head, list) {
842 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
844 iocbq->iocb.un.cont64[i].tus.f.bdeSize;
848 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
849 if (evt_dat->data == NULL) {
850 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
851 "2615 Memory allocation failed for "
852 "CT event data, size %d\n",
855 spin_lock_irqsave(&phba->ct_ev_lock, flags);
856 lpfc_bsg_event_unref(evt);
857 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
858 goto error_ct_unsol_exit;
/* flatten the received sequence into evt_dat->data */
861 list_for_each_entry(iocbq, &head, list) {
863 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
864 bdeBuf1 = iocbq->context2;
865 bdeBuf2 = iocbq->context3;
867 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
868 if (phba->sli3_options &
869 LPFC_SLI3_HBQ_ENABLED) {
871 hbqe = (struct lpfc_hbq_entry *)
872 &iocbq->iocb.un.ulpWord[0];
873 size = hbqe->bde.tus.f.bdeSize;
876 hbqe = (struct lpfc_hbq_entry *)
879 size = hbqe->bde.tus.f.bdeSize;
/* clamp the last fragment so we never copy past the buffer */
882 if ((offset + size) > evt_dat->len)
883 size = evt_dat->len - offset;
885 size = iocbq->iocb.un.cont64[i].
887 bde = &iocbq->iocb.un.cont64[i];
888 dma_addr = getPaddr(bde->addrHigh,
890 dmabuf = lpfc_sli_ringpostbuf_get(phba,
894 lpfc_printf_log(phba, KERN_ERR,
895 LOG_LIBDFC, "2616 No dmabuf "
896 "found for iocbq 0x%p\n",
898 kfree(evt_dat->data);
900 spin_lock_irqsave(&phba->ct_ev_lock,
902 lpfc_bsg_event_unref(evt);
903 spin_unlock_irqrestore(
904 &phba->ct_ev_lock, flags);
905 goto error_ct_unsol_exit;
907 memcpy((char *)(evt_dat->data) + offset,
/* repost/free the receive buffers depending on event type */
910 if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
911 !(phba->sli3_options &
912 LPFC_SLI3_HBQ_ENABLED)) {
913 lpfc_sli_ringpostbuf_put(phba, pring,
917 case ELX_LOOPBACK_DATA:
918 diag_cmd_data_free(phba,
919 (struct lpfc_dmabufext *)
922 case ELX_LOOPBACK_XRI_SETUP:
923 if ((phba->sli_rev ==
925 (phba->sli3_options &
926 LPFC_SLI3_HBQ_ENABLED
928 lpfc_in_buf_free(phba,
931 lpfc_post_buffer(phba,
937 if (!(phba->sli3_options &
938 LPFC_SLI3_HBQ_ENABLED))
939 lpfc_post_buffer(phba,
948 spin_lock_irqsave(&phba->ct_ev_lock, flags);
/* SLI4: record the exchange ids in ct_ctx so lpfc_issue_ct_rsp can
 * send an unsolicited response for this exchange later */
949 if (phba->sli_rev == LPFC_SLI_REV4) {
950 evt_dat->immed_dat = phba->ctx_idx;
951 phba->ctx_idx = (phba->ctx_idx + 1) % 64;
952 /* Provide warning for over-run of the ct_ctx array */
953 if (phba->ct_ctx[evt_dat->immed_dat].flags &
955 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
956 "2717 CT context array entry "
957 "[%d] over-run: oxid:x%x, "
958 "sid:x%x\n", phba->ctx_idx,
960 evt_dat->immed_dat].oxid,
962 evt_dat->immed_dat].SID);
963 phba->ct_ctx[evt_dat->immed_dat].rxid =
964 piocbq->iocb.ulpContext;
965 phba->ct_ctx[evt_dat->immed_dat].oxid =
966 piocbq->iocb.unsli3.rcvsli3.ox_id;
967 phba->ct_ctx[evt_dat->immed_dat].SID =
968 piocbq->iocb.un.rcvels.remoteID;
969 phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
971 evt_dat->immed_dat = piocbq->iocb.ulpContext;
973 evt_dat->type = FC_REG_CT_EVENT;
/* queue the event; loopback waiters sleep on wq and are woken now,
 * others have it moved to events_to_get for a later GET_CT_EVENT */
974 list_add(&evt_dat->node, &evt->events_to_see);
975 if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
976 wake_up_interruptible(&evt->wq);
977 lpfc_bsg_event_unref(evt);
981 list_move(evt->events_to_see.prev, &evt->events_to_get);
982 lpfc_bsg_event_unref(evt);
987 job->reply->reply_payload_rcv_len = size;
988 /* make error code available to userspace */
989 job->reply->result = 0;
991 /* complete the job back to userspace */
992 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
994 spin_lock_irqsave(&phba->ct_ev_lock, flags);
997 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1000 if (!list_empty(&head))
1002 if (evt_req_id == SLI_CT_ELX_LOOPBACK)
1008 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1009 * @job: SET_EVENT fc_bsg_job
1012 lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
1014 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1015 struct lpfc_hba *phba = vport->phba;
1016 struct set_ct_event *event_req;
1017 struct lpfc_bsg_event *evt;
1019 struct bsg_job_data *dd_data = NULL;
1021 unsigned long flags;
/* validate the userspace request is large enough before touching it */
1023 if (job->request_len <
1024 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
1025 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1026 "2612 Received SET_CT_EVENT below minimum "
1032 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1033 if (dd_data == NULL) {
1034 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1035 "2734 Failed allocation of dd_data\n");
1040 event_req = (struct set_ct_event *)
1041 job->request->rqst_data.h_vendor.vendor_cmd;
1042 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
/* reuse an existing waiter registered under the same reg_id */
1044 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1045 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1046 if (evt->reg_id == event_req->ev_reg_id) {
1047 lpfc_bsg_event_ref(evt);
1048 evt->wait_time_stamp = jiffies;
1052 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* loop ran to completion without a match: create a new waiter */
1054 if (&evt->node == &phba->ct_ev_waiters) {
1055 /* no event waiting struct yet - first call */
1056 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1057 event_req->ev_req_id);
1059 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1060 "2617 Failed allocation of event "
1066 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1067 list_add(&evt->node, &phba->ct_ev_waiters);
1068 lpfc_bsg_event_ref(evt);
1069 evt->wait_time_stamp = jiffies;
1070 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* job stays open until an event arrives (completed elsewhere) */
1073 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1075 dd_data->type = TYPE_EVT;
1076 dd_data->context_un.evt = evt;
1077 evt->set_job = job; /* for unsolicited command */
1078 job->dd_data = dd_data; /* for fc transport timeout callback*/
1079 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1080 return 0; /* call job done later */
1083 if (dd_data != NULL)
1086 job->dd_data = NULL;
1091 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1092 * @job: GET_EVENT fc_bsg_job
1095 lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
1097 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1098 struct lpfc_hba *phba = vport->phba;
1099 struct get_ct_event *event_req;
1100 struct get_ct_event_reply *event_reply;
1101 struct lpfc_bsg_event *evt;
1102 struct event_data *evt_dat = NULL;
1103 unsigned long flags;
/* validate the userspace request is large enough before touching it */
1106 if (job->request_len <
1107 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1108 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1109 "2613 Received GET_CT_EVENT request below "
1115 event_req = (struct get_ct_event *)
1116 job->request->rqst_data.h_vendor.vendor_cmd;
1118 event_reply = (struct get_ct_event_reply *)
1119 job->reply->reply_data.vendor_reply.vendor_rsp;
/* find the caller's waiter and dequeue its oldest pending event */
1120 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1121 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1122 if (evt->reg_id == event_req->ev_reg_id) {
1123 if (list_empty(&evt->events_to_get))
1125 lpfc_bsg_event_ref(evt);
1126 evt->wait_time_stamp = jiffies;
1127 evt_dat = list_entry(evt->events_to_get.prev,
1128 struct event_data, node);
1129 list_del(&evt_dat->node);
1133 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1135 /* The app may continue to ask for event data until it gets
1136 * an error indicating that there isn't anymore
1138 if (evt_dat == NULL) {
1139 job->reply->reply_payload_rcv_len = 0;
/* truncate to the app's buffer size rather than fail */
1144 if (evt_dat->len > job->request_payload.payload_len) {
1145 evt_dat->len = job->request_payload.payload_len;
1146 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1147 "2618 Truncated event data at %d "
1149 job->request_payload.payload_len);
1152 event_reply->type = evt_dat->type;
1153 event_reply->immed_data = evt_dat->immed_dat;
/* note: event data is copied into the *request* payload sg list —
 * the app supplies its receive buffer there (TODO confirm caller) */
1154 if (evt_dat->len > 0)
1155 job->reply->reply_payload_rcv_len =
1156 sg_copy_from_buffer(job->request_payload.sg_list,
1157 job->request_payload.sg_cnt,
1158 evt_dat->data, evt_dat->len);
1160 job->reply->reply_payload_rcv_len = 0;
1163 kfree(evt_dat->data);
/* drop the reference taken while dequeuing the event */
1167 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1168 lpfc_bsg_event_unref(evt);
1169 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1170 job->dd_data = NULL;
1171 job->reply->result = 0;
1176 job->dd_data = NULL;
1177 job->reply->result = rc;
1182 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1183 * @phba: Pointer to HBA context object.
1184 * @cmdiocbq: Pointer to command iocb.
1185 * @rspiocbq: Pointer to response iocb.
1187 * This function is the completion handler for iocbs issued using
1188 * lpfc_issue_ct_rsp function. This function is called by the
1189 * ring event handler function without any lock held. This function
1190 * can be called from both worker thread context and interrupt
1191 * context. This function also can be called from other thread which
1192 * cleans up the SLI layer objects.
1193 * This function copies the contents of the response iocb to the
1194 * response iocb memory object provided by the caller of
1195 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1196 * sleeps for the iocb completion.
1199 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1200 struct lpfc_iocbq *cmdiocbq,
1201 struct lpfc_iocbq *rspiocbq)
1203 struct bsg_job_data *dd_data;
1204 struct fc_bsg_job *job;
1206 struct lpfc_dmabuf *bmp;
1207 struct lpfc_nodelist *ndlp;
1208 unsigned long flags;
/* ct_ev_lock serializes completion against the bsg timeout handler */
1211 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1212 dd_data = cmdiocbq->context2;
1213 /* normal completion and timeout crossed paths, already done */
1215 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1219 job = dd_data->context_un.iocb.set_job;
1220 bmp = dd_data->context_un.iocb.bmp;
1221 rsp = &rspiocbq->iocb;
1222 ndlp = dd_data->context_un.iocb.ndlp;
/* only the request payload was mapped for a CT response */
1224 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1225 job->request_payload.sg_cnt, DMA_TO_DEVICE);
/* non-zero ulpStatus: classify the LOCAL_REJECT sub-status in word 4 */
1227 if (rsp->ulpStatus) {
1228 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1229 switch (rsp->un.ulpWord[4] & 0xff) {
1230 case IOERR_SEQUENCE_TIMEOUT:
1233 case IOERR_INVALID_RPI:
1243 job->reply->reply_payload_rcv_len =
1244 rsp->un.genreq64.bdl.bdeSize;
/* free the BPL buffer and the command iocb */
1246 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1247 lpfc_sli_release_iocbq(phba, cmdiocbq);
1251 /* make error code available to userspace */
1252 job->reply->result = rc;
1253 job->dd_data = NULL;
1254 /* complete the job back to userspace */
1256 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1261 * lpfc_issue_ct_rsp - issue a ct response
1262 * @phba: Pointer to HBA context object.
1263 * @job: Pointer to the job object.
1264 * @tag: tag index value into the ports context exchange array.
1265 * @bmp: Pointer to a dma buffer descriptor.
1266 * @num_entry: Number of enties in the bde.
1269 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1270 struct lpfc_dmabuf *bmp, int num_entry)
1273 struct lpfc_iocbq *ctiocb = NULL;
1275 struct lpfc_nodelist *ndlp = NULL;
1276 struct bsg_job_data *dd_data;
1279 /* allocate our bsg tracking structure */
1280 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1282 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1283 "2736 Failed allocation of dd_data\n");
1288 /* Allocate buffer for command iocb */
1289 ctiocb = lpfc_sli_get_iocbq(phba);
1295 icmd = &ctiocb->iocb;
1296 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1297 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1298 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1299 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1300 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
1301 icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1302 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1303 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1304 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1306 /* Fill in rest of iocb */
1307 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1308 icmd->ulpBdeCount = 1;
1310 icmd->ulpClass = CLASS3;
1311 if (phba->sli_rev == LPFC_SLI_REV4) {
1312 /* Do not issue unsol response if oxid not marked as valid */
1313 if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
1315 goto issue_ct_rsp_exit;
1317 icmd->ulpContext = phba->ct_ctx[tag].rxid;
1318 icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
1319 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1321 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1322 "2721 ndlp null for oxid %x SID %x\n",
1324 phba->ct_ctx[tag].SID);
1326 goto issue_ct_rsp_exit;
1329 /* Check if the ndlp is active */
1330 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1332 goto issue_ct_rsp_exit;
1335 /* get a refernece count so the ndlp doesn't go away while
1338 if (!lpfc_nlp_get(ndlp)) {
1340 goto issue_ct_rsp_exit;
1343 icmd->un.ulpWord[3] =
1344 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1346 /* The exchange is done, mark the entry as invalid */
1347 phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
1349 icmd->ulpContext = (ushort) tag;
1351 icmd->ulpTimeout = phba->fc_ratov * 2;
1353 /* Xmit CT response on exchange <xid> */
1354 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1355 "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1356 icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
1358 ctiocb->iocb_cmpl = NULL;
1359 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1360 ctiocb->vport = phba->pport;
1361 ctiocb->context3 = bmp;
1363 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1364 ctiocb->context2 = dd_data;
1365 ctiocb->context1 = ndlp;
1366 dd_data->type = TYPE_IOCB;
1367 dd_data->context_un.iocb.cmdiocbq = ctiocb;
1368 dd_data->context_un.iocb.rspiocbq = NULL;
1369 dd_data->context_un.iocb.set_job = job;
1370 dd_data->context_un.iocb.bmp = bmp;
1371 dd_data->context_un.iocb.ndlp = ndlp;
1373 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1374 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1376 goto issue_ct_rsp_exit;
1378 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1379 writel(creg_val, phba->HCregaddr);
1380 readl(phba->HCregaddr); /* flush */
1383 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1385 if (rc == IOCB_SUCCESS)
1386 return 0; /* done for now */
1389 lpfc_sli_release_iocbq(phba, ctiocb);
1397 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1398 * @job: SEND_MGMT_RESP fc_bsg_job
 *
 * Maps the job's request payload for DMA, builds a buffer pointer list
 * (BPL) describing it, and hands off to lpfc_issue_ct_rsp() to transmit
 * the CT response on the exchange tagged in the vendor command.
1401 lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1403 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1404 struct lpfc_hba *phba = vport->phba;
1405 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1406 job->request->rqst_data.h_vendor.vendor_cmd;
1407 struct ulp_bde64 *bpl;
1408 struct lpfc_dmabuf *bmp = NULL;
1409 struct scatterlist *sgel = NULL;
1413 uint32_t tag = mgmt_resp->tag;
1414 unsigned long reqbfrcnt =
1415 (unsigned long)job->request_payload.payload_len;
1418 /* in case no data is transferred */
1419 job->reply->reply_payload_rcv_len = 0;
	/* reject empty payloads and payloads larger than 80 4K chunks */
1421 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1423 goto send_mgmt_rsp_exit;
1426 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1429 goto send_mgmt_rsp_exit;
1432 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1435 goto send_mgmt_rsp_free_bmp;
1438 INIT_LIST_HEAD(&bmp->list);
1439 bpl = (struct ulp_bde64 *) bmp->virt;
1440 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
1441 job->request_payload.sg_cnt, DMA_TO_DEVICE);
	/* translate each mapped sg segment into a 64-bit BDE */
1442 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
1443 busaddr = sg_dma_address(sgel);
1444 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1445 bpl->tus.f.bdeSize = sg_dma_len(sgel);
1446 bpl->tus.w = cpu_to_le32(bpl->tus.w);
1447 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
1448 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
1452 rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
1454 if (rc == IOCB_SUCCESS)
1455 return 0; /* done for now */
1457 /* TBD need to handle a timeout */
1458 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1459 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1461 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1463 send_mgmt_rsp_free_bmp:
1466 /* make error code available to userspace */
1467 job->reply->result = rc;
1468 job->dd_data = NULL;
1473 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
1474 * @phba: Pointer to HBA context object.
1476 * This function is responsible for preparing driver for diag loopback
 * mode: it rejects entry while the HBA is in error or management I/O is
 * blocked, blocks new scsi requests on every vport, then waits for the
 * FCP ring's outstanding commands to drain.
1480 lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1482 struct lpfc_vport **vports;
1483 struct Scsi_Host *shost;
1484 struct lpfc_sli *psli;
1485 struct lpfc_sli_ring *pring;
1492 pring = &psli->ring[LPFC_FCP_RING];
	/* refuse diag mode if the port is errored or mgmt I/O is blocked */
1496 if ((phba->link_state == LPFC_HBA_ERROR) ||
1497 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1498 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
	/* block new scsi requests on all vports plus the physical port */
1501 vports = lpfc_create_vport_work_array(phba);
1503 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1504 shost = lpfc_shost_from_vport(vports[i]);
1505 scsi_block_requests(shost);
1507 lpfc_destroy_vport_work_array(phba, vports);
1509 shost = lpfc_shost_from_vport(phba->pport);
1510 scsi_block_requests(shost);
	/* wait for outstanding FCP commands to complete */
1513 while (pring->txcmplq_cnt) {
1514 if (i++ > 500) /* wait up to 5 seconds */
1522 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1523 * @phba: Pointer to HBA context object.
1525 * This function is responsible for driver exit processing of setting up
1526 * diag loopback mode on device.
 *
 * Counterpart of lpfc_bsg_diag_mode_enter(): unblocks scsi requests on
 * every vport and on the physical port.
1529 lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1531 struct Scsi_Host *shost;
1532 struct lpfc_vport **vports;
1535 vports = lpfc_create_vport_work_array(phba);
1537 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1538 shost = lpfc_shost_from_vport(vports[i]);
1539 scsi_unblock_requests(shost);
1541 lpfc_destroy_vport_work_array(phba, vports);
1543 shost = lpfc_shost_from_vport(phba->pport);
1544 scsi_unblock_requests(shost);
1550 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1551 * @phba: Pointer to HBA context object.
1552 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1554 * This function is responsible for placing an sli3 port into diagnostic
1555 * loopback mode in order to perform a diagnostic loopback test.
1556 * All new scsi requests are blocked, a small delay is used to allow the
1557 * scsi requests to complete then the link is brought down. If the link is
1558 * is placed in loopback mode then scsi requests are again allowed
1559 * so the scsi mid-layer doesn't give up on the port.
1560 * All of this is done in-line.
1563 lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1565 struct diag_mode_set *loopback_mode;
1566 uint32_t link_flags;
1568 LPFC_MBOXQ_t *pmboxq;
1573 /* no data to return just the return code */
1574 job->reply->reply_payload_rcv_len = 0;
	/* validate the bsg request is large enough for a diag_mode_set */
1576 if (job->request_len < sizeof(struct fc_bsg_request) +
1577 sizeof(struct diag_mode_set)) {
1578 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1579 "2738 Received DIAG MODE request size:%d "
1580 "below the minimum size:%d\n",
1582 (int)(sizeof(struct fc_bsg_request) +
1583 sizeof(struct diag_mode_set)));
1588 rc = lpfc_bsg_diag_mode_enter(phba);
1592 /* bring the link to diagnostic mode */
1593 loopback_mode = (struct diag_mode_set *)
1594 job->request->rqst_data.h_vendor.vendor_cmd;
1595 link_flags = loopback_mode->type;
	/* user timeout is in seconds; loop below polls in 10ms steps */
1596 timeout = loopback_mode->timeout * 100;
1598 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1601 goto loopback_mode_exit;
	/* first bring the link down via MBX_DOWN_LINK */
1603 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1604 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1605 pmboxq->u.mb.mbxOwner = OWN_HOST;
1607 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1609 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1610 /* wait for link down before proceeding */
1612 while (phba->link_state != LPFC_LINK_DOWN) {
1613 if (i++ > timeout) {
1615 goto loopback_mode_exit;
		/* then re-init the link with the requested loopback flags */
1621 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1622 if (link_flags == INTERNAL_LOOP_BACK)
1623 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1625 pmboxq->u.mb.un.varInitLnk.link_flags =
1626 FLAGS_TOPOLOGY_MODE_LOOP;
1628 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1629 pmboxq->u.mb.mbxOwner = OWN_HOST;
1631 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1634 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1637 phba->link_flag |= LS_LOOPBACK_MODE;
1638 /* wait for the link attention interrupt */
1642 while (phba->link_state != LPFC_HBA_READY) {
1643 if (i++ > timeout) {
1656 lpfc_bsg_diag_mode_exit(phba);
1659 * Let SLI layer release mboxq if mbox command completed after timeout.
1661 if (mbxstatus != MBX_TIMEOUT)
1662 mempool_free(pmboxq, phba->mbox_mem_pool);
1665 /* make error code available to userspace */
1666 job->reply->result = rc;
1667 /* complete the job back to userspace if no error */
1674 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1675 * @phba: Pointer to HBA context object.
1676 * @diag: Flag for set link to diag or normal operation state.
1678 * This function is responsible for issuing a sli4 mailbox command for setting
1679 * link to either diag state or normal operation state.
1682 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1684 LPFC_MBOXQ_t *pmboxq;
1685 struct lpfc_mbx_set_link_diag_state *link_diag_state;
1686 uint32_t req_len, alloc_len;
1687 int mbxstatus = MBX_SUCCESS, rc;
1689 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	/* payload length excludes the sli4 config mailbox header */
1693 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1694 sizeof(struct lpfc_sli4_cfg_mhdr));
1695 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1696 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1697 req_len, LPFC_SLI4_MBX_EMBED);
1698 if (alloc_len != req_len) {
1700 goto link_diag_state_set_out;
1702 link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1703 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1704 phba->sli4_hba.link_state.number);
1705 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1706 phba->sli4_hba.link_state.type);
	/* diag != 0 requests diagnostic state, 0 requests normal operation */
1708 bf_set(lpfc_mbx_set_diag_state_diag,
1709 &link_diag_state->u.req, 1);
1711 bf_set(lpfc_mbx_set_diag_state_diag,
1712 &link_diag_state->u.req, 0);
1714 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1716 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1721 link_diag_state_set_out:
1722 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1723 mempool_free(pmboxq, phba->mbox_mem_pool);
1729 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
1730 * @phba: Pointer to HBA context object.
1731 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1733 * This function is responsible for placing an sli4 port into diagnostic
1734 * loopback mode in order to perform a diagnostic loopback test.
1737 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1739 struct diag_mode_set *loopback_mode;
1740 uint32_t link_flags, timeout, req_len, alloc_len;
1741 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1742 LPFC_MBOXQ_t *pmboxq = NULL;
1743 int mbxstatus, i, rc = 0;
1745 /* no data to return just the return code */
1746 job->reply->reply_payload_rcv_len = 0;
	/* validate the bsg request is large enough for a diag_mode_set */
1748 if (job->request_len < sizeof(struct fc_bsg_request) +
1749 sizeof(struct diag_mode_set)) {
1750 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1751 "3011 Received DIAG MODE request size:%d "
1752 "below the minimum size:%d\n",
1754 (int)(sizeof(struct fc_bsg_request) +
1755 sizeof(struct diag_mode_set)));
1760 rc = lpfc_bsg_diag_mode_enter(phba);
1764 /* bring the link to diagnostic mode */
1765 loopback_mode = (struct diag_mode_set *)
1766 job->request->rqst_data.h_vendor.vendor_cmd;
1767 link_flags = loopback_mode->type;
	/* user timeout is in seconds; loop below polls in 10ms steps */
1768 timeout = loopback_mode->timeout * 100;
1770 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1772 goto loopback_mode_exit;
1774 /* wait for link down before proceeding */
1776 while (phba->link_state != LPFC_LINK_DOWN) {
1777 if (i++ > timeout) {
1779 goto loopback_mode_exit;
1783 /* set up loopback mode */
1784 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1787 goto loopback_mode_exit;
	/* payload length excludes the sli4 config mailbox header */
1789 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1790 sizeof(struct lpfc_sli4_cfg_mhdr));
1791 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1792 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1793 req_len, LPFC_SLI4_MBX_EMBED);
1794 if (alloc_len != req_len) {
1796 goto loopback_mode_exit;
1798 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1799 bf_set(lpfc_mbx_set_diag_state_link_num,
1800 &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
1801 bf_set(lpfc_mbx_set_diag_state_link_type,
1802 &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
1803 if (link_flags == INTERNAL_LOOP_BACK)
1804 bf_set(lpfc_mbx_set_diag_lpbk_type,
1805 &link_diag_loopback->u.req,
1806 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
1808 bf_set(lpfc_mbx_set_diag_lpbk_type,
1809 &link_diag_loopback->u.req,
1810 LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
1812 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1813 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
1816 phba->link_flag |= LS_LOOPBACK_MODE;
1817 /* wait for the link attention interrupt */
1820 while (phba->link_state != LPFC_HBA_READY) {
1821 if (i++ > timeout) {
1830 lpfc_bsg_diag_mode_exit(phba);
1833 * Let SLI layer release mboxq if mbox command completed after timeout.
1835 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1836 mempool_free(pmboxq, phba->mbox_mem_pool);
1839 /* make error code available to userspace */
1840 job->reply->result = rc;
1841 /* complete the job back to userspace if no error */
1848 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
1849 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1851 * This function is responsible for responding to check and dispatch bsg diag
1852 * command from the user to proper driver action routines.
 *
 * Dispatches to the sli3 handler for pre-SLI4 HBAs, and to the sli4
 * handler only for SLI_INTF if_type 2 ports.
1855 lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
1857 struct Scsi_Host *shost;
1858 struct lpfc_vport *vport;
1859 struct lpfc_hba *phba;
1865 vport = (struct lpfc_vport *)job->shost->hostdata;
1872 if (phba->sli_rev < LPFC_SLI_REV4)
1873 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
1874 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
1875 LPFC_SLI_INTF_IF_TYPE_2)
1876 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
1885 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
1886 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
1888 * This function is responsible for responding to check and dispatch bsg diag
1889 * command from the user to proper driver action routines.
 *
 * SLI4 if_type 2 only: clears the link diag state and re-initializes
 * the link to return the port to normal operation.
1892 lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
1894 struct Scsi_Host *shost;
1895 struct lpfc_vport *vport;
1896 struct lpfc_hba *phba;
1902 vport = (struct lpfc_vport *)job->shost->hostdata;
1909 if (phba->sli_rev < LPFC_SLI_REV4)
1911 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1912 LPFC_SLI_INTF_IF_TYPE_2)
	/* leave diag state, then bring the link back up */
1915 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
1918 rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
1924 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
1925 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
1927 * This function is to perform SLI4 diag link test request from the user
 * applicaton: enters diag mode, issues the RUN_LINK_DIAG_TEST mailbox
 * command (polled), and reports the mailbox/shdr status back through the
 * job's vendor reply.
1931 lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
1933 struct Scsi_Host *shost;
1934 struct lpfc_vport *vport;
1935 struct lpfc_hba *phba;
1936 LPFC_MBOXQ_t *pmboxq;
1937 struct sli4_link_diag *link_diag_test_cmd;
1938 uint32_t req_len, alloc_len;
1940 struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
1941 union lpfc_sli4_cfg_shdr *shdr;
1942 uint32_t shdr_status, shdr_add_status;
1943 struct diag_status *diag_status_reply;
1944 int mbxstatus, rc = 0;
1951 vport = (struct lpfc_vport *)job->shost->hostdata;
	/* link diag test is only supported on SLI4 if_type 2 ports */
1962 if (phba->sli_rev < LPFC_SLI_REV4) {
1966 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
1967 LPFC_SLI_INTF_IF_TYPE_2) {
	/* validate the bsg request is large enough for a sli4_link_diag */
1972 if (job->request_len < sizeof(struct fc_bsg_request) +
1973 sizeof(struct sli4_link_diag)) {
1974 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1975 "3013 Received LINK DIAG TEST request "
1976 " size:%d below the minimum size:%d\n",
1978 (int)(sizeof(struct fc_bsg_request) +
1979 sizeof(struct sli4_link_diag)));
1984 rc = lpfc_bsg_diag_mode_enter(phba);
1988 link_diag_test_cmd = (struct sli4_link_diag *)
1989 job->request->rqst_data.h_vendor.vendor_cmd;
1990 timeout = link_diag_test_cmd->timeout * 100;
	/* place the link into diagnostic state before running the test */
1992 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1997 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2000 goto link_diag_test_exit;
2003 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2004 sizeof(struct lpfc_sli4_cfg_mhdr));
2005 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2006 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2007 req_len, LPFC_SLI4_MBX_EMBED);
2008 if (alloc_len != req_len) {
2010 goto link_diag_test_exit;
	/* copy the user-supplied test parameters into the mailbox */
2012 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2013 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2014 phba->sli4_hba.link_state.number);
2015 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2016 phba->sli4_hba.link_state.type);
2017 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2018 link_diag_test_cmd->test_id);
2019 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2020 link_diag_test_cmd->loops);
2021 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2022 link_diag_test_cmd->test_version);
2023 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2024 link_diag_test_cmd->error_action);
2026 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
	/* extract completion status from the sli4 config response header */
2028 shdr = (union lpfc_sli4_cfg_shdr *)
2029 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2030 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2031 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2032 if (shdr_status || shdr_add_status || mbxstatus) {
2033 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2034 "3010 Run link diag test mailbox failed with "
2035 "mbx_status x%x status x%x, add_status x%x\n",
2036 mbxstatus, shdr_status, shdr_add_status);
2039 diag_status_reply = (struct diag_status *)
2040 job->reply->reply_data.vendor_reply.vendor_rsp;
2042 if (job->reply_len <
2043 sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
2044 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2045 "3012 Received Run link diag test reply "
2046 "below minimum size (%d): reply_len:%d\n",
2047 (int)(sizeof(struct fc_bsg_request) +
2048 sizeof(struct diag_status)),
2054 diag_status_reply->mbox_status = mbxstatus;
2055 diag_status_reply->shdr_status = shdr_status;
2056 diag_status_reply->shdr_add_status = shdr_add_status;
2058 link_diag_test_exit:
	/* always attempt to restore normal link state on the way out */
2059 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2062 mempool_free(pmboxq, phba->mbox_mem_pool);
2064 lpfc_bsg_diag_mode_exit(phba);
2067 /* make error code available to userspace */
2068 job->reply->result = rc;
2069 /* complete the job back to userspace if no error */
2076 * lpfcdiag_loop_self_reg - obtains a remote port login id
2077 * @phba: Pointer to HBA context object
2078 * @rpi: Pointer to a remote port login id
2080 * This function obtains a remote port login id so the diag loopback test
2081 * can send and receive its own unsolicited CT command.
 *
 * Registers the port's own DID/service parameters via REG_RPI; on SLI4
 * an rpi is first allocated from the driver pool and freed again on any
 * failure path.
2083 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2086 struct lpfc_dmabuf *dmabuff;
2089 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2093 if (phba->sli_rev == LPFC_SLI_REV4)
2094 *rpi = lpfc_sli4_alloc_rpi(phba);
	/* register the port to itself (fc_myDID, own service params) */
2095 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2096 (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi);
2098 mempool_free(mbox, phba->mbox_mem_pool);
2099 if (phba->sli_rev == LPFC_SLI_REV4)
2100 lpfc_sli4_free_rpi(phba, *rpi);
	/* take ownership of the sparam buffer lpfc_reg_rpi attached */
2104 dmabuff = (struct lpfc_dmabuf *) mbox->context1;
2105 mbox->context1 = NULL;
2106 mbox->context2 = NULL;
2107 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2109 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2110 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
	/* on timeout the SLI layer still owns the mailbox; don't free it */
2112 if (status != MBX_TIMEOUT)
2113 mempool_free(mbox, phba->mbox_mem_pool);
2114 if (phba->sli_rev == LPFC_SLI_REV4)
2115 lpfc_sli4_free_rpi(phba, *rpi);
2119 *rpi = mbox->u.mb.un.varWords[0];
2121 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2123 mempool_free(mbox, phba->mbox_mem_pool);
2128 * lpfcdiag_loop_self_unreg - unregs from the rpi
2129 * @phba: Pointer to HBA context object
2130 * @rpi: Remote port login id
2132 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
 * via an UNREG_LOGIN mailbox command, and on SLI4 returns the rpi to the
 * driver's rpi pool.
2134 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2139 /* Allocate mboxq structure */
2140 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2144 lpfc_unreg_login(phba, 0, rpi, mbox);
2145 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2147 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
	/* on timeout the SLI layer still owns the mailbox; don't free it */
2148 if (status != MBX_TIMEOUT)
2149 mempool_free(mbox, phba->mbox_mem_pool);
2152 mempool_free(mbox, phba->mbox_mem_pool);
2153 if (phba->sli_rev == LPFC_SLI_REV4)
2154 lpfc_sli4_free_rpi(phba, rpi);
2159 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2160 * @phba: Pointer to HBA context object
2161 * @rpi: Remote port login id
2162 * @txxri: Pointer to transmit exchange id
2163 * @rxxri: Pointer to response exchange id
2165 * This function obtains the transmit and receive ids required to send
2166 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
2167 * flags are used so the unsolicited response handler is able to process
2168 * the ct command sent on the same port.
2170 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2171 uint16_t *txxri, uint16_t * rxxri)
2173 struct lpfc_bsg_event *evt;
2174 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2176 struct lpfc_dmabuf *dmabuf;
2177 struct ulp_bde64 *bpl = NULL;
2178 struct lpfc_sli_ct_request *ctreq = NULL;
2182 unsigned long flags;
	/* register a CT event waiter so we can catch our own unsol cmd */
2186 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2187 SLI_CT_ELX_LOOPBACK);
2191 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2192 list_add(&evt->node, &phba->ct_ev_waiters);
2193 lpfc_bsg_event_ref(evt);
2194 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2196 cmdiocbq = lpfc_sli_get_iocbq(phba);
2197 rspiocbq = lpfc_sli_get_iocbq(phba);
	/* one mbuf holds the BPL followed immediately by the CT request */
2199 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2201 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2203 INIT_LIST_HEAD(&dmabuf->list);
2204 bpl = (struct ulp_bde64 *) dmabuf->virt;
2205 memset(bpl, 0, sizeof(*bpl));
2206 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2208 le32_to_cpu(putPaddrHigh(dmabuf->phys +
2211 le32_to_cpu(putPaddrLow(dmabuf->phys +
2213 bpl->tus.f.bdeFlags = 0;
2214 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2215 bpl->tus.w = le32_to_cpu(bpl->tus.w);
	/* bail out if any of the allocations above failed */
2219 if (cmdiocbq == NULL || rspiocbq == NULL ||
2220 dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2221 dmabuf->virt == NULL) {
2223 goto err_get_xri_exit;
2226 cmd = &cmdiocbq->iocb;
2227 rsp = &rspiocbq->iocb;
2229 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
	/* mark the CT request as an ELX loopback XRI-setup command */
2231 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2232 ctreq->RevisionId.bits.InId = 0;
2233 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2234 ctreq->FsSubType = 0;
2235 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2236 ctreq->CommandResponse.bits.Size = 0;
2239 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2240 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2241 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2242 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2244 cmd->un.xseq64.w5.hcsw.Fctl = LA;
2245 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2246 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2247 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2249 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2250 cmd->ulpBdeCount = 1;
2252 cmd->ulpClass = CLASS3;
2253 cmd->ulpContext = rpi;
2255 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2256 cmdiocbq->vport = phba->pport;
2258 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2260 (phba->fc_ratov * 2)
2261 + LPFC_DRVR_TIMEOUT);
2264 goto err_get_xri_exit;
	/* tx xri comes straight back in the response iocb */
2266 *txxri = rsp->ulpContext;
2269 evt->wait_time_stamp = jiffies;
	/* rx xri arrives via the unsolicited CT event we registered for */
2270 time_left = wait_event_interruptible_timeout(
2271 evt->wq, !list_empty(&evt->events_to_see),
2272 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2273 if (list_empty(&evt->events_to_see))
2274 ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2276 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2277 list_move(evt->events_to_see.prev, &evt->events_to_get);
2278 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2279 *rxxri = (list_entry(evt->events_to_get.prev,
2280 typeof(struct event_data),
	/* drop both references: the waiter ref and the creation ref */
2286 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2287 lpfc_bsg_event_unref(evt); /* release ref */
2288 lpfc_bsg_event_unref(evt); /* delete */
2289 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2293 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
	/* on iocb timeout the SLI layer still owns the iocb; don't release */
2297 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2298 lpfc_sli_release_iocbq(phba, cmdiocbq);
2300 lpfc_sli_release_iocbq(phba, rspiocbq);
2305 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
2306 * @phba: Pointer to HBA context object
2308 * This function allocates a BSG_MBOX_SIZE (4KB) page size dma buffer and
2309 * returns the pointer to the buffer descriptor.
2311 static struct lpfc_dmabuf *
2312 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2314 struct lpfc_dmabuf *dmabuf;
2315 struct pci_dev *pcidev = phba->pcidev;
2317 /* allocate dma buffer struct */
2318 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2322 INIT_LIST_HEAD(&dmabuf->list);
2324 /* now, allocate dma buffer */
2325 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2326 &(dmabuf->phys), GFP_KERNEL);
2328 if (!dmabuf->virt) {
	/* zero the page so stale data is never handed to the HBA */
2332 memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
2338 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2339 * @phba: Pointer to HBA context object.
2340 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2342 * This routine just simply frees a dma buffer and its associated buffer
2343 * descriptor referred by @dmabuf.
2346 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2348 struct pci_dev *pcidev = phba->pcidev;
2354 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2355 dmabuf->virt, dmabuf->phys);
2361 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2362 * @phba: Pointer to HBA context object.
2363 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2365 * This routine just simply frees all dma buffers and their associated buffer
2366 * descriptors referred by @dmabuf_list.
2369 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2370 struct list_head *dmabuf_list)
2372 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2374 if (list_empty(dmabuf_list))
	/* safe iteration: each entry is unlinked before being freed */
2377 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2378 list_del_init(&dmabuf->list);
2379 lpfc_bsg_dma_page_free(phba, dmabuf);
2385 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2386 * @phba: Pointer to HBA context object
2387 * @bpl: Pointer to 64 bit bde structure
2388 * @size: Number of bytes to process
2389 * @nocopydata: Flag to copy user data into the allocated buffer
2391 * This function allocates page size buffers and populates an lpfc_dmabufext.
2392 * If allowed the user data pointed to with indataptr is copied into the kernel
2393 * memory. The chained list of page size buffers is returned.
2395 static struct lpfc_dmabufext *
2396 diag_cmd_data_alloc(struct lpfc_hba *phba,
2397 struct ulp_bde64 *bpl, uint32_t size,
2400 struct lpfc_dmabufext *mlist = NULL;
2401 struct lpfc_dmabufext *dmp;
2402 int cnt, offset = 0, i = 0;
2403 struct pci_dev *pcidev;
2405 pcidev = phba->pcidev;
2408 /* We get chunks of 4K */
2409 if (size > BUF_SZ_4K)
2414 /* allocate struct lpfc_dmabufext buffer header */
2415 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2419 INIT_LIST_HEAD(&dmp->dma.list);
2421 /* Queue it to a linked list */
2423 list_add_tail(&dmp->dma.list, &mlist->dma.list);
2427 /* allocate buffer */
2428 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
	/* nocopydata path: hand the buffer to the device as-is */
2439 bpl->tus.f.bdeFlags = 0;
2440 pci_dma_sync_single_for_device(phba->pcidev,
2441 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
	/* otherwise zero the buffer and mark the BDE for immediate data */
2444 memset((uint8_t *)dmp->dma.virt, 0, cnt);
2445 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2448 /* build buffer ptr list for IOCB */
2449 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2450 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2451 bpl->tus.f.bdeSize = (ushort) cnt;
2452 bpl->tus.w = le32_to_cpu(bpl->tus.w);
	/* on failure, unwind everything allocated so far */
2463 diag_cmd_data_free(phba, mlist);
2468 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2469 * @phba: Pointer to HBA context object
2470 * @rxxri: Receive exchange id
2471 * @len: Number of data bytes
2473 * This function allocates and posts a data buffer of sufficient size to receive
2474 * an unsolicited CT command.
2476 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2479 struct lpfc_sli *psli = &phba->sli;
2480 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2481 struct lpfc_iocbq *cmdiocbq;
2483 struct list_head head, *curr, *next;
2484 struct lpfc_dmabuf *rxbmp;
2485 struct lpfc_dmabuf *dmp;
2486 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2487 struct ulp_bde64 *rxbpl = NULL;
2489 struct lpfc_dmabufext *rxbuffer = NULL;
	/* allocate iocb, BPL mbuf, and the chained rx data buffers */
2494 cmdiocbq = lpfc_sli_get_iocbq(phba);
2495 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2496 if (rxbmp != NULL) {
2497 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2499 INIT_LIST_HEAD(&rxbmp->list);
2500 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2501 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2505 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
2507 goto err_post_rxbufs_exit;
2510 /* Queue buffers for the receive exchange */
2511 num_bde = (uint32_t)rxbuffer->flag;
2512 dmp = &rxbuffer->dma;
2514 cmd = &cmdiocbq->iocb;
	/* walk the buffer chain, posting up to two buffers per iocb */
2517 INIT_LIST_HEAD(&head);
2518 list_add_tail(&head, &dmp->list);
2519 list_for_each_safe(curr, next, &head) {
2520 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
		/* HBQ-enabled ports use tagged QUE_XRI64_CX, one buffer each */
2523 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2524 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2525 cmd->un.quexri64cx.buff.bde.addrHigh =
2526 putPaddrHigh(mp[i]->phys);
2527 cmd->un.quexri64cx.buff.bde.addrLow =
2528 putPaddrLow(mp[i]->phys);
2529 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2530 ((struct lpfc_dmabufext *)mp[i])->size;
2531 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2532 cmd->ulpCommand = CMD_QUE_XRI64_CX;
2535 cmd->ulpBdeCount = 1;
2536 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
		/* non-HBQ ports pack up to two BDEs into a QUE_XRI_BUF64_CX */
2539 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2540 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2541 cmd->un.cont64[i].tus.f.bdeSize =
2542 ((struct lpfc_dmabufext *)mp[i])->size;
2543 cmd->ulpBdeCount = ++i;
2545 if ((--num_bde > 0) && (i < 2))
2548 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2552 cmd->ulpClass = CLASS3;
2553 cmd->ulpContext = rxxri;
2555 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2557 if (iocb_stat == IOCB_ERROR) {
2558 diag_cmd_data_free(phba,
2559 (struct lpfc_dmabufext *)mp[0]);
2561 diag_cmd_data_free(phba,
2562 (struct lpfc_dmabufext *)mp[1]);
2563 dmp = list_entry(next, struct lpfc_dmabuf, list);
2565 goto err_post_rxbufs_exit;
		/* hand the posted buffers to the ring's postbuf list */
2568 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2570 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2574 /* The iocb was freed by lpfc_sli_issue_iocb */
2575 cmdiocbq = lpfc_sli_get_iocbq(phba);
2577 dmp = list_entry(next, struct lpfc_dmabuf, list);
2579 goto err_post_rxbufs_exit;
2582 cmd = &cmdiocbq->iocb;
2587 err_post_rxbufs_exit:
2591 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2596 lpfc_sli_release_iocbq(phba, cmdiocbq);
2601 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
2602 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2604 * This function receives a user data buffer to be transmitted and received on
2605 * the same port, the link must be up and in loopback mode prior
2607 * 1. A kernel buffer is allocated to copy the user data into.
2608 * 2. The port registers with "itself".
2609 * 3. The transmit and receive exchange ids are obtained.
2610 * 4. The receive exchange id is posted.
2611 * 5. A new els loopback event is created.
2612 * 6. The command and response iocbs are allocated.
2613 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
2615 * This function is meant to be called n times while the port is in loopback
2616 * so it is the apps responsibility to issue a reset to take the port out
2620 lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
2622 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2623 struct lpfc_hba *phba = vport->phba;
2624 struct diag_mode_test *diag_mode;
2625 struct lpfc_bsg_event *evt;
2626 struct event_data *evdat;
2627 struct lpfc_sli *psli = &phba->sli;
2630 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2632 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2634 struct lpfc_sli_ct_request *ctreq;
2635 struct lpfc_dmabuf *txbmp;
2636 struct ulp_bde64 *txbpl = NULL;
2637 struct lpfc_dmabufext *txbuffer = NULL;
2638 struct list_head head;
2639 struct lpfc_dmabuf *curr;
2640 uint16_t txxri, rxxri;
2642 uint8_t *ptr = NULL, *rx_databuf = NULL;
2646 unsigned long flags;
2647 void *dataout = NULL;
2650 /* in case no data is returned return just the return code */
2651 job->reply->reply_payload_rcv_len = 0;
/* sanity: the bsg request must at least contain a diag_mode_test */
2653 if (job->request_len <
2654 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2655 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2656 "2739 Received DIAG TEST request below minimum "
2659 goto loopback_test_exit;
/* loopback returns exactly what was sent, so lengths must match */
2662 if (job->request_payload.payload_len !=
2663 job->reply_payload.payload_len) {
2665 goto loopback_test_exit;
2668 diag_mode = (struct diag_mode_test *)
2669 job->request->rqst_data.h_vendor.vendor_cmd;
/* board must be operational and management I/O not blocked */
2671 if ((phba->link_state == LPFC_HBA_ERROR) ||
2672 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2673 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2675 goto loopback_test_exit;
/* link must already be up and placed in loopback by a prior request */
2678 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2680 goto loopback_test_exit;
2683 size = job->request_payload.payload_len;
2684 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
/* enforce the 320K (80 * 4K) transfer ceiling */
2686 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2688 goto loopback_test_exit;
2691 if (full_size >= BUF_SZ_4K) {
2693 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2694 * then we allocate 64k and re-use that buffer over and over to
2695 * xfer the whole block. This is because Linux kernel has a
2696 * problem allocating more than 120k of kernel space memory. Saw
2697 * problem with GET_FCPTARGETMAPPING...
2699 if (size <= (64 * 1024))
2700 total_mem = full_size;
2702 total_mem = 64 * 1024;
2704 /* Allocate memory for ioctl data */
2705 total_mem = BUF_SZ_4K;
2707 dataout = kmalloc(total_mem, GFP_KERNEL);
2708 if (dataout == NULL) {
2710 goto loopback_test_exit;
/* copy the user payload in after the elx loopback header area */
2714 ptr += ELX_LOOPBACK_HEADER_SZ;
2715 sg_copy_to_buffer(job->request_payload.sg_list,
2716 job->request_payload.sg_cnt,
/* step 2: port registers with itself (self login), yielding an rpi */
2718 rc = lpfcdiag_loop_self_reg(phba, &rpi);
2720 goto loopback_test_exit;
/* step 3: obtain the transmit and receive exchange ids */
2722 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2724 lpfcdiag_loop_self_unreg(phba, rpi);
2725 goto loopback_test_exit;
/* step 4: post receive buffers against the rx exchange */
2728 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2730 lpfcdiag_loop_self_unreg(phba, rpi);
2731 goto loopback_test_exit;
/* step 5: register an event waiter so the unsolicited CT handler can
 * hand the looped-back frame to this thread
 */
2734 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2735 SLI_CT_ELX_LOOPBACK);
2737 lpfcdiag_loop_self_unreg(phba, rpi);
2739 goto loopback_test_exit;
2742 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2743 list_add(&evt->node, &phba->ct_ev_waiters);
2744 lpfc_bsg_event_ref(evt);
2745 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* step 6: allocate cmd/rsp iocbs and the transmit BPL page/buffers */
2747 cmdiocbq = lpfc_sli_get_iocbq(phba);
2748 rspiocbq = lpfc_sli_get_iocbq(phba);
2749 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2752 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
2754 INIT_LIST_HEAD(&txbmp->list);
2755 txbpl = (struct ulp_bde64 *) txbmp->virt;
2756 txbuffer = diag_cmd_data_alloc(phba,
2757 txbpl, full_size, 0);
2761 if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
2764 goto err_loopback_test_exit;
2767 cmd = &cmdiocbq->iocb;
2768 rsp = &rspiocbq->iocb;
/* step 7: fill the tx buffers -- CT loopback header first, then data */
2770 INIT_LIST_HEAD(&head);
2771 list_add_tail(&head, &txbuffer->dma.list);
2772 list_for_each_entry(curr, &head, list) {
2773 segment_len = ((struct lpfc_dmabufext *)curr)->size;
2774 if (current_offset == 0) {
/* first segment starts with the elx loopback CT header */
2776 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2777 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2778 ctreq->RevisionId.bits.InId = 0;
2779 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2780 ctreq->FsSubType = 0;
2781 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
2782 ctreq->CommandResponse.bits.Size = size;
2783 segment_offset = ELX_LOOPBACK_HEADER_SZ;
2787 BUG_ON(segment_offset >= segment_len);
2788 memcpy(curr->virt + segment_offset,
2789 ptr + current_offset,
2790 segment_len - segment_offset);
2792 current_offset += segment_len - segment_offset;
2793 BUG_ON(current_offset > size);
2797 /* Build the XMIT_SEQUENCE iocb */
2799 num_bde = (uint32_t)txbuffer->flag;
2801 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
2802 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
2803 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2804 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
2806 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
2807 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2808 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2809 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2811 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
2812 cmd->ulpBdeCount = 1;
2814 cmd->ulpClass = CLASS3;
2815 cmd->ulpContext = txxri;
2817 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2818 cmdiocbq->vport = phba->pport;
/* transmit the loopback frame and wait for the iocb to complete */
2820 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2821 rspiocbq, (phba->fc_ratov * 2) +
2824 if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
2826 goto err_loopback_test_exit;
/* now wait for the looped-back frame to arrive as a CT event */
2830 time_left = wait_event_interruptible_timeout(
2831 evt->wq, !list_empty(&evt->events_to_see),
2832 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
/* zero time_left means timeout; non-zero with empty list means signal */
2834 if (list_empty(&evt->events_to_see))
2835 rc = (time_left) ? -EINTR : -ETIMEDOUT;
2837 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2838 list_move(evt->events_to_see.prev, &evt->events_to_get);
2839 evdat = list_entry(evt->events_to_get.prev,
2840 typeof(*evdat), node);
2841 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2842 rx_databuf = evdat->data;
2843 if (evdat->len != full_size) {
2844 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2845 "1603 Loopback test did not receive expected "
2846 "data length. actual length 0x%x expected "
2848 evdat->len, full_size);
2850 } else if (rx_databuf == NULL)
2854 /* skip over elx loopback header */
2855 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
2856 job->reply->reply_payload_rcv_len =
2857 sg_copy_from_buffer(job->reply_payload.sg_list,
2858 job->reply_payload.sg_cnt,
2860 job->reply->reply_payload_rcv_len = size;
/* unwind: drop self login, release both event refs, free iocbs/buffers */
2864 err_loopback_test_exit:
2865 lpfcdiag_loop_self_unreg(phba, rpi);
2867 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2868 lpfc_bsg_event_unref(evt); /* release ref */
2869 lpfc_bsg_event_unref(evt); /* delete */
2870 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2872 if (cmdiocbq != NULL)
2873 lpfc_sli_release_iocbq(phba, cmdiocbq);
2875 if (rspiocbq != NULL)
2876 lpfc_sli_release_iocbq(phba, rspiocbq);
2878 if (txbmp != NULL) {
2879 if (txbpl != NULL) {
2880 if (txbuffer != NULL)
2881 diag_cmd_data_free(phba, txbuffer);
2882 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
2889 /* make error code available to userspace */
2890 job->reply->result = rc;
2891 job->dd_data = NULL;
2892 /* complete the job back to userspace if no error */
2899 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
2900 * @job: GET_DFC_REV fc_bsg_job
2903 lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
2905 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2906 struct lpfc_hba *phba = vport->phba;
2907 struct get_mgmt_rev *event_req;
2908 struct get_mgmt_rev_reply *event_reply;
/* validate the request buffer is large enough before dereferencing */
2911 if (job->request_len <
2912 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
2913 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2914 "2740 Received GET_DFC_REV request below "
2920 event_req = (struct get_mgmt_rev *)
2921 job->request->rqst_data.h_vendor.vendor_cmd;
2923 event_reply = (struct get_mgmt_rev_reply *)
2924 job->reply->reply_data.vendor_reply.vendor_rsp;
/* the reply buffer must also be big enough to hold the rev info */
2926 if (job->reply_len <
2927 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
2928 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2929 "2741 Received GET_DFC_REV reply below "
/* report the driver's management interface major/minor revision */
2935 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
2936 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
2938 job->reply->result = rc;
2945 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
2946 * @phba: Pointer to HBA context object.
2947 * @pmboxq: Pointer to mailbox command.
2949 * This is completion handler function for mailbox commands issued from
2950 * lpfc_bsg_issue_mbox function. This function is called by the
2951 * mailbox event handler function with no lock held. This function
2952 * will wake up thread waiting on the wait queue pointed by context1
2956 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2958 struct bsg_job_data *dd_data;
2959 struct fc_bsg_job *job;
2961 unsigned long flags;
2962 uint8_t *pmb, *pmb_buf;
/* ct_ev_lock serializes against the bsg timeout handler */
2964 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2965 dd_data = pmboxq->context1;
2966 /* job already timed out? */
2968 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2973 * The outgoing buffer is readily referred from the dma buffer,
2974 * just need to get header part from mailboxq structure.
2976 pmb = (uint8_t *)&pmboxq->u.mb;
2977 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
2978 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
2980 job = dd_data->context_un.mbox.set_job;
/* copy the mailbox result back into the job's reply scatterlist */
2982 size = job->reply_payload.payload_len;
2983 job->reply->reply_payload_rcv_len =
2984 sg_copy_from_buffer(job->reply_payload.sg_list,
2985 job->reply_payload.sg_cnt,
2987 /* need to hold the lock until we set job->dd_data to NULL
2988 * to hold off the timeout handler returning to the mid-layer
2989 * while we are still processing the job.
2991 job->dd_data = NULL;
2992 dd_data->context_un.mbox.set_job = NULL;
2993 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2995 dd_data->context_un.mbox.set_job = NULL;
2996 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* free the base driver mailbox, the dma pages and the tracking struct */
2999 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3000 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3004 job->reply->result = 0;
3011 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3012 * @phba: Pointer to HBA context object.
3013 * @mb: Pointer to a mailbox object.
3014 * @vport: Pointer to a vport object.
3016 * Some commands require the port to be offline, some may not be called from
3019 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3020 MAILBOX_t *mb, struct lpfc_vport *vport)
3022 /* return negative error values for bsg job */
3023 switch (mb->mbxCommand) {
/* these commands are only permitted while the port is offline */
3027 case MBX_CONFIG_LINK:
3028 case MBX_CONFIG_RING:
3029 case MBX_RESET_RING:
3030 case MBX_UNREG_LOGIN:
3032 case MBX_DUMP_CONTEXT:
3036 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3037 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3038 "2743 Command 0x%x is illegal in on-line "
/* commands allowed regardless of port state */
3044 case MBX_WRITE_VPARMS:
3047 case MBX_READ_CONFIG:
3048 case MBX_READ_RCONFIG:
3049 case MBX_READ_STATUS:
3052 case MBX_READ_LNK_STAT:
3053 case MBX_DUMP_MEMORY:
3055 case MBX_UPDATE_CFG:
3056 case MBX_KILL_BOARD:
3058 case MBX_LOAD_EXP_ROM:
3060 case MBX_DEL_LD_ENTRY:
3063 case MBX_SLI4_CONFIG:
3064 case MBX_READ_EVENT_LOG:
3065 case MBX_READ_EVENT_LOG_STATUS:
3066 case MBX_WRITE_EVENT_LOG:
3067 case MBX_PORT_CAPABILITIES:
3068 case MBX_PORT_IOV_CONTROL:
3069 case MBX_RUN_BIU_DIAG64:
3071 case MBX_SET_VARIABLE:
3072 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3073 "1226 mbox: set_variable 0x%x, 0x%x\n",
3075 mb->un.varWords[1]);
/* SETVAR_MLOMNT arms menlo maintenance wait; SETVAR_MLORST takes the
 * link out of loopback and forces point-to-point topology
 */
3076 if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3077 && (mb->un.varWords[1] == 1)) {
3078 phba->wait_4_mlo_maint_flg = 1;
3079 } else if (mb->un.varWords[0] == SETVAR_MLORST) {
3080 phba->link_flag &= ~LS_LOOPBACK_MODE;
3081 phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3084 case MBX_READ_SPARM64:
3085 case MBX_READ_TOPOLOGY:
3087 case MBX_REG_LOGIN64:
3088 case MBX_CONFIG_PORT:
3089 case MBX_RUN_BIU_DIAG:
/* anything not listed above is rejected */
3091 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3092 "2742 Unknown Command 0x%x\n",
3101 * lpfc_bsg_mbox_ext_cleanup - clean up context of multi-buffer mbox session
3102 * @phba: Pointer to HBA context object.
3104 * This routine cleans up and resets BSG handling of multi-buffer mbox
3108 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
/* nothing to do if no multi-buffer session is in progress */
3110 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3113 /* free all memory, including dma buffers */
3114 lpfc_bsg_dma_page_list_free(phba,
3115 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3116 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3117 /* multi-buffer write mailbox command pass-through complete */
/* zero the whole context (returns state to LPFC_BSG_MBOX_IDLE) and
 * re-initialize the external dma buffer list head
 */
3118 memset((char *)&phba->mbox_ext_buf_ctx, 0,
3119 sizeof(struct lpfc_mbox_ext_buf_ctx));
3120 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3126 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3127 * @phba: Pointer to HBA context object.
3128 * @pmboxq: Pointer to mailbox command.
3130 * This routine handles the BSG job completion for mailbox commands with
3131 * multiple external buffers.
3133 static struct fc_bsg_job *
3134 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3136 struct bsg_job_data *dd_data;
3137 struct fc_bsg_job *job;
3138 uint8_t *pmb, *pmb_buf;
3139 unsigned long flags;
/* ct_ev_lock serializes against the bsg timeout handler */
3143 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3144 dd_data = pmboxq->context1;
3145 /* has the job already timed out? */
3147 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3153 * The outgoing buffer is readily referred from the dma buffer,
3154 * just need to get header part from mailboxq structure.
3156 pmb = (uint8_t *)&pmboxq->u.mb;
3157 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3158 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3160 job = dd_data->context_un.mbox.set_job;
/* copy the mailbox result back into the job's reply scatterlist */
3162 size = job->reply_payload.payload_len;
3163 job->reply->reply_payload_rcv_len =
3164 sg_copy_from_buffer(job->reply_payload.sg_list,
3165 job->reply_payload.sg_cnt,
3167 /* result for successful */
3168 job->reply->result = 0;
3169 job->dd_data = NULL;
3170 /* need to hold the lock util we set job->dd_data to NULL
3171 * to hold off the timeout handler from midlayer to take
3174 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3175 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3176 "2937 SLI_CONFIG ext-buffer maibox command "
3177 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3178 phba->mbox_ext_buf_ctx.nembType,
3179 phba->mbox_ext_buf_ctx.mboxType, size);
3181 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* mailbox failed: log it and mark the multi-buffer session done */
3185 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3186 "2938 SLI_CONFIG ext-buffer maibox "
3187 "command (x%x/x%x) failure, rc:x%x\n",
3188 phba->mbox_ext_buf_ctx.nembType,
3189 phba->mbox_ext_buf_ctx.mboxType, rc);
3191 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3198 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3199 * @phba: Pointer to HBA context object.
3200 * @pmboxq: Pointer to mailbox command.
3202 * This is completion handler function for mailbox read commands with multiple
3206 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3208 struct fc_bsg_job *job;
3210 /* handle the BSG job with mailbox command */
/* a user-requested abort is surfaced as a mailbox error status */
3211 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3212 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3214 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3215 "2939 SLI_CONFIG ext-buffer rd maibox command "
3216 "complete, ctxState:x%x, mbxStatus:x%x\n",
3217 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3219 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
/* end the session on error, or when there are no further buffers to
 * hand back to user space (single-buffer case)
 */
3221 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3222 lpfc_bsg_mbox_ext_session_reset(phba);
3224 /* free base driver mailbox structure memory */
3225 mempool_free(pmboxq, phba->mbox_mem_pool);
3227 /* complete the bsg job if we have it */
3235 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3236 * @phba: Pointer to HBA context object.
3237 * @pmboxq: Pointer to mailbox command.
3239 * This is completion handler function for mailbox write commands with multiple
3243 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3245 struct fc_bsg_job *job;
3247 /* handle the BSG job with the mailbox command */
/* a user-requested abort is surfaced as a mailbox error status */
3248 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3249 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3251 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3252 "2940 SLI_CONFIG ext-buffer wr maibox command "
3253 "complete, ctxState:x%x, mbxStatus:x%x\n",
3254 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3256 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3258 /* free all memory, including dma buffers */
/* write path always tears down the whole session at completion */
3259 mempool_free(pmboxq, phba->mbox_mem_pool);
3260 lpfc_bsg_mbox_ext_session_reset(phba);
3262 /* complete the bsg job if we have it */
/**
 * lpfc_bsg_sli_cfg_dma_desc_setup - set up an external buffer descriptor
 * @phba: Pointer to HBA context object.
 * @nemb_tp: Non-embedded buffer type (nemb_mse or nemb_hbd).
 * @index: Index of the descriptor slot (mse[] or hbd[]) being filled.
 * @mbx_dmabuf: DMA buffer holding the SLI_CONFIG mailbox command itself.
 * @ext_dmabuf: DMA buffer to point the descriptor at (for slots whose data
 *              does not live inside the mailbox buffer).
 *
 * Writes the physical address (high/low) of the proper DMA buffer into the
 * MSE or HBD descriptor at @index inside the SLI_CONFIG mailbox.  The first
 * path of each branch targets a buffer embedded in @mbx_dmabuf (at an
 * offset elided from this listing); the second targets @ext_dmabuf.
 *
 * NOTE(review): listing is elided; the branch conditions between the visible
 * lines are not shown here.
 */
3270 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3271 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3272 struct lpfc_dmabuf *ext_dmabuf)
3274 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3276 /* pointer to the start of mailbox command */
3277 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3279 if (nemb_tp == nemb_mse) {
/* MSE descriptor pointing into the mailbox dma buffer itself */
3281 sli_cfg_mbx->un.sli_config_emb0_subsys.
3283 putPaddrHigh(mbx_dmabuf->phys +
3285 sli_cfg_mbx->un.sli_config_emb0_subsys.
3287 putPaddrLow(mbx_dmabuf->phys +
3289 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3290 "2943 SLI_CONFIG(mse)[%d], "
3291 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3293 sli_cfg_mbx->un.sli_config_emb0_subsys.
3295 sli_cfg_mbx->un.sli_config_emb0_subsys.
3297 sli_cfg_mbx->un.sli_config_emb0_subsys.
/* MSE descriptor pointing at a separate external dma buffer */
3300 sli_cfg_mbx->un.sli_config_emb0_subsys.
3302 putPaddrHigh(ext_dmabuf->phys);
3303 sli_cfg_mbx->un.sli_config_emb0_subsys.
3305 putPaddrLow(ext_dmabuf->phys);
3306 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3307 "2944 SLI_CONFIG(mse)[%d], "
3308 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3310 sli_cfg_mbx->un.sli_config_emb0_subsys.
3312 sli_cfg_mbx->un.sli_config_emb0_subsys.
3314 sli_cfg_mbx->un.sli_config_emb0_subsys.
/* HBD descriptor pointing into the mailbox dma buffer itself */
3319 sli_cfg_mbx->un.sli_config_emb1_subsys.
3321 putPaddrHigh(mbx_dmabuf->phys +
3323 sli_cfg_mbx->un.sli_config_emb1_subsys.
3325 putPaddrLow(mbx_dmabuf->phys +
3327 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3328 "3007 SLI_CONFIG(hbd)[%d], "
3329 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3331 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3333 sli_config_emb1_subsys.hbd[index]),
3334 sli_cfg_mbx->un.sli_config_emb1_subsys.
3336 sli_cfg_mbx->un.sli_config_emb1_subsys.
/* HBD descriptor pointing at a separate external dma buffer */
3340 sli_cfg_mbx->un.sli_config_emb1_subsys.
3342 putPaddrHigh(ext_dmabuf->phys);
3343 sli_cfg_mbx->un.sli_config_emb1_subsys.
3345 putPaddrLow(ext_dmabuf->phys);
3346 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3347 "3008 SLI_CONFIG(hbd)[%d], "
3348 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3350 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3352 sli_config_emb1_subsys.hbd[index]),
3353 sli_cfg_mbx->un.sli_config_emb1_subsys.
3355 sli_cfg_mbx->un.sli_config_emb1_subsys.
3363 * lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read
3364 * @phba: Pointer to HBA context object.
3365 * @mb: Pointer to a BSG mailbox object.
3366 * @nemb_tp: Enumerate of non-embedded mailbox command type.
3367 * @dmabuff: Pointer to a DMA buffer descriptor.
3369 * This routine performs SLI_CONFIG (0x9B) read mailbox command operation with
3370 * non-embedded external buffers.
3373 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3374 enum nemb_type nemb_tp,
3375 struct lpfc_dmabuf *dmabuf)
3377 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3378 struct dfc_mbox_req *mbox_req;
3379 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3380 uint32_t ext_buf_cnt, ext_buf_index;
3381 struct lpfc_dmabuf *ext_dmabuf = NULL;
3382 struct bsg_job_data *dd_data = NULL;
3383 LPFC_MBOXQ_t *pmboxq = NULL;
3389 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3391 /* pointer to the start of mailbox command */
3392 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
/* determine and range-check the external buffer count (MSE vs HBD) */
3394 if (nemb_tp == nemb_mse) {
3395 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3396 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3397 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3398 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3399 "2945 Handled SLI_CONFIG(mse) rd, "
3400 "ext_buf_cnt(%d) out of range(%d)\n",
3402 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3406 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3407 "2941 Handled SLI_CONFIG(mse) rd, "
3408 "ext_buf_cnt:%d\n", ext_buf_cnt);
3410 /* sanity check on interface type for support */
3411 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3412 LPFC_SLI_INTF_IF_TYPE_2) {
3416 /* nemb_tp == nemb_hbd */
3417 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3418 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3419 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3420 "2946 Handled SLI_CONFIG(hbd) rd, "
3421 "ext_buf_cnt(%d) out of range(%d)\n",
3423 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3427 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3428 "2942 Handled SLI_CONFIG(hbd) rd, "
3429 "ext_buf_cnt:%d\n", ext_buf_cnt);
3432 /* reject non-embedded mailbox command with none external buffer */
3433 if (ext_buf_cnt == 0) {
3436 } else if (ext_buf_cnt > 1) {
3437 /* additional external read buffers */
/* buffer 0 lives in the mailbox dma page; allocate pages for 1..n-1 */
3438 for (i = 1; i < ext_buf_cnt; i++) {
3439 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3444 list_add_tail(&ext_dmabuf->list,
3445 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3449 /* bsg tracking structure */
3450 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3456 /* mailbox command structure for base driver */
3457 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3462 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3464 /* for the first external buffer */
3465 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3467 /* for the rest of external buffer descriptors if any */
3468 if (ext_buf_cnt > 1) {
3470 list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3471 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3472 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3473 ext_buf_index, dmabuf,
3479 /* construct base driver mbox command */
3480 pmb = &pmboxq->u.mb;
3481 pmbx = (uint8_t *)dmabuf->virt;
3482 memcpy(pmb, pmbx, sizeof(*pmb));
3483 pmb->mbxOwner = OWN_HOST;
3484 pmboxq->vport = phba->pport;
3486 /* multi-buffer handling context */
3487 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3488 phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3489 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3490 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3491 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3492 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3494 /* callback for multi-buffer read mailbox command */
3495 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3497 /* context fields to callback function */
3498 pmboxq->context1 = dd_data;
3499 dd_data->type = TYPE_MBOX;
3500 dd_data->context_un.mbox.pmboxq = pmboxq;
3501 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3502 dd_data->context_un.mbox.set_job = job;
3503 job->dd_data = dd_data;
/* hand the session to the port; completion runs asynchronously */
3506 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3508 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3509 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3510 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3511 "2947 Issued SLI_CONFIG ext-buffer "
3512 "maibox command, rc:x%x\n", rc);
3513 return SLI_CONFIG_HANDLED;
3515 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3516 "2948 Failed to issue SLI_CONFIG ext-buffer "
3517 "maibox command, rc:x%x\n", rc);
/* error unwind: release mailbox, dma pages and reset session state */
3522 mempool_free(pmboxq, phba->mbox_mem_pool);
3523 lpfc_bsg_dma_page_list_free(phba,
3524 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3526 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3531 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
3532 * @phba: Pointer to HBA context object.
3533 * @mb: Pointer to a BSG mailbox object.
3534 * @dmabuff: Pointer to a DMA buffer descriptor.
3536 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
3537 * non-embedded external buffers.
3540 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3541 enum nemb_type nemb_tp,
3542 struct lpfc_dmabuf *dmabuf)
3544 struct dfc_mbox_req *mbox_req;
3545 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3546 uint32_t ext_buf_cnt;
3547 struct bsg_job_data *dd_data = NULL;
3548 LPFC_MBOXQ_t *pmboxq = NULL;
3551 int rc = SLI_CONFIG_NOT_HANDLED, i;
3554 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3556 /* pointer to the start of mailbox command */
3557 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
/* determine and range-check the external buffer count (MSE vs HBD) */
3559 if (nemb_tp == nemb_mse) {
3560 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3561 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3562 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3563 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3564 "2953 Handled SLI_CONFIG(mse) wr, "
3565 "ext_buf_cnt(%d) out of range(%d)\n",
3567 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3570 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3571 "2949 Handled SLI_CONFIG(mse) wr, "
3572 "ext_buf_cnt:%d\n", ext_buf_cnt);
3574 /* sanity check on interface type for support */
3575 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3576 LPFC_SLI_INTF_IF_TYPE_2)
3578 /* nemb_tp == nemb_hbd */
3579 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3580 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3581 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3582 "2954 Handled SLI_CONFIG(hbd) wr, "
3583 "ext_buf_cnt(%d) out of range(%d)\n",
3585 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3588 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3589 "2950 Handled SLI_CONFIG(hbd) wr, "
3590 "ext_buf_cnt:%d\n", ext_buf_cnt);
/* a non-embedded write with no external buffer is not handled here */
3593 if (ext_buf_cnt == 0)
3596 /* for the first external buffer */
3597 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3599 /* log for looking forward */
3600 for (i = 1; i < ext_buf_cnt; i++) {
3601 if (nemb_tp == nemb_mse)
3602 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3603 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
3604 i, sli_cfg_mbx->un.sli_config_emb0_subsys.
3607 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3608 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
3609 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3610 &sli_cfg_mbx->un.sli_config_emb1_subsys.
3614 /* multi-buffer handling context */
3615 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3616 phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
3617 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3618 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3619 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3620 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
/* single buffer: everything is here, issue the mailbox right away */
3622 if (ext_buf_cnt == 1) {
3623 /* bsg tracking structure */
3624 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3630 /* mailbox command structure for base driver */
3631 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3636 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3637 pmb = &pmboxq->u.mb;
3638 mbx = (uint8_t *)dmabuf->virt;
3639 memcpy(pmb, mbx, sizeof(*pmb));
3640 pmb->mbxOwner = OWN_HOST;
3641 pmboxq->vport = phba->pport;
3643 /* callback for multi-buffer read mailbox command */
3644 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3646 /* context fields to callback function */
3647 pmboxq->context1 = dd_data;
3648 dd_data->type = TYPE_MBOX;
3649 dd_data->context_un.mbox.pmboxq = pmboxq;
3650 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
3651 dd_data->context_un.mbox.set_job = job;
3652 job->dd_data = dd_data;
3655 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3657 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3658 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3659 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3660 "2955 Issued SLI_CONFIG ext-buffer "
3661 "maibox command, rc:x%x\n", rc);
3662 return SLI_CONFIG_HANDLED;
3664 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3665 "2956 Failed to issue SLI_CONFIG ext-buffer "
3666 "maibox command, rc:x%x\n", rc);
3670 /* wait for additoinal external buffers */
/* multi-buffer case: complete this job now; user space will send the
 * remaining write buffers in follow-up requests before issue
 */
3671 job->reply->result = 0;
3673 return SLI_CONFIG_HANDLED;
/* error unwind: release the base driver mailbox */
3677 mempool_free(pmboxq, phba->mbox_mem_pool);
3684 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
3685 * @phba: Pointer to HBA context object.
3686 * @mb: Pointer to a BSG mailbox object.
3687 * @dmabuff: Pointer to a DMA buffer descriptor.
3689 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
3690 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
3691 * with embedded subsystem 0x1 and opcodes with external HBDs.
3694 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3695 struct lpfc_dmabuf *dmabuf)
3697 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3700 int rc = SLI_CONFIG_NOT_HANDLED;
/* the host owns the multi-buffer session while it is being parsed */
3703 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
3705 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
/* emb bit clear -> emb0 layout with MSE descriptors */
3707 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3708 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3709 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
3710 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3711 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
3712 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3713 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
/* dispatch FCoE subsystem opcodes to the rd/wr ext handlers */
3715 case FCOE_OPCODE_READ_FCF:
3716 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3717 "2957 Handled SLI_CONFIG "
3718 "subsys_fcoe, opcode:x%x\n",
3720 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3723 case FCOE_OPCODE_ADD_FCF:
3724 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3725 "2958 Handled SLI_CONFIG "
3726 "subsys_fcoe, opcode:x%x\n",
3728 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3732 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3733 "2959 Not handled SLI_CONFIG "
3734 "subsys_fcoe, opcode:x%x\n",
3736 rc = SLI_CONFIG_NOT_HANDLED;
3740 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3741 "2977 Handled SLI_CONFIG "
3742 "subsys:x%d, opcode:x%x\n",
3744 rc = SLI_CONFIG_NOT_HANDLED;
/* emb bit set -> emb1 layout with HBD descriptors */
3747 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
3748 &sli_cfg_mbx->un.sli_config_emb1_subsys);
3749 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
3750 &sli_cfg_mbx->un.sli_config_emb1_subsys);
3751 if (subsys == SLI_CONFIG_SUBSYS_COMN) {
/* dispatch common subsystem object read/write opcodes */
3753 case COMN_OPCODE_READ_OBJECT:
3754 case COMN_OPCODE_READ_OBJECT_LIST:
3755 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3756 "2960 Handled SLI_CONFIG "
3757 "subsys_comn, opcode:x%x\n",
3759 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
3762 case COMN_OPCODE_WRITE_OBJECT:
3763 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3764 "2961 Handled SLI_CONFIG "
3765 "subsys_comn, opcode:x%x\n",
3767 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
3771 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3772 "2962 Not handled SLI_CONFIG "
3773 "subsys_comn, opcode:x%x\n",
3775 rc = SLI_CONFIG_NOT_HANDLED;
3779 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3780 "2978 Handled SLI_CONFIG "
3781 "subsys:x%d, opcode:x%x\n",
3783 rc = SLI_CONFIG_NOT_HANDLED;
3790 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
3791 * @phba: Pointer to HBA context object.
3793 * This routine is for requesting to abort a pass-through mailbox command with
3794 * multiple external buffers due to error condition.
3797 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
/* If the port currently owns the session, flag it for abort first. */
3799 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
3800 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
/* Tear down the multi-buffer session context in either case. */
3802 lpfc_bsg_mbox_ext_session_reset(phba);
3807 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
3808 * @phba: Pointer to HBA context object.
3809 * @dmabuf: Pointer to a DMA buffer descriptor.
3811 * This routine extracts the next mailbox read external buffer back to
3812 * user space through BSG.
3815 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
3817 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3818 struct lpfc_dmabuf *dmabuf;
/* Buffers are returned strictly in sequence; consume the next index. */
3823 index = phba->mbox_ext_buf_ctx.seqNum;
3824 phba->mbox_ext_buf_ctx.seqNum++;
3826 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
3827 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
/* Buffer length comes from the matching MSE or HBD descriptor. */
3829 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
3830 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
3831 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
3832 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3833 "2963 SLI_CONFIG (mse) ext-buffer rd get "
3834 "buffer[%d], size:%d\n", index, size);
3836 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3837 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
3838 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3839 "2964 SLI_CONFIG (hbd) ext-buffer rd get "
3840 "buffer[%d], size:%d\n", index, size);
/* Nothing staged to hand back to user space. */
3842 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
/* Detach the head buffer and copy its contents into the BSG reply SG list. */
3844 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
3845 struct lpfc_dmabuf, list);
3846 list_del_init(&dmabuf->list);
3847 pbuf = (uint8_t *)dmabuf->virt;
3848 job->reply->reply_payload_rcv_len =
3849 sg_copy_from_buffer(job->reply_payload.sg_list,
3850 job->reply_payload.sg_cnt,
/* Buffer content has been copied out; the DMA page can be freed now. */
3853 lpfc_bsg_dma_page_free(phba, dmabuf);
/* Last buffer consumed: the read session is complete, reset context. */
3855 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3856 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3857 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
3858 "command session done\n");
3859 lpfc_bsg_mbox_ext_session_reset(phba);
3862 job->reply->result = 0;
3865 return SLI_CONFIG_HANDLED;
3869 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
3870 * @phba: Pointer to HBA context object.
3871 * @dmabuf: Pointer to a DMA buffer descriptor.
3873 * This routine sets up the next mailbox write external buffer obtained
3874 * from user space through BSG.
3877 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
3878 struct lpfc_dmabuf *dmabuf)
3880 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3881 struct bsg_job_data *dd_data = NULL;
3882 LPFC_MBOXQ_t *pmboxq = NULL;
3884 enum nemb_type nemb_tp;
/* Buffers arrive strictly in sequence; record this one's index. */
3890 index = phba->mbox_ext_buf_ctx.seqNum;
3891 phba->mbox_ext_buf_ctx.seqNum++;
3892 nemb_tp = phba->mbox_ext_buf_ctx.nembType;
3894 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
3895 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3897 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
/* Pull the user payload from the BSG request SG list into the DMA page. */
3903 pbuf = (uint8_t *)dmabuf->virt;
3904 size = job->request_payload.payload_len;
3905 sg_copy_to_buffer(job->request_payload.sg_list,
3906 job->request_payload.sg_cnt,
3909 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
3910 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3911 "2966 SLI_CONFIG (mse) ext-buffer wr set "
3912 "buffer[%d], size:%d\n",
3913 phba->mbox_ext_buf_ctx.seqNum, size);
3916 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3917 "2967 SLI_CONFIG (hbd) ext-buffer wr set "
3918 "buffer[%d], size:%d\n",
3919 phba->mbox_ext_buf_ctx.seqNum, size);
3923 /* set up external buffer descriptor and add to external buffer list */
3924 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
3925 phba->mbox_ext_buf_ctx.mbx_dmabuf,
3927 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
/* All external buffers are in: build and fire the mailbox command. */
3929 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
3930 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3931 "2968 SLI_CONFIG ext-buffer wr all %d "
3932 "ebuffers received\n",
3933 phba->mbox_ext_buf_ctx.numBuf);
3934 /* mailbox command structure for base driver */
3935 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3940 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
/* Seed the queue entry from the cached first mailbox DMA page. */
3941 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
3942 pmb = &pmboxq->u.mb;
3943 memcpy(pmb, pbuf, sizeof(*pmb));
3944 pmb->mbxOwner = OWN_HOST;
3945 pmboxq->vport = phba->pport;
3947 /* callback for multi-buffer write mailbox command */
3948 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3950 /* context fields to callback function */
3951 pmboxq->context1 = dd_data;
3952 dd_data->type = TYPE_MBOX;
3953 dd_data->context_un.mbox.pmboxq = pmboxq;
3954 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
3955 dd_data->context_un.mbox.set_job = job;
3956 job->dd_data = dd_data;
/* Ownership of the session moves to the port while the mbox is in flight. */
3959 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3961 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3962 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3963 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3964 "2969 Issued SLI_CONFIG ext-buffer "
3965 "maibox command, rc:x%x\n", rc);
3966 return SLI_CONFIG_HANDLED;
3968 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3969 "2970 Failed to issue SLI_CONFIG ext-buffer "
3970 "maibox command, rc:x%x\n", rc);
3975 /* wait for additional external buffers */
3976 job->reply->result = 0;
3978 return SLI_CONFIG_HANDLED;
/* Error-path cleanup; presumably reached via a goto label elided here. */
3981 lpfc_bsg_dma_page_free(phba, dmabuf);
3988 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
3989 * @phba: Pointer to HBA context object.
3990 * @mb: Pointer to a BSG mailbox object.
3991 * @dmabuff: Pointer to a DMA buffer descriptor.
3993 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
3994 * command with multiple non-embedded external buffers.
3997 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
3998 struct lpfc_dmabuf *dmabuf)
4002 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4003 "2971 SLI_CONFIG buffer (type:x%x)\n",
4004 phba->mbox_ext_buf_ctx.mboxType);
/*
 * Read session: buffers may only be drained once the mailbox command
 * has completed (state DONE); any other state aborts the session.
 */
4006 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
4007 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4008 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4009 "2972 SLI_CONFIG rd buffer state "
4011 phba->mbox_ext_buf_ctx.state);
4012 lpfc_bsg_mbox_ext_abort(phba);
4015 rc = lpfc_bsg_read_ebuf_get(phba, job);
/* The incoming dmabuf is unused on the read path; release it. */
4016 if (rc == SLI_CONFIG_HANDLED)
4017 lpfc_bsg_dma_page_free(phba, dmabuf);
4018 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
/* Write session: buffers may only be staged while the host owns it. */
4019 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4020 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4021 "2973 SLI_CONFIG wr buffer state "
4023 phba->mbox_ext_buf_ctx.state);
4024 lpfc_bsg_mbox_ext_abort(phba);
4027 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4033 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4034 * @phba: Pointer to HBA context object.
4035 * @mb: Pointer to a BSG mailbox object.
4036 * @dmabuff: Pointer to a DMA buffer descriptor.
4038 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4039 * (0x9B) mailbox commands and external buffers.
4042 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4043 struct lpfc_dmabuf *dmabuf)
4045 struct dfc_mbox_req *mbox_req;
4046 int rc = SLI_CONFIG_NOT_HANDLED;
4049 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4051 /* mbox command with/without single external buffer */
4052 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4055 /* mbox command and first external buffer */
4056 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
/* A session must open with sequence number 1; anything else is broken. */
4057 if (mbox_req->extSeqNum == 1) {
4058 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4059 "2974 SLI_CONFIG mailbox: tag:%d, "
4060 "seq:%d\n", mbox_req->extMboxTag,
4061 mbox_req->extSeqNum);
4062 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4065 goto sli_cfg_ext_error;
4069 * handle additional external buffers
4072 /* check broken pipe conditions */
/* Tag must match the open session, sequence must be in range and contiguous. */
4073 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4074 goto sli_cfg_ext_error;
4075 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4076 goto sli_cfg_ext_error;
4077 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4078 goto sli_cfg_ext_error;
4080 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4081 "2975 SLI_CONFIG mailbox external buffer: "
4082 "extSta:x%x, tag:%d, seq:%d\n",
4083 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4084 mbox_req->extSeqNum);
4085 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4089 /* all other cases, broken pipe */
4090 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4091 "2976 SLI_CONFIG mailbox broken pipe: "
4092 "ctxSta:x%x, ctxNumBuf:%d "
4093 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4094 phba->mbox_ext_buf_ctx.state,
4095 phba->mbox_ext_buf_ctx.numBuf,
4096 phba->mbox_ext_buf_ctx.mbxTag,
4097 phba->mbox_ext_buf_ctx.seqNum,
4098 mbox_req->extMboxTag, mbox_req->extSeqNum);
/* Broken pipe: drop the whole session and start over. */
4100 lpfc_bsg_mbox_ext_session_reset(phba);
4106 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4107 * @phba: Pointer to HBA context object.
4108 * @mb: Pointer to a mailbox object.
4109 * @vport: Pointer to a vport object.
4111 * Allocate a tracking object, mailbox command memory, get a mailbox
4112 * from the mailbox pool, copy the caller mailbox command.
4114 * If offline and the sli is active we need to poll for the command (port is
4115 * being reset) and complete the job, otherwise issue the mailbox command and
4116 * let our completion handler finish the command.
4119 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4120 struct lpfc_vport *vport)
4122 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4123 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4124 /* a 4k buffer to hold the mb and extended data from/to the bsg */
4125 uint8_t *pmbx = NULL;
4126 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4127 struct lpfc_dmabuf *dmabuf = NULL;
4128 struct dfc_mbox_req *mbox_req;
4129 struct READ_EVENT_LOG_VAR *rdEventLog;
4130 uint32_t transmit_length, receive_length, mode;
4131 struct lpfc_mbx_sli4_config *sli4_config;
4132 struct lpfc_mbx_nembed_cmd *nembed_sge;
4133 struct mbox_header *header;
4134 struct ulp_bde64 *bde;
4135 uint8_t *ext = NULL;
4141 /* in case no data is transferred */
4142 job->reply->reply_payload_rcv_len = 0;
4144 /* sanity check to protect driver */
4145 if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4146 job->request_payload.payload_len > BSG_MBOX_SIZE) {
4152 * Don't allow mailbox commands to be sent when blocked or when in
4153 * the middle of discovery
4155 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4161 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4163 /* check if requested extended data lengths are valid */
4164 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4165 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4170 dmabuf = lpfc_bsg_dma_page_alloc(phba);
4171 if (!dmabuf || !dmabuf->virt) {
4176 /* Get the mailbox command or external buffer from BSG */
4177 pmbx = (uint8_t *)dmabuf->virt;
4178 size = job->request_payload.payload_len;
4179 sg_copy_to_buffer(job->request_payload.sg_list,
4180 job->request_payload.sg_cnt, pmbx, size);
4182 /* Handle possible SLI_CONFIG with non-embedded payloads */
4183 if (phba->sli_rev == LPFC_SLI_REV4) {
4184 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4185 if (rc == SLI_CONFIG_HANDLED)
4189 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
/* Reject commands the pass-through interface does not permit. */
4192 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4194 goto job_done; /* must be negative */
4196 /* allocate our bsg tracking structure */
4197 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4199 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4200 "2727 Failed allocation of dd_data\n");
4205 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4210 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
/* Copy the user-supplied mailbox into the queue entry. */
4212 pmb = &pmboxq->u.mb;
4213 memcpy(pmb, pmbx, sizeof(*pmb));
4214 pmb->mbxOwner = OWN_HOST;
4215 pmboxq->vport = vport;
4217 /* If HBA encountered an error attention, allow only DUMP
4218 * or RESTART mailbox commands until the HBA is restarted.
4220 if (phba->pport->stopped &&
4221 pmb->mbxCommand != MBX_DUMP_MEMORY &&
4222 pmb->mbxCommand != MBX_RESTART &&
4223 pmb->mbxCommand != MBX_WRITE_VPARMS &&
4224 pmb->mbxCommand != MBX_WRITE_WWN)
4225 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4226 "2797 mbox: Issued mailbox cmd "
4227 "0x%x while in stopped state.\n",
4230 /* extended mailbox commands will need an extended buffer */
4231 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4232 /* any data for the device? */
4233 if (mbox_req->inExtWLen) {
/* Extended data lives right after the MAILBOX_t in the same DMA page. */
4235 ext = from + sizeof(MAILBOX_t);
4237 pmboxq->context2 = ext;
4238 pmboxq->in_ext_byte_len =
4239 mbox_req->inExtWLen * sizeof(uint32_t);
4240 pmboxq->out_ext_byte_len =
4241 mbox_req->outExtWLen * sizeof(uint32_t);
4242 pmboxq->mbox_offset_word = mbox_req->mbOffset;
4245 /* biu diag will need a kernel buffer to transfer the data
4246 * allocate our own buffer and setup the mailbox command to
4249 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4250 transmit_length = pmb->un.varWords[1];
4251 receive_length = pmb->un.varWords[4];
4252 /* transmit length cannot be greater than receive length or
4253 * mailbox extension size
4255 if ((transmit_length > receive_length) ||
4256 (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
/* Point the xmit/rcv BDEs at our DMA page past the mailbox header. */
4260 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4261 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4262 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4263 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4265 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4266 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4267 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4268 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4269 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4270 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4271 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4272 rdEventLog = &pmb->un.varRdEventLog;
4273 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4274 mode = bf_get(lpfc_event_log, rdEventLog);
4276 /* receive length cannot be greater than mailbox
4279 if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4284 /* mode zero uses a bde like biu diags command */
4286 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4287 + sizeof(MAILBOX_t));
4288 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4289 + sizeof(MAILBOX_t));
4291 } else if (phba->sli_rev == LPFC_SLI_REV4) {
4292 if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
4293 /* rebuild the command for sli4 using our own buffers
4294 * like we do for biu diags
4296 receive_length = pmb->un.varWords[2];
4297 /* receive length cannot be greater than mailbox
4300 if (receive_length == 0) {
4304 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4305 + sizeof(MAILBOX_t));
4306 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4307 + sizeof(MAILBOX_t));
4308 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4309 pmb->un.varUpdateCfg.co) {
4310 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4312 /* bde size cannot be greater than mailbox ext size */
4313 if (bde->tus.f.bdeSize >
4314 BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4318 bde->addrHigh = putPaddrHigh(dmabuf->phys
4319 + sizeof(MAILBOX_t));
4320 bde->addrLow = putPaddrLow(dmabuf->phys
4321 + sizeof(MAILBOX_t));
4322 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4323 /* Handling non-embedded SLI_CONFIG mailbox command */
4324 sli4_config = &pmboxq->u.mqe.un.sli4_config;
4325 if (!bf_get(lpfc_mbox_hdr_emb,
4326 &sli4_config->header.cfg_mhdr)) {
4327 /* rebuild the command for sli4 using our
4328 * own buffers like we do for biu diags
4330 header = (struct mbox_header *)
4331 &pmb->un.varWords[0];
4332 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4333 &pmb->un.varWords[0];
4334 receive_length = nembed_sge->sge[0].length;
4336 /* receive length cannot be greater than
4337 * mailbox extension size
4339 if ((receive_length == 0) ||
4341 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
/* Retarget the first SGE at our DMA page past the mailbox header. */
4346 nembed_sge->sge[0].pa_hi =
4347 putPaddrHigh(dmabuf->phys
4348 + sizeof(MAILBOX_t));
4349 nembed_sge->sge[0].pa_lo =
4350 putPaddrLow(dmabuf->phys
4351 + sizeof(MAILBOX_t));
4356 dd_data->context_un.mbox.dmabuffers = dmabuf;
4358 /* setup wake call as IOCB callback */
4359 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4361 /* setup context field to pass wait_queue pointer to wake function */
4362 pmboxq->context1 = dd_data;
4363 dd_data->type = TYPE_MBOX;
4364 dd_data->context_un.mbox.pmboxq = pmboxq;
4365 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4366 dd_data->context_un.mbox.set_job = job;
4367 dd_data->context_un.mbox.ext = ext;
4368 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4369 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4370 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4371 job->dd_data = dd_data;
/* Offline or SLI inactive: must poll synchronously and finish inline. */
4373 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4374 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4375 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4376 if (rc != MBX_SUCCESS) {
4377 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4381 /* job finished, copy the data */
4382 memcpy(pmbx, pmb, sizeof(*pmb));
4383 job->reply->reply_payload_rcv_len =
4384 sg_copy_from_buffer(job->reply_payload.sg_list,
4385 job->reply_payload.sg_cnt,
4387 /* not waiting mbox already done */
/* Online path: issue async; the completion handler finishes the job. */
4392 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4393 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4394 return 1; /* job started */
4397 /* common exit for error or job completed inline */
4399 mempool_free(pmboxq, phba->mbox_mem_pool);
4400 lpfc_bsg_dma_page_free(phba, dmabuf);
4408 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4409 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
4412 lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
4414 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4415 struct lpfc_hba *phba = vport->phba;
4416 struct dfc_mbox_req *mbox_req;
4419 /* mix-and-match backward compatibility */
4420 job->reply->reply_payload_rcv_len = 0;
/*
 * Older user-space tools send a shorter dfc_mbox_req without the
 * extMboxTag/extSeqNum fields; zero them so the multi-buffer path
 * is cleanly bypassed for such requests.
 */
4421 if (job->request_len <
4422 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4423 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4424 "2737 Mix-and-match backward compability "
4425 "between MBOX_REQ old size:%d and "
4426 "new request size:%d\n",
4427 (int)(job->request_len -
4428 sizeof(struct fc_bsg_request)),
4429 (int)sizeof(struct dfc_mbox_req));
4430 mbox_req = (struct dfc_mbox_req *)
4431 job->request->rqst_data.h_vendor.vendor_cmd;
4432 mbox_req->extMboxTag = 0;
4433 mbox_req->extSeqNum = 0;
4436 rc = lpfc_bsg_issue_mbox(phba, job, vport);
/* Inline completion: report success and detach tracking data. */
4440 job->reply->result = 0;
4441 job->dd_data = NULL;
4444 /* job submitted, will complete later*/
4445 rc = 0; /* return zero, no error */
4447 /* some error occurred */
4448 job->reply->result = rc;
4449 job->dd_data = NULL;
4456 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
4457 * @phba: Pointer to HBA context object.
4458 * @cmdiocbq: Pointer to command iocb.
4459 * @rspiocbq: Pointer to response iocb.
4461 * This function is the completion handler for iocbs issued using
4462 * lpfc_menlo_cmd function. This function is called by the
4463 * ring event handler function without any lock held. This function
4464 * can be called from both worker thread context and interrupt
4465 * context. This function also can be called from another thread which
4466 * cleans up the SLI layer objects.
4467 * This function copies the contents of the response iocb to the
4468 * response iocb memory object provided by the caller of
4469 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
4470 * sleeps for the iocb completion.
4473 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4474 struct lpfc_iocbq *cmdiocbq,
4475 struct lpfc_iocbq *rspiocbq)
4477 struct bsg_job_data *dd_data;
4478 struct fc_bsg_job *job;
4480 struct lpfc_dmabuf *bmp;
4481 struct lpfc_bsg_menlo *menlo;
4482 unsigned long flags;
4483 struct menlo_response *menlo_resp;
/* ct_ev_lock serializes against the BSG timeout handler. */
4486 spin_lock_irqsave(&phba->ct_ev_lock, flags);
4487 dd_data = cmdiocbq->context1;
4489 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4493 menlo = &dd_data->context_un.menlo;
4494 job = menlo->set_job;
4495 job->dd_data = NULL; /* so timeout handler does not reply */
/* Under hbalock, mark the iocb done and mirror the response into context2. */
4497 spin_lock(&phba->hbalock);
4498 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
4499 if (cmdiocbq->context2 && rspiocbq)
4500 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
4501 &rspiocbq->iocb, sizeof(IOCB_t));
4502 spin_unlock(&phba->hbalock);
4505 rspiocbq = menlo->rspiocbq;
4506 rsp = &rspiocbq->iocb;
4508 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
4509 job->request_payload.sg_cnt, DMA_TO_DEVICE);
4510 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
4511 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4513 /* always return the xri, this would be used in the case
4514 * of a menlo download to allow the data to be sent as a continuation
4517 menlo_resp = (struct menlo_response *)
4518 job->reply->reply_data.vendor_reply.vendor_rsp;
4519 menlo_resp->xri = rsp->ulpContext;
4520 if (rsp->ulpStatus) {
4521 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
/* Low byte of word4 carries the local-reject error code. */
4522 switch (rsp->un.ulpWord[4] & 0xff) {
4523 case IOERR_SEQUENCE_TIMEOUT:
4526 case IOERR_INVALID_RPI:
4536 job->reply->reply_payload_rcv_len =
4537 rsp->un.genreq64.bdl.bdeSize;
/* Release the BPL buffer and both iocbs now that the exchange is over. */
4539 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
4540 lpfc_sli_release_iocbq(phba, rspiocbq);
4541 lpfc_sli_release_iocbq(phba, cmdiocbq);
4544 /* make error code available to userspace */
4545 job->reply->result = rc;
4546 /* complete the job back to userspace */
4548 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4553 * lpfc_menlo_cmd - send an ioctl for menlo hardware
4554 * @job: fc_bsg_job to handle
4556 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
4557 * all the command completions will return the xri for the command.
4558 * For menlo data requests a gen request 64 CX is used to continue the exchange
4559 * supplied in the menlo request header xri field.
4562 lpfc_menlo_cmd(struct fc_bsg_job *job)
4564 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4565 struct lpfc_hba *phba = vport->phba;
4566 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
4569 struct menlo_command *menlo_cmd;
4570 struct menlo_response *menlo_resp;
4571 struct lpfc_dmabuf *bmp = NULL;
4574 struct scatterlist *sgel = NULL;
4577 struct bsg_job_data *dd_data;
4578 struct ulp_bde64 *bpl = NULL;
4580 /* in case no data is returned return just the return code */
4581 job->reply->reply_payload_rcv_len = 0;
/* Validate request/reply sizes before touching the vendor payloads. */
4583 if (job->request_len <
4584 sizeof(struct fc_bsg_request) +
4585 sizeof(struct menlo_command)) {
4586 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4587 "2784 Received MENLO_CMD request below "
4593 if (job->reply_len <
4594 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
4595 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4596 "2785 Received MENLO_CMD reply below "
4602 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
4603 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4604 "2786 Adapter does not support menlo "
4610 menlo_cmd = (struct menlo_command *)
4611 job->request->rqst_data.h_vendor.vendor_cmd;
4613 menlo_resp = (struct menlo_response *)
4614 job->reply->reply_data.vendor_reply.vendor_rsp;
4616 /* allocate our bsg tracking structure */
4617 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4619 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4620 "2787 Failed allocation of dd_data\n");
/* bmp holds the buffer pointer list (BPL) page for the gen request. */
4625 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4631 cmdiocbq = lpfc_sli_get_iocbq(phba);
4637 rspiocbq = lpfc_sli_get_iocbq(phba);
4643 rsp = &rspiocbq->iocb;
4645 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
4651 INIT_LIST_HEAD(&bmp->list);
/* Map the request SG list and build one 64-bit BDE per segment. */
4652 bpl = (struct ulp_bde64 *) bmp->virt;
4653 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
4654 job->request_payload.sg_cnt, DMA_TO_DEVICE);
4655 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
4656 busaddr = sg_dma_address(sgel);
4657 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
4658 bpl->tus.f.bdeSize = sg_dma_len(sgel);
4659 bpl->tus.w = cpu_to_le32(bpl->tus.w);
4660 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4661 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
/* Then map the reply SG list; BUFF_TYPE_BDE_64I marks input BDEs. */
4665 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
4666 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4667 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
4668 busaddr = sg_dma_address(sgel);
4669 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
4670 bpl->tus.f.bdeSize = sg_dma_len(sgel);
4671 bpl->tus.w = cpu_to_le32(bpl->tus.w);
4672 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4673 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
/* Build the GEN_REQUEST64 iocb around the BPL just assembled. */
4677 cmd = &cmdiocbq->iocb;
4678 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
4679 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
4680 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
4681 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
4682 cmd->un.genreq64.bdl.bdeSize =
4683 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
4684 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
4685 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
4686 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
4687 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
4688 cmd->ulpBdeCount = 1;
4689 cmd->ulpClass = CLASS3;
4690 cmd->ulpOwner = OWN_CHIP;
4691 cmd->ulpLe = 1; /* Limited Edition */
4692 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
4693 cmdiocbq->vport = phba->pport;
4694 /* We want the firmware to timeout before we do */
4695 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
4696 cmdiocbq->context3 = bmp;
4697 cmdiocbq->context2 = rspiocbq;
4698 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
4699 cmdiocbq->context1 = dd_data;
4700 cmdiocbq->context2 = rspiocbq;
/* CMD requests open a new exchange (CR); DATA continues one (CX) via xri. */
4701 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
4702 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
4703 cmd->ulpPU = MENLO_PU; /* 3 */
4704 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
4705 cmd->ulpContext = MENLO_CONTEXT; /* 0 */
4707 cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
4709 cmd->un.ulpWord[4] = 0;
4710 cmd->ulpContext = menlo_cmd->xri;
4713 dd_data->type = TYPE_MENLO;
4714 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
4715 dd_data->context_un.menlo.rspiocbq = rspiocbq;
4716 dd_data->context_un.menlo.set_job = job;
4717 dd_data->context_un.menlo.bmp = bmp;
4719 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
4721 if (rc == IOCB_SUCCESS)
4722 return 0; /* done for now */
4724 /* iocb failed so cleanup */
4725 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
4726 job->request_payload.sg_cnt, DMA_TO_DEVICE);
4727 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
4728 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4730 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
4733 lpfc_sli_release_iocbq(phba, rspiocbq);
4735 lpfc_sli_release_iocbq(phba, cmdiocbq);
4741 /* make error code available to userspace */
4742 job->reply->result = rc;
4743 job->dd_data = NULL;
4748 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
4749 * @job: fc_bsg_job to handle
4752 lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
/* First word of the vendor command selects the sub-operation. */
4754 int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
4758 case LPFC_BSG_VENDOR_SET_CT_EVENT:
4759 rc = lpfc_bsg_hba_set_event(job);
4761 case LPFC_BSG_VENDOR_GET_CT_EVENT:
4762 rc = lpfc_bsg_hba_get_event(job);
4764 case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
4765 rc = lpfc_bsg_send_mgmt_rsp(job);
4767 case LPFC_BSG_VENDOR_DIAG_MODE:
4768 rc = lpfc_bsg_diag_loopback_mode(job);
4770 case LPFC_BSG_VENDOR_DIAG_MODE_END:
4771 rc = lpfc_sli4_bsg_diag_mode_end(job);
4773 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
4774 rc = lpfc_bsg_diag_loopback_run(job);
4776 case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
4777 rc = lpfc_sli4_bsg_link_diag_test(job);
4779 case LPFC_BSG_VENDOR_GET_MGMT_REV:
4780 rc = lpfc_bsg_get_dfc_rev(job);
4782 case LPFC_BSG_VENDOR_MBOX:
4783 rc = lpfc_bsg_mbox_cmd(job);
/* MENLO_CMD and MENLO_DATA share one handler; it branches on cmd type. */
4785 case LPFC_BSG_VENDOR_MENLO_CMD:
4786 case LPFC_BSG_VENDOR_MENLO_DATA:
4787 rc = lpfc_menlo_cmd(job);
/* Unknown vendor command: report the error back to user space. */
4791 job->reply->reply_payload_rcv_len = 0;
4792 /* make error code available to userspace */
4793 job->reply->result = rc;
4801 * lpfc_bsg_request - handle a bsg request from the FC transport
4802 * @job: fc_bsg_job to handle
4805 lpfc_bsg_request(struct fc_bsg_job *job)
/* Dispatch on the FC transport message code. */
4810 msgcode = job->request->msgcode;
4812 case FC_BSG_HST_VENDOR:
4813 rc = lpfc_bsg_hst_vendor(job);
4815 case FC_BSG_RPT_ELS:
4816 rc = lpfc_bsg_rport_els(job);
4819 rc = lpfc_bsg_send_mgmt_cmd(job);
/* Unsupported message code: report the error back to user space. */
4823 job->reply->reply_payload_rcv_len = 0;
4824 /* make error code available to userspace */
4825 job->reply->result = rc;
4833 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
4834 * @job: fc_bsg_job that has timed out
4836 * This function just aborts the job's IOCB. The aborted IOCB will return to
4837 * the waiting function which will handle passing the error back to userspace
4840 lpfc_bsg_timeout(struct fc_bsg_job *job)
4842 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4843 struct lpfc_hba *phba = vport->phba;
4844 struct lpfc_iocbq *cmdiocb;
4845 struct lpfc_bsg_event *evt;
4846 struct lpfc_bsg_iocb *iocb;
4847 struct lpfc_bsg_mbox *mbox;
4848 struct lpfc_bsg_menlo *menlo;
4849 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
4850 struct bsg_job_data *dd_data;
4851 unsigned long flags;
4853 spin_lock_irqsave(&phba->ct_ev_lock, flags);
4854 dd_data = (struct bsg_job_data *)job->dd_data;
4855 /* timeout and completion crossed paths if no dd_data */
4857 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4861 switch (dd_data->type) {
4863 iocb = &dd_data->context_un.iocb;
4864 cmdiocb = iocb->cmdiocbq;
4865 /* hint to completion handler that the job timed out */
4866 job->reply->result = -EAGAIN;
4867 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4868 /* this will call our completion handler */
4869 spin_lock_irq(&phba->hbalock);
4870 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
4871 spin_unlock_irq(&phba->hbalock);
4874 evt = dd_data->context_un.evt;
4875 /* this event has no job anymore */
4876 evt->set_job = NULL;
4877 job->dd_data = NULL;
4878 job->reply->reply_payload_rcv_len = 0;
4879 /* Return -EAGAIN which is our way of signallying the
4882 job->reply->result = -EAGAIN;
4883 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4887 mbox = &dd_data->context_un.mbox;
4888 /* this mbox has no job anymore */
4889 mbox->set_job = NULL;
4890 job->dd_data = NULL;
4891 job->reply->reply_payload_rcv_len = 0;
4892 job->reply->result = -EAGAIN;
4893 /* the mbox completion handler can now be run */
4894 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4896 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4897 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4900 menlo = &dd_data->context_un.menlo;
4901 cmdiocb = menlo->cmdiocbq;
4902 /* hint to completion handler that the job timed out */
4903 job->reply->result = -EAGAIN;
4904 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4905 /* this will call our completion handler */
4906 spin_lock_irq(&phba->hbalock);
4907 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
4908 spin_unlock_irq(&phba->hbalock);
4911 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4915 /* scsi transport fc fc_bsg_job_timeout expects a zero return code,
4916 * otherwise an error message will be displayed on the console
4917 * so always return success (zero)