git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb...
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Jun 2014 01:54:06 +0000 (18:54 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 10 Jun 2014 01:54:06 +0000 (18:54 -0700)
Pull SCSI updates from James Bottomley:
 "This patch consists of the usual driver updates (qla2xxx, qla4xxx,
  lpfc, be2iscsi, fnic, ufs, NCR5380) The NCR5380 is the addition to
  maintained status of a long neglected driver for older hardware.  In
  addition there are a lot of minor fixes and cleanups and some more
  updates to make scsi mq ready"

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (130 commits)
  include/scsi/osd_protocol.h: remove unnecessary __constant
  mvsas: Recognise device/subsystem 9485/9485 as 88SE9485
  Revert "be2iscsi: Fix processing cqe for cxn whose endpoint is freed"
  mptfusion: fix msgContext in mptctl_hp_hostinfo
  acornscsi: remove linked command support
  scsi/NCR5380: dprintk macro
  fusion: Remove use of DEF_SCSI_QCMD
  fusion: Add free msg frames to the head, not tail of list
  mpt2sas: Add free smids to the head, not tail of list
  mpt2sas: Remove use of DEF_SCSI_QCMD
  mpt2sas: Remove uses of serial_number
  mpt3sas: Remove use of DEF_SCSI_QCMD
  mpt3sas: Remove uses of serial_number
  qla2xxx: Use kmemdup instead of kmalloc + memcpy
  qla4xxx: Use kmemdup instead of kmalloc + memcpy
  qla2xxx: fix incorrect debug printk
  be2iscsi: Bump the driver version
  be2iscsi: Fix processing cqe for cxn whose endpoint is freed
  be2iscsi: Fix destroy MCC-CQ before MCC-EQ is destroyed
  be2iscsi: Fix memory corruption in MBX path
  ...

117 files changed:
Documentation/scsi/LICENSE.qla2xxx
MAINTAINERS
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptctl.c
drivers/message/fusion/mptfc.c
drivers/message/fusion/mptsas.c
drivers/message/fusion/mptscsih.c
drivers/message/fusion/mptscsih.h
drivers/message/fusion/mptspi.c
drivers/scsi/NCR5380.c
drivers/scsi/NCR5380.h
drivers/scsi/aic7xxx/aic79xx_pci.c
drivers/scsi/arm/acornscsi.c
drivers/scsi/arm/cumana_1.c
drivers/scsi/arm/oak.c
drivers/scsi/atari_NCR5380.c
drivers/scsi/atari_scsi.c
drivers/scsi/atari_scsi.h
drivers/scsi/be2iscsi/be.h
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_main.h
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/be2iscsi/be_mgmt.h
drivers/scsi/bfa/bfad.c
drivers/scsi/bnx2fc/bnx2fc_hwi.c
drivers/scsi/dtc.c
drivers/scsi/esas2r/esas2r_main.c
drivers/scsi/fnic/fnic.h
drivers/scsi/fnic/fnic_debugfs.c
drivers/scsi/fnic/fnic_fcs.c
drivers/scsi/fnic/fnic_main.c
drivers/scsi/fnic/fnic_scsi.c
drivers/scsi/fnic/fnic_trace.c
drivers/scsi/fnic/fnic_trace.h
drivers/scsi/g_NCR5380.c
drivers/scsi/g_NCR5380.h
drivers/scsi/hpsa.c
drivers/scsi/hpsa.h
drivers/scsi/libiscsi.c
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/mac_scsi.c
drivers/scsi/mac_scsi.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.h
drivers/scsi/mpt2sas/mpt2sas_ctl.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/mpt3sas/mpt3sas_base.h
drivers/scsi/mpt3sas/mpt3sas_ctl.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/pas16.h
drivers/scsi/pm8001/pm8001_ctl.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_bsg.c
drivers/scsi/qla2xxx/qla_bsg.h
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_dbg.h
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_dfs.c
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_inline.h
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_mr.c
drivers/scsi/qla2xxx/qla_mr.h
drivers/scsi/qla2xxx/qla_nx.c
drivers/scsi/qla2xxx/qla_nx.h
drivers/scsi/qla2xxx/qla_nx2.c
drivers/scsi/qla2xxx/qla_nx2.h
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_settings.h
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/qla2xxx/qla_tmpl.c
drivers/scsi/qla2xxx/qla_tmpl.h
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/qla4xxx/ql4_83xx.c
drivers/scsi/qla4xxx/ql4_83xx.h
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_fw.h
drivers/scsi/qla4xxx/ql4_glbl.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_isr.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_nx.c
drivers/scsi/qla4xxx/ql4_nx.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sr.c
drivers/scsi/sun3_NCR5380.c
drivers/scsi/sun3_scsi.c
drivers/scsi/sun3_scsi.h
drivers/scsi/sun3_scsi_vme.c
drivers/scsi/t128.c
drivers/scsi/t128.h
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/scsi/ufs/ufshci.h
drivers/scsi/virtio_scsi.c
include/scsi/osd_protocol.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_driver.h

index 5020b7b5a24410f5cac04c2a8cbc849b3546a8e8..52f0b4359234ffddd1a5d5ea13cd18e05d3b5623 100644 (file)
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2013 QLogic Corporation
+Copyright (c) 2003-2014 QLogic Corporation
 QLogic Linux FC-FCoE Driver
 
 This program includes a device driver for Linux 3.x.
index 77556fc91cdd0892d845e620e612b61e644c4ce1..a1f4b576628a5193c3dc5a73fe6fffc8b14a32e4 100644 (file)
@@ -4390,7 +4390,7 @@ S:        Supported
 F:     drivers/crypto/nx/
 
 IBM Power 842 compression accelerator
-M:     Robert Jennings <rcj@linux.vnet.ibm.com>
+M:     Nathan Fontenot <nfont@linux.vnet.ibm.com>
 S:     Supported
 F:     drivers/crypto/nx/nx-842.c
 F:     include/linux/nx842.h
@@ -4406,12 +4406,18 @@ L:      netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/ibm/ibmveth.*
 
-IBM Power Virtual SCSI/FC Device Drivers
-M:     Robert Jennings <rcj@linux.vnet.ibm.com>
+IBM Power Virtual SCSI Device Drivers
+M:     Nathan Fontenot <nfont@linux.vnet.ibm.com>
 L:     linux-scsi@vger.kernel.org
 S:     Supported
-F:     drivers/scsi/ibmvscsi/
-X:     drivers/scsi/ibmvscsi/ibmvstgt.c
+F:     drivers/scsi/ibmvscsi/ibmvscsi*
+F:     drivers/scsi/ibmvscsi/viosrp.h
+
+IBM Power Virtual FC Device Drivers
+M:     Brian King <brking@linux.vnet.ibm.com>
+L:     linux-scsi@vger.kernel.org
+S:     Supported
+F:     drivers/scsi/ibmvscsi/ibmvfc*
 
 IBM ServeRAID RAID DRIVER
 P:     Jack Hammer
@@ -6028,6 +6034,28 @@ M:       Petr Vandrovec <petr@vandrovec.name>
 S:     Odd Fixes
 F:     fs/ncpfs/
 
+NCR 5380 SCSI DRIVERS
+M:     Finn Thain <fthain@telegraphics.com.au>
+M:     Michael Schmitz <schmitzmic@gmail.com>
+L:     linux-scsi@vger.kernel.org
+S:     Maintained
+F:     Documentation/scsi/g_NCR5380.txt
+F:     drivers/scsi/NCR5380.*
+F:     drivers/scsi/arm/cumana_1.c
+F:     drivers/scsi/arm/oak.c
+F:     drivers/scsi/atari_NCR5380.c
+F:     drivers/scsi/atari_scsi.*
+F:     drivers/scsi/dmx3191d.c
+F:     drivers/scsi/dtc.*
+F:     drivers/scsi/g_NCR5380.*
+F:     drivers/scsi/g_NCR5380_mmio.c
+F:     drivers/scsi/mac_scsi.*
+F:     drivers/scsi/pas16.*
+F:     drivers/scsi/sun3_NCR5380.c
+F:     drivers/scsi/sun3_scsi.*
+F:     drivers/scsi/sun3_scsi_vme.c
+F:     drivers/scsi/t128.*
+
 NCR DUAL 700 SCSI DRIVER (MICROCHANNEL)
 M:     "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
 L:     linux-scsi@vger.kernel.org
index 570b18a113ffca813d2588dfc02c879e626fae9e..ebc0af7d769c01699aa8077b4231a5f61fc86ec2 100644 (file)
@@ -1037,7 +1037,7 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
                goto out;
        /* signature to know if this mf is freed */
        mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
-       list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
+       list_add(&mf->u.frame.linkage.list, &ioc->FreeQ);
 #ifdef MFCNT
        ioc->mfcnt--;
 #endif
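
The mptbase.c hunk above frees message frames to the head of FreeQ instead of the tail, so the next allocation hands back the most recently used (and still cache-warm) frame. Below is a minimal user-space sketch of that LIFO-versus-FIFO free-list behaviour; the frame type and helpers are invented for illustration and are not the real MPT structures.

/* lifo_freelist.c - toy free list: head insertion (LIFO) vs tail
 * insertion (FIFO).  Hypothetical "frame" type, not MPT_FRAME_HDR. */
#include <stdio.h>
#include <stddef.h>

struct frame {
	int id;
	struct frame *next;
};

/* LIFO: the freed frame goes to the head, so it is handed out again
 * first while its cache lines are still warm (the list_add() case). */
static void free_frame_head(struct frame **freeq, struct frame *f)
{
	f->next = *freeq;
	*freeq = f;
}

/* FIFO: the freed frame goes to the tail, so every other frame in the
 * pool is cycled through before reuse (the list_add_tail() case). */
static void free_frame_tail(struct frame **freeq, struct frame *f)
{
	struct frame **p = freeq;

	while (*p)
		p = &(*p)->next;
	f->next = NULL;
	*p = f;
}

static struct frame *alloc_frame(struct frame **freeq)
{
	struct frame *f = *freeq;

	if (f)
		*freeq = f->next;
	return f;
}

int main(void)
{
	struct frame pool[3] = { { 0, NULL }, { 1, NULL }, { 2, NULL } };
	struct frame *freeq = NULL;
	int i;

	/* Populate the free list with frames 0..2. */
	for (i = 0; i < 3; i++)
		free_frame_head(&freeq, &pool[i]);

	/* Head insertion: the frame just freed is reallocated next. */
	struct frame *f = alloc_frame(&freeq);
	free_frame_head(&freeq, f);
	printf("LIFO reuse: freed %d, next alloc %d\n", f->id,
	       alloc_frame(&freeq)->id);

	/* Tail insertion: reuse is deferred behind the rest of the pool. */
	free_frame_tail(&freeq, f);
	printf("FIFO reuse: freed %d, next alloc %d\n", f->id,
	       alloc_frame(&freeq)->id);
	return 0;
}
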
index dcc8385adeb3b8b830a58cfad62f19604122a52b..8a050e8856881ada1094954c5ce04f9584ed36e7 100644 (file)
@@ -2432,9 +2432,9 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
        int                     rc, cim_rev;
        ToolboxIstwiReadWriteRequest_t  *IstwiRWRequest;
        MPT_FRAME_HDR           *mf = NULL;
-       MPIHeader_t             *mpi_hdr;
        unsigned long           timeleft;
        int                     retval;
+       u32                     msgcontext;
 
        /* Reset long to int. Should affect IA64 and SPARC only
         */
@@ -2581,11 +2581,11 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
        }
 
        IstwiRWRequest = (ToolboxIstwiReadWriteRequest_t *)mf;
-       mpi_hdr = (MPIHeader_t *) mf;
+       msgcontext = IstwiRWRequest->MsgContext;
        memset(IstwiRWRequest,0,sizeof(ToolboxIstwiReadWriteRequest_t));
+       IstwiRWRequest->MsgContext = msgcontext;
        IstwiRWRequest->Function = MPI_FUNCTION_TOOLBOX;
        IstwiRWRequest->Tool = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
-       IstwiRWRequest->MsgContext = mpi_hdr->MsgContext;
        IstwiRWRequest->Flags = MPI_TB_ISTWI_FLAGS_READ;
        IstwiRWRequest->NumAddressBytes = 0x01;
        IstwiRWRequest->DataLength = cpu_to_le16(0x04);
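
The mptctl.c fix above saves the IOC-assigned MsgContext before the request frame is zeroed and writes it back afterwards; the old code read it through mpi_hdr, which aliases the same frame, so after the memset it could only ever pick up zero. A stand-alone sketch of the save/zero/restore pattern, with an invented request layout rather than the real MPI types:

/* msgcontext_restore.c - keep one field alive across a memset() of the
 * containing request.  The structure is invented for illustration and
 * is not the real ToolboxIstwiReadWriteRequest_t layout. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct toolbox_request {
	uint8_t  function;
	uint8_t  tool;
	uint16_t data_length;
	uint32_t msg_context;   /* handed out when the frame was obtained */
};

static void build_request(struct toolbox_request *req)
{
	/* Save the context first: reading it back *after* the memset (as
	 * the old aliased mpi_hdr pointer effectively did) would see 0. */
	uint32_t msgcontext = req->msg_context;

	memset(req, 0, sizeof(*req));
	req->msg_context = msgcontext;  /* restore the saved value */
	req->function    = 0x17;        /* placeholder opcode */
	req->data_length = 4;
}

int main(void)
{
	struct toolbox_request req = { .msg_context = 0xcafe0001u };

	build_request(&req);
	printf("msg_context survived the rebuild: 0x%08x\n",
	       (unsigned int)req.msg_context);
	return 0;
}
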
index fd75108c355e49b5c4ea035f60139b6a466c9a2c..02a3eefd6931480e096b79e708c10ec9d936d522 100644 (file)
@@ -649,7 +649,7 @@ mptfc_slave_alloc(struct scsi_device *sdev)
 }
 
 static int
-mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+mptfc_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
 {
        struct mptfc_rport_info *ri;
        struct fc_rport *rport = starget_to_rport(scsi_target(SCpnt->device));
@@ -658,14 +658,14 @@ mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 
        if (!vdevice || !vdevice->vtarget) {
                SCpnt->result = DID_NO_CONNECT << 16;
-               done(SCpnt);
+               SCpnt->scsi_done(SCpnt);
                return 0;
        }
 
        err = fc_remote_port_chkready(rport);
        if (unlikely(err)) {
                SCpnt->result = err;
-               done(SCpnt);
+               SCpnt->scsi_done(SCpnt);
                return 0;
        }
 
@@ -673,15 +673,13 @@ mptfc_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
        ri = *((struct mptfc_rport_info **)rport->dd_data);
        if (unlikely(!ri)) {
                SCpnt->result = DID_IMM_RETRY << 16;
-               done(SCpnt);
+               SCpnt->scsi_done(SCpnt);
                return 0;
        }
 
-       return mptscsih_qcmd(SCpnt,done);
+       return mptscsih_qcmd(SCpnt);
 }
 
-static DEF_SCSI_QCMD(mptfc_qcmd)
-
 /*
  *     mptfc_display_port_link_speed - displaying link speed
  *     @ioc: Pointer to MPT_ADAPTER structure
index 00d339c361fc0ecbd0b9b086e8c5756d28983d77..711fcb5cec8750556de23248aa424af5f4deb084 100644 (file)
@@ -1896,7 +1896,7 @@ mptsas_slave_alloc(struct scsi_device *sdev)
 }
 
 static int
-mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+mptsas_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
 {
        MPT_SCSI_HOST   *hd;
        MPT_ADAPTER     *ioc;
@@ -1904,11 +1904,11 @@ mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 
        if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
                SCpnt->result = DID_NO_CONNECT << 16;
-               done(SCpnt);
+               SCpnt->scsi_done(SCpnt);
                return 0;
        }
 
-       hd = shost_priv(SCpnt->device->host);
+       hd = shost_priv(shost);
        ioc = hd->ioc;
 
        if (ioc->sas_discovery_quiesce_io)
@@ -1917,11 +1917,9 @@ mptsas_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
        if (ioc->debug_level & MPT_DEBUG_SCSI)
                scsi_print_command(SCpnt);
 
-       return mptscsih_qcmd(SCpnt,done);
+       return mptscsih_qcmd(SCpnt);
 }
 
-static DEF_SCSI_QCMD(mptsas_qcmd)
-
 /**
  *     mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout
  *             if the device under question is currently in the
index 727819cc703422578c1f0f71c77efff1333f1c6c..2a1c6f21af2789205e51bd2ecdb1d29eab058580 100644 (file)
@@ -1304,7 +1304,6 @@ int mptscsih_show_info(struct seq_file *m, struct Scsi_Host *host)
 /**
  *     mptscsih_qcmd - Primary Fusion MPT SCSI initiator IO start routine.
  *     @SCpnt: Pointer to scsi_cmnd structure
- *     @done: Pointer SCSI mid-layer IO completion function
  *
  *     (linux scsi_host_template.queuecommand routine)
  *     This is the primary SCSI IO start routine.  Create a MPI SCSIIORequest
@@ -1313,7 +1312,7 @@ int mptscsih_show_info(struct seq_file *m, struct Scsi_Host *host)
  *     Returns 0. (rtn value discarded by linux scsi mid-layer)
  */
 int
-mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+mptscsih_qcmd(struct scsi_cmnd *SCpnt)
 {
        MPT_SCSI_HOST           *hd;
        MPT_FRAME_HDR           *mf;
@@ -1329,10 +1328,9 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 
        hd = shost_priv(SCpnt->device->host);
        ioc = hd->ioc;
-       SCpnt->scsi_done = done;
 
-       dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
-               ioc->name, SCpnt, done));
+       dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p\n",
+               ioc->name, SCpnt));
 
        if (ioc->taskmgmt_quiesce_io)
                return SCSI_MLQUEUE_HOST_BUSY;
index 83f503162f7a46846ce4e842362ca83782d49f75..99e3390807f3bef9fedd0c2eb8225dd86cd84999 100644 (file)
@@ -113,7 +113,7 @@ extern int mptscsih_resume(struct pci_dev *pdev);
 #endif
 extern int mptscsih_show_info(struct seq_file *, struct Scsi_Host *);
 extern const char * mptscsih_info(struct Scsi_Host *SChost);
-extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
+extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt);
 extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
        u8 id, int lun, int ctx2abort, ulong timeout);
 extern void mptscsih_slave_destroy(struct scsi_device *device);
index 5653e505f91ff0af540db4e7bb973ef3f03059a4..49d11338294bbe31e6e07c1e14aded1670ab733a 100644 (file)
@@ -780,33 +780,31 @@ static int mptspi_slave_configure(struct scsi_device *sdev)
 }
 
 static int
-mptspi_qcmd_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+mptspi_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
 {
-       struct _MPT_SCSI_HOST *hd = shost_priv(SCpnt->device->host);
+       struct _MPT_SCSI_HOST *hd = shost_priv(shost);
        VirtDevice      *vdevice = SCpnt->device->hostdata;
        MPT_ADAPTER *ioc = hd->ioc;
 
        if (!vdevice || !vdevice->vtarget) {
                SCpnt->result = DID_NO_CONNECT << 16;
-               done(SCpnt);
+               SCpnt->scsi_done(SCpnt);
                return 0;
        }
 
        if (SCpnt->device->channel == 1 &&
                mptscsih_is_phys_disk(ioc, 0, SCpnt->device->id) == 0) {
                SCpnt->result = DID_NO_CONNECT << 16;
-               done(SCpnt);
+               SCpnt->scsi_done(SCpnt);
                return 0;
        }
 
        if (spi_dv_pending(scsi_target(SCpnt->device)))
                ddvprintk(ioc, scsi_print_command(SCpnt));
 
-       return mptscsih_qcmd(SCpnt,done);
+       return mptscsih_qcmd(SCpnt);
 }
 
-static DEF_SCSI_QCMD(mptspi_qcmd)
-
 static void mptspi_slave_destroy(struct scsi_device *sdev)
 {
        struct scsi_target *starget = scsi_target(sdev);
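
The mptfc/mptsas/mptspi hunks above all make the same conversion: the DEF_SCSI_QCMD wrapper (which ran the old *_qcmd_lck variant under the host lock with a done() argument) is dropped, queuecommand takes the Scsi_Host directly, and early completions go through the callback already stored in the command (SCpnt->scsi_done) rather than a passed-in done(). The toy program below sketches only that structural idea, carrying the completion callback in the command itself; the types and names are invented, not the SCSI midlayer's.

/* qcmd_done.c - completion callback carried in the command itself,
 * so the queue routine needs neither a done() parameter nor a lock
 * just to remember it.  Illustrative types only. */
#include <stdio.h>

struct cmd {
	int id;
	int result;
	void (*done)(struct cmd *);	/* set once by the "midlayer" */
};

/* New-style queue routine: no done() argument. */
static int queue_cmd(struct cmd *c)
{
	if (c->id < 0) {		/* early failure path */
		c->result = -1;
		c->done(c);		/* complete via the stored callback */
		return 0;
	}
	printf("cmd %d queued to hardware\n", c->id);
	return 0;
}

static void my_done(struct cmd *c)
{
	printf("cmd %d completed, result %d\n", c->id, c->result);
}

int main(void)
{
	struct cmd bad = { .id = -1, .done = my_done };
	struct cmd ok  = { .id = 7,  .done = my_done };

	queue_cmd(&bad);	/* completes immediately through ->done */
	queue_cmd(&ok);
	return 0;
}
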
index bcd2238682271b017341e1be471dc4af36cfd87b..93d13fc9a2937712dbbe549a539a9add2ca4157c 100644 (file)
@@ -27,8 +27,6 @@
  */
 
 /*
- * $Log: NCR5380.c,v $
-
  * Revision 1.10 1998/9/2      Alan Cox
  *                             (alan@lxorguk.ukuu.org.uk)
  * Fixed up the timer lockups reported so far. Things still suck. Looking 
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_transport_spi.h>
 
-#ifndef NDEBUG
-#define NDEBUG 0
-#endif
-#ifndef NDEBUG_ABORT
-#define NDEBUG_ABORT 0
-#endif
-
 #if (NDEBUG & NDEBUG_LISTS)
 #define LIST(x,y) {printk("LINE:%d   Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); if ((x)==(y)) udelay(5); }
 #define REMOVE(w,x,y,z) {printk("LINE:%d   Removing: %p->%p  %p->%p \n", __LINE__, (void*)(w), (void*)(x), (void*)(y), (void*)(z)); if ((x)==(y)) udelay(5); }
@@ -1005,7 +996,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)
                LIST(cmd, tmp);
                tmp->host_scribble = (unsigned char *) cmd;
        }
-       dprintk(NDEBUG_QUEUES, ("scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"));
+       dprintk(NDEBUG_QUEUES, "scsi%d : command added to %s of queue\n", instance->host_no, (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
 
        /* Run the coroutine if it isn't already running. */
        /* Kick off command processing */
@@ -1040,7 +1031,7 @@ static void NCR5380_main(struct work_struct *work)
                /* Lock held here */
                done = 1;
                if (!hostdata->connected && !hostdata->selecting) {
-                       dprintk(NDEBUG_MAIN, ("scsi%d : not connected\n", instance->host_no));
+                       dprintk(NDEBUG_MAIN, "scsi%d : not connected\n", instance->host_no);
                        /*
                         * Search through the issue_queue for a command destined
                         * for a target that's not busy.
@@ -1048,7 +1039,7 @@ static void NCR5380_main(struct work_struct *work)
                        for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *) tmp->host_scribble) 
                        {
                                if (prev != tmp)
-                                       dprintk(NDEBUG_LISTS, ("MAIN tmp=%p   target=%d   busy=%d lun=%d\n", tmp, tmp->target, hostdata->busy[tmp->target], tmp->lun));
+                                       dprintk(NDEBUG_LISTS, "MAIN tmp=%p   target=%d   busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
                                /*  When we find one, remove it from the issue queue. */
                                if (!(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun))) {
                                        if (prev) {
@@ -1066,7 +1057,7 @@ static void NCR5380_main(struct work_struct *work)
                                         * On failure, we must add the command back to the
                                         *   issue queue so we can keep trying. 
                                         */
-                                       dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->target, tmp->lun));
+                                       dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main() : command for target %d lun %d removed from issue_queue\n", instance->host_no, tmp->device->id, tmp->device->lun);
        
                                        /*
                                         * A successful selection is defined as one that 
@@ -1095,7 +1086,7 @@ static void NCR5380_main(struct work_struct *work)
                                                tmp->host_scribble = (unsigned char *) hostdata->issue_queue;
                                                hostdata->issue_queue = tmp;
                                                done = 0;
-                                               dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, ("scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no));
+                                               dprintk(NDEBUG_MAIN|NDEBUG_QUEUES, "scsi%d : main(): select() failed, returned to issue_queue\n", instance->host_no);
                                        }
                                        /* lock held here still */
                                }       /* if target/lun is not busy */
@@ -1125,9 +1116,9 @@ static void NCR5380_main(struct work_struct *work)
 #endif
                    && (!hostdata->time_expires || time_before_eq(hostdata->time_expires, jiffies))
                    ) {
-                       dprintk(NDEBUG_MAIN, ("scsi%d : main() : performing information transfer\n", instance->host_no));
+                       dprintk(NDEBUG_MAIN, "scsi%d : main() : performing information transfer\n", instance->host_no);
                        NCR5380_information_transfer(instance);
-                       dprintk(NDEBUG_MAIN, ("scsi%d : main() : done set false\n", instance->host_no));
+                       dprintk(NDEBUG_MAIN, "scsi%d : main() : done set false\n", instance->host_no);
                        done = 0;
                } else
                        break;
@@ -1159,8 +1150,8 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
        unsigned char basr;
        unsigned long flags;
 
-       dprintk(NDEBUG_INTR, ("scsi : NCR5380 irq %d triggered\n",
-               instance->irq));
+       dprintk(NDEBUG_INTR, "scsi : NCR5380 irq %d triggered\n",
+               instance->irq);
 
        do {
                done = 1;
@@ -1173,14 +1164,14 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
                        NCR5380_dprint(NDEBUG_INTR, instance);
                        if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) {
                                done = 0;
-                               dprintk(NDEBUG_INTR, ("scsi%d : SEL interrupt\n", instance->host_no));
+                               dprintk(NDEBUG_INTR, "scsi%d : SEL interrupt\n", instance->host_no);
                                NCR5380_reselect(instance);
                                (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
                        } else if (basr & BASR_PARITY_ERROR) {
-                               dprintk(NDEBUG_INTR, ("scsi%d : PARITY interrupt\n", instance->host_no));
+                               dprintk(NDEBUG_INTR, "scsi%d : PARITY interrupt\n", instance->host_no);
                                (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
                        } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
-                               dprintk(NDEBUG_INTR, ("scsi%d : RESET interrupt\n", instance->host_no));
+                               dprintk(NDEBUG_INTR, "scsi%d : RESET interrupt\n", instance->host_no);
                                (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
                        } else {
 #if defined(REAL_DMA)
@@ -1210,7 +1201,7 @@ static irqreturn_t NCR5380_intr(int dummy, void *dev_id)
                                        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
                                }
 #else
-                               dprintk(NDEBUG_INTR, ("scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)));
+                               dprintk(NDEBUG_INTR, "scsi : unknown interrupt, BASR 0x%X, MR 0x%X, SR 0x%x\n", basr, NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG));
                                (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 #endif
                        }
@@ -1304,7 +1295,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
        hostdata->restart_select = 0;
 
        NCR5380_dprint(NDEBUG_ARBITRATION, instance);
-       dprintk(NDEBUG_ARBITRATION, ("scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id));
+       dprintk(NDEBUG_ARBITRATION, "scsi%d : starting arbitration, id = %d\n", instance->host_no, instance->this_id);
 
        /* 
         * Set the phase bits to 0, otherwise the NCR5380 won't drive the 
@@ -1333,7 +1324,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
                goto failed;
        }
 
-       dprintk(NDEBUG_ARBITRATION, ("scsi%d : arbitration complete\n", instance->host_no));
+       dprintk(NDEBUG_ARBITRATION, "scsi%d : arbitration complete\n", instance->host_no);
 
        /* 
         * The arbitration delay is 2.2us, but this is a minimum and there is 
@@ -1347,7 +1338,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
        /* Check for lost arbitration */
        if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
                NCR5380_write(MODE_REG, MR_BASE);
-               dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no));
+               dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting MR_ARBITRATE\n", instance->host_no);
                goto failed;
        }
        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL);
@@ -1360,7 +1351,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
            (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST)) {
                NCR5380_write(MODE_REG, MR_BASE);
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-               dprintk(NDEBUG_ARBITRATION, ("scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no));
+               dprintk(NDEBUG_ARBITRATION, "scsi%d : lost arbitration, deasserting ICR_ASSERT_SEL\n", instance->host_no);
                goto failed;
        }
        /* 
@@ -1370,7 +1361,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
 
        udelay(2);
 
-       dprintk(NDEBUG_ARBITRATION, ("scsi%d : won arbitration\n", instance->host_no));
+       dprintk(NDEBUG_ARBITRATION, "scsi%d : won arbitration\n", instance->host_no);
 
        /* 
         * Now that we have won arbitration, start Selection process, asserting 
@@ -1422,7 +1413,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag)
 
        udelay(1);
 
-       dprintk(NDEBUG_SELECTION, ("scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd)));
+       dprintk(NDEBUG_SELECTION, "scsi%d : selecting target %d\n", instance->host_no, scmd_id(cmd));
 
        /* 
         * The SCSI specification calls for a 250 ms timeout for the actual 
@@ -1487,7 +1478,7 @@ part2:
                collect_stats(hostdata, cmd);
                cmd->scsi_done(cmd);
                NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-               dprintk(NDEBUG_SELECTION, ("scsi%d : target did not respond within 250ms\n", instance->host_no));
+               dprintk(NDEBUG_SELECTION, "scsi%d : target did not respond within 250ms\n", instance->host_no);
                NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
                return 0;
        }
@@ -1520,7 +1511,7 @@ part2:
                goto failed;
        }
 
-       dprintk(NDEBUG_SELECTION, ("scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id));
+       dprintk(NDEBUG_SELECTION, "scsi%d : target %d selected, going into MESSAGE OUT phase.\n", instance->host_no, cmd->device->id);
        tmp[0] = IDENTIFY(((instance->irq == SCSI_IRQ_NONE) ? 0 : 1), cmd->device->lun);
 
        len = 1;
@@ -1530,7 +1521,7 @@ part2:
        data = tmp;
        phase = PHASE_MSGOUT;
        NCR5380_transfer_pio(instance, &phase, &len, &data);
-       dprintk(NDEBUG_SELECTION, ("scsi%d : nexus established.\n", instance->host_no));
+       dprintk(NDEBUG_SELECTION, "scsi%d : nexus established.\n", instance->host_no);
        /* XXX need to handle errors here */
        hostdata->connected = cmd;
        hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
@@ -1583,9 +1574,9 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
        NCR5380_setup(instance);
 
        if (!(p & SR_IO))
-               dprintk(NDEBUG_PIO, ("scsi%d : pio write %d bytes\n", instance->host_no, c));
+               dprintk(NDEBUG_PIO, "scsi%d : pio write %d bytes\n", instance->host_no, c);
        else
-               dprintk(NDEBUG_PIO, ("scsi%d : pio read %d bytes\n", instance->host_no, c));
+               dprintk(NDEBUG_PIO, "scsi%d : pio read %d bytes\n", instance->host_no, c);
 
        /* 
         * The NCR5380 chip will only drive the SCSI bus when the 
@@ -1620,11 +1611,11 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
                        break;
                }
 
-               dprintk(NDEBUG_HANDSHAKE, ("scsi%d : REQ detected\n", instance->host_no));
+               dprintk(NDEBUG_HANDSHAKE, "scsi%d : REQ detected\n", instance->host_no);
 
                /* Check for phase mismatch */
                if ((tmp & PHASE_MASK) != p) {
-                       dprintk(NDEBUG_HANDSHAKE, ("scsi%d : phase mismatch\n", instance->host_no));
+                       dprintk(NDEBUG_HANDSHAKE, "scsi%d : phase mismatch\n", instance->host_no);
                        NCR5380_dprint_phase(NDEBUG_HANDSHAKE, instance);
                        break;
                }
@@ -1660,7 +1651,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
 
                /* FIXME - if this fails bus reset ?? */
                NCR5380_poll_politely(instance, STATUS_REG, SR_REQ, 0, 5*HZ);
-               dprintk(NDEBUG_HANDSHAKE, ("scsi%d : req false, handshake complete\n", instance->host_no));
+               dprintk(NDEBUG_HANDSHAKE, "scsi%d : req false, handshake complete\n", instance->host_no);
 
 /*
  * We have several special cases to consider during REQ/ACK handshaking : 
@@ -1681,7 +1672,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance, unsigned char *phase
                }
        } while (--c);
 
-       dprintk(NDEBUG_PIO, ("scsi%d : residual %d\n", instance->host_no, c));
+       dprintk(NDEBUG_PIO, "scsi%d : residual %d\n", instance->host_no, c);
 
        *count = c;
        *data = d;
@@ -1828,7 +1819,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
                c -= 2;
        }
 #endif
-       dprintk(NDEBUG_DMA, ("scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d));
+       dprintk(NDEBUG_DMA, "scsi%d : initializing DMA channel %d for %s, %d bytes %s %0x\n", instance->host_no, instance->dma_channel, (p & SR_IO) ? "reading" : "writing", c, (p & SR_IO) ? "to" : "from", (unsigned) d);
        hostdata->dma_len = (p & SR_IO) ? NCR5380_dma_read_setup(instance, d, c) : NCR5380_dma_write_setup(instance, d, c);
 #endif
 
@@ -1857,7 +1848,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
                NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE);
 #endif                         /* def REAL_DMA */
 
-       dprintk(NDEBUG_DMA, ("scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG)));
+       dprintk(NDEBUG_DMA, "scsi%d : mode reg = 0x%X\n", instance->host_no, NCR5380_read(MODE_REG));
 
        /* 
         *      On the PAS16 at least I/O recovery delays are not needed here.
@@ -1934,7 +1925,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
                }
        }
 
-       dprintk(NDEBUG_DMA, ("scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG)));
+       dprintk(NDEBUG_DMA, "scsi%d : polled DMA transfer complete, basr 0x%X, sr 0x%X\n", instance->host_no, tmp, NCR5380_read(STATUS_REG));
 
        NCR5380_write(MODE_REG, MR_BASE);
        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
@@ -1948,7 +1939,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
 #ifdef READ_OVERRUNS
        if (*phase == p && (p & SR_IO) && residue == 0) {
                if (overrun) {
-                       dprintk(NDEBUG_DMA, ("Got an input overrun, using saved byte\n"));
+                       dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
                        **data = saved_data;
                        *data += 1;
                        *count -= 1;
@@ -1957,13 +1948,13 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
                        printk("No overrun??\n");
                        cnt = toPIO = 2;
                }
-               dprintk(NDEBUG_DMA, ("Doing %d-byte PIO to 0x%X\n", cnt, *data));
+               dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%X\n", cnt, *data);
                NCR5380_transfer_pio(instance, phase, &cnt, data);
                *count -= toPIO - cnt;
        }
 #endif
 
-       dprintk(NDEBUG_DMA, ("Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count)));
+       dprintk(NDEBUG_DMA, "Return with data ptr = 0x%X, count %d, last 0x%X, next 0x%X\n", *data, *count, *(*data + *count - 1), *(*data + *count));
        return 0;
 
 #elif defined(REAL_DMA)
@@ -2013,7 +2004,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
                foo = NCR5380_pwrite(instance, d, c);
 #else
                int timeout;
-               dprintk(NDEBUG_C400_PWRITE, ("About to pwrite %d bytes\n", c));
+               dprintk(NDEBUG_C400_PWRITE, "About to pwrite %d bytes\n", c);
                if (!(foo = NCR5380_pwrite(instance, d, c))) {
                        /*
                         * Wait for the last byte to be sent.  If REQ is being asserted for 
@@ -2024,19 +2015,19 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
                                while (!(NCR5380_read(BUS_AND_STATUS_REG) & BASR_DRQ) && (NCR5380_read(BUS_AND_STATUS_REG) & BASR_PHASE_MATCH));
 
                                if (!timeout)
-                                       dprintk(NDEBUG_LAST_BYTE_SENT, ("scsi%d : timed out on last byte\n", instance->host_no));
+                                       dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : timed out on last byte\n", instance->host_no);
 
                                if (hostdata->flags & FLAG_CHECK_LAST_BYTE_SENT) {
                                        hostdata->flags &= ~FLAG_CHECK_LAST_BYTE_SENT;
                                        if (NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT) {
                                                hostdata->flags |= FLAG_HAS_LAST_BYTE_SENT;
-                                               dprintk(NDEBUG_LAST_WRITE_SENT, ("scsi%d : last bit sent works\n", instance->host_no));
+                                               dprintk(NDEBUG_LAST_BYTE_SENT, "scsi%d : last byte sent works\n", instance->host_no);
                                        }
                                }
                        } else {
-                               dprintk(NDEBUG_C400_PWRITE, ("Waiting for LASTBYTE\n"));
+                               dprintk(NDEBUG_C400_PWRITE, "Waiting for LASTBYTE\n");
                                while (!(NCR5380_read(TARGET_COMMAND_REG) & TCR_LAST_BYTE_SENT));
-                               dprintk(NDEBUG_C400_PWRITE, ("Got LASTBYTE\n"));
+                               dprintk(NDEBUG_C400_PWRITE, "Got LASTBYTE\n");
                        }
                }
 #endif
@@ -2045,9 +2036,9 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance, unsigned char *phase
        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 
        if ((!(p & SR_IO)) && (hostdata->flags & FLAG_NCR53C400)) {
-               dprintk(NDEBUG_C400_PWRITE, ("53C400w: Checking for IRQ\n"));
+               dprintk(NDEBUG_C400_PWRITE, "53C400w: Checking for IRQ\n");
                if (NCR5380_read(BUS_AND_STATUS_REG) & BASR_IRQ) {
-                       dprintk(NDEBUG_C400_PWRITE, ("53C400w:    got it, reading reset interrupt reg\n"));
+                       dprintk(NDEBUG_C400_PWRITE, "53C400w:    got it, reading reset interrupt reg\n");
                        NCR5380_read(RESET_PARITY_INTERRUPT_REG);
                } else {
                        printk("53C400w:    IRQ NOT THERE!\n");
@@ -2139,7 +2130,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
                                        --cmd->SCp.buffers_residual;
                                        cmd->SCp.this_residual = cmd->SCp.buffer->length;
                                        cmd->SCp.ptr = sg_virt(cmd->SCp.buffer);
-                                       dprintk(NDEBUG_INFORMATION, ("scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual));
+                                       dprintk(NDEBUG_INFORMATION, "scsi%d : %d bytes and %d buffers left\n", instance->host_no, cmd->SCp.this_residual, cmd->SCp.buffers_residual);
                                }
                                /*
                                 * The preferred transfer method is going to be 
@@ -2219,7 +2210,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
                                case LINKED_FLG_CMD_COMPLETE:
                                        /* Accept message by clearing ACK */
                                        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-                                       dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun));
+                                       dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked command complete.\n", instance->host_no, cmd->device->id, cmd->device->lun);
                                        /* 
                                         * Sanity check : A linked command should only terminate with
                                         * one of these messages if there are more linked commands
@@ -2235,7 +2226,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
                                        /* The next command is still part of this process */
                                        cmd->next_link->tag = cmd->tag;
                                        cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
-                                       dprintk(NDEBUG_LINKED, ("scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun));
+                                       dprintk(NDEBUG_LINKED, "scsi%d : target %d lun %d linked request done, calling scsi_done().\n", instance->host_no, cmd->device->id, cmd->device->lun);
                                        collect_stats(hostdata, cmd);
                                        cmd->scsi_done(cmd);
                                        cmd = hostdata->connected;
@@ -2247,7 +2238,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
                                        sink = 1;
                                        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
                                        hostdata->connected = NULL;
-                                       dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun));
+                                       dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d, lun %d completed\n", instance->host_no, cmd->device->id, cmd->device->lun);
                                        hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
 
                                        /* 
@@ -2281,13 +2272,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
                                        if ((cmd->cmnd[0] != REQUEST_SENSE) && (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
                                                scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
 
-                                               dprintk(NDEBUG_AUTOSENSE, ("scsi%d : performing request sense\n", instance->host_no));
+                                               dprintk(NDEBUG_AUTOSENSE, "scsi%d : performing request sense\n", instance->host_no);
 
                                                LIST(cmd, hostdata->issue_queue);
                                                cmd->host_scribble = (unsigned char *)
                                                    hostdata->issue_queue;
                                                hostdata->issue_queue = (Scsi_Cmnd *) cmd;
-                                               dprintk(NDEBUG_QUEUES, ("scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no));
+                                               dprintk(NDEBUG_QUEUES, "scsi%d : REQUEST SENSE added to head of issue queue\n", instance->host_no);
                                        } else
 #endif                         /* def AUTOSENSE */
                                        {
@@ -2327,7 +2318,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
                                                    hostdata->disconnected_queue;
                                                hostdata->connected = NULL;
                                                hostdata->disconnected_queue = cmd;
-                                               dprintk(NDEBUG_QUEUES, ("scsi%d : command for target %d lun %d was moved from connected to" "  the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun));
+                                               dprintk(NDEBUG_QUEUES, "scsi%d : command for target %d lun %d was moved from connected to" "  the disconnected_queue\n", instance->host_no, cmd->device->id, cmd->device->lun);
                                                /* 
                                                 * Restore phase bits to 0 so an interrupted selection, 
                                                 * arbitration can resume.
@@ -2373,14 +2364,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
                                        extended_msg[0] = EXTENDED_MESSAGE;
                                        /* Accept first byte by clearing ACK */
                                        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-                                       dprintk(NDEBUG_EXTENDED, ("scsi%d : receiving extended message\n", instance->host_no));
+                                       dprintk(NDEBUG_EXTENDED, "scsi%d : receiving extended message\n", instance->host_no);
 
                                        len = 2;
                                        data = extended_msg + 1;
                                        phase = PHASE_MSGIN;
                                        NCR5380_transfer_pio(instance, &phase, &len, &data);
 
-                                       dprintk(NDEBUG_EXTENDED, ("scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]));
+                                       dprintk(NDEBUG_EXTENDED, "scsi%d : length=%d, code=0x%02x\n", instance->host_no, (int) extended_msg[1], (int) extended_msg[2]);
 
                                        if (!len && extended_msg[1] <= (sizeof(extended_msg) - 1)) {
                                                /* Accept third byte by clearing ACK */
@@ -2390,7 +2381,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
                                                phase = PHASE_MSGIN;
 
                                                NCR5380_transfer_pio(instance, &phase, &len, &data);
-                                               dprintk(NDEBUG_EXTENDED, ("scsi%d : message received, residual %d\n", instance->host_no, len));
+                                               dprintk(NDEBUG_EXTENDED, "scsi%d : message received, residual %d\n", instance->host_no, len);
 
                                                switch (extended_msg[2]) {
                                                case EXTENDED_SDTR:
@@ -2456,7 +2447,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
                                NCR5380_transfer_pio(instance, &phase, &len, &data);
                                if (!cmd->device->disconnect && should_disconnect(cmd->cmnd[0])) {
                                        NCR5380_set_timer(hostdata, USLEEP_SLEEP);
-                                       dprintk(NDEBUG_USLEEP, ("scsi%d : issued command, sleeping until %ul\n", instance->host_no, hostdata->time_expires));
+                                       dprintk(NDEBUG_USLEEP, "scsi%d : issued command, sleeping until %lu\n", instance->host_no, hostdata->time_expires);
                                        return;
                                }
                                break;
@@ -2468,7 +2459,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
                                break;
                        default:
                                printk("scsi%d : unknown phase\n", instance->host_no);
-                               NCR5380_dprint(NDEBUG_ALL, instance);
+                               NCR5380_dprint(NDEBUG_ANY, instance);
                        }       /* switch(phase) */
                }               /* if (tmp * SR_REQ) */
                else {
@@ -2476,7 +2467,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance) {
                         */
                        if (!cmd->device->disconnect && time_after_eq(jiffies, poll_time)) {
                                NCR5380_set_timer(hostdata, USLEEP_SLEEP);
-                               dprintk(NDEBUG_USLEEP, ("scsi%d : poll timed out, sleeping until %ul\n", instance->host_no, hostdata->time_expires));
+                               dprintk(NDEBUG_USLEEP, "scsi%d : poll timed out, sleeping until %lu\n", instance->host_no, hostdata->time_expires);
                                return;
                        }
                }
@@ -2517,7 +2508,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
        hostdata->restart_select = 1;
 
        target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
-       dprintk(NDEBUG_SELECTION, ("scsi%d : reselect\n", instance->host_no));
+       dprintk(NDEBUG_SELECTION, "scsi%d : reselect\n", instance->host_no);
 
        /* 
         * At this point, we have detected that our SCSI ID is on the bus,
@@ -2597,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance) {
                do_abort(instance);
        } else {
                hostdata->connected = tmp;
-               dprintk(NDEBUG_RESELECTION, ("scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->target, tmp->lun, tmp->tag));
+               dprintk(NDEBUG_RESELECTION, "scsi%d : nexus established, target = %d, lun = %d, tag = %d\n", instance->host_no, tmp->device->id, tmp->device->lun, tmp->tag);
        }
 }
 
@@ -2682,8 +2673,8 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
 
        NCR5380_setup(instance);
 
-       dprintk(NDEBUG_ABORT, ("scsi%d : abort called\n", instance->host_no));
-       dprintk(NDEBUG_ABORT, ("        basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG)));
+       dprintk(NDEBUG_ABORT, "scsi%d : abort called\n", instance->host_no);
+       dprintk(NDEBUG_ABORT, "        basr 0x%X, sr 0x%X\n", NCR5380_read(BUS_AND_STATUS_REG), NCR5380_read(STATUS_REG));
 
 #if 0
 /*
@@ -2693,7 +2684,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
  */
 
        if (hostdata->connected == cmd) {
-               dprintk(NDEBUG_ABORT, ("scsi%d : aborting connected command\n", instance->host_no));
+               dprintk(NDEBUG_ABORT, "scsi%d : aborting connected command\n", instance->host_no);
                hostdata->aborted = 1;
 /*
  * We should perform BSY checking, and make sure we haven't slipped
@@ -2721,14 +2712,14 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
  *          from the issue queue.
  */
  
-       dprintk(NDEBUG_ABORT, ("scsi%d : abort going into loop.\n", instance->host_no));
+       dprintk(NDEBUG_ABORT, "scsi%d : abort going into loop.\n", instance->host_no);
        for (prev = (Scsi_Cmnd **) & (hostdata->issue_queue), tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; prev = (Scsi_Cmnd **) & (tmp->host_scribble), tmp = (Scsi_Cmnd *) tmp->host_scribble)
                if (cmd == tmp) {
                        REMOVE(5, *prev, tmp, tmp->host_scribble);
                        (*prev) = (Scsi_Cmnd *) tmp->host_scribble;
                        tmp->host_scribble = NULL;
                        tmp->result = DID_ABORT << 16;
-                       dprintk(NDEBUG_ABORT, ("scsi%d : abort removed command from issue queue.\n", instance->host_no));
+                       dprintk(NDEBUG_ABORT, "scsi%d : abort removed command from issue queue.\n", instance->host_no);
                        tmp->scsi_done(tmp);
                        return SUCCESS;
                }
@@ -2750,7 +2741,7 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
  */
 
        if (hostdata->connected) {
-               dprintk(NDEBUG_ABORT, ("scsi%d : abort failed, command connected.\n", instance->host_no));
+               dprintk(NDEBUG_ABORT, "scsi%d : abort failed, command connected.\n", instance->host_no);
                return FAILED;
        }
 /*
@@ -2780,11 +2771,11 @@ static int NCR5380_abort(Scsi_Cmnd * cmd) {
 
        for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; tmp = (Scsi_Cmnd *) tmp->host_scribble)
                if (cmd == tmp) {
-                       dprintk(NDEBUG_ABORT, ("scsi%d : aborting disconnected command.\n", instance->host_no));
+                       dprintk(NDEBUG_ABORT, "scsi%d : aborting disconnected command.\n", instance->host_no);
 
                        if (NCR5380_select(instance, cmd, (int) cmd->tag))
                                return FAILED;
-                       dprintk(NDEBUG_ABORT, ("scsi%d : nexus reestablished.\n", instance->host_no));
+                       dprintk(NDEBUG_ABORT, "scsi%d : nexus reestablished.\n", instance->host_no);
 
                        do_abort(instance);
 
index 14964d0a0e9dabc2bde55a7e6c8c54fb605bf3d2..c79ddfa6f53c52d58ab814846fb5023ed821bf4a 100644 (file)
  * 1+ (800) 334-5454
  */
 
-/*
- * $Log: NCR5380.h,v $
- */
-
 #ifndef NCR5380_H
 #define NCR5380_H
 
@@ -60,6 +56,9 @@
 #define NDEBUG_C400_PREAD      0x100000
 #define NDEBUG_C400_PWRITE     0x200000
 #define NDEBUG_LISTS           0x400000
+#define NDEBUG_ABORT           0x800000
+#define NDEBUG_TAGS            0x1000000
+#define NDEBUG_MERGING         0x2000000
 
 #define NDEBUG_ANY             0xFFFFFFFFUL
 
@@ -292,9 +291,24 @@ struct NCR5380_hostdata {
 
 #ifdef __KERNEL__
 
-#define dprintk(a,b)                   do {} while(0)
-#define NCR5380_dprint(a,b)            do {} while(0)
-#define NCR5380_dprint_phase(a,b)      do {} while(0)
+#ifndef NDEBUG
+#define NDEBUG (0)
+#endif
+
+#define dprintk(flg, fmt, ...) \
+       do { if ((NDEBUG) & (flg)) pr_debug(fmt, ## __VA_ARGS__); } while (0)
+
+#if NDEBUG
+#define NCR5380_dprint(flg, arg) \
+       do { if ((NDEBUG) & (flg)) NCR5380_print(arg); } while (0)
+#define NCR5380_dprint_phase(flg, arg) \
+       do { if ((NDEBUG) & (flg)) NCR5380_print_phase(arg); } while (0)
+static void NCR5380_print_phase(struct Scsi_Host *instance);
+static void NCR5380_print(struct Scsi_Host *instance);
+#else
+#define NCR5380_dprint(flg, arg)       do {} while (0)
+#define NCR5380_dprint_phase(flg, arg) do {} while (0)
+#endif
 
 #if defined(AUTOPROBE_IRQ)
 static int NCR5380_probe_irq(struct Scsi_Host *instance, int possible);
@@ -307,10 +321,6 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id);
 #endif
 static void NCR5380_main(struct work_struct *work);
 static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance);
-#ifdef NDEBUG
-static void NCR5380_print_phase(struct Scsi_Host *instance);
-static void NCR5380_print(struct Scsi_Host *instance);
-#endif
 static int NCR5380_abort(Scsi_Cmnd * cmd);
 static int NCR5380_bus_reset(Scsi_Cmnd * cmd);
 static int NCR5380_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
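
The new NCR5380.h dprintk() above checks its flag against the compile-time NDEBUG bitmask and forwards to pr_debug(), which is what lets all the NCR5380.c call sites lose their old double parentheses. A small stand-alone sketch of the same bitmask-gated debug macro, using fprintf() in place of pr_debug() and invented flag values:

/* ndebug_mask.c - compile-time bitmask-gated debug printing in the
 * style of the NCR5380 dprintk() above, but with fprintf() standing
 * in for the kernel's pr_debug(). */
#include <stdio.h>

#define NDEBUG_MAIN	0x01
#define NDEBUG_QUEUES	0x02
#define NDEBUG_INTR	0x04
#define NDEBUG_ANY	0xFFFFFFFFUL

/* Builders pick the mask; 0 makes every dprintk() a no-op the
 * compiler can drop entirely. */
#ifndef NDEBUG
#define NDEBUG (NDEBUG_MAIN | NDEBUG_QUEUES)
#endif

#define dprintk(flg, fmt, ...) \
	do { if ((NDEBUG) & (flg)) fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)

int main(void)
{
	int host_no = 0;

	dprintk(NDEBUG_MAIN, "scsi%d : main() running\n", host_no);	/* printed */
	dprintk(NDEBUG_INTR, "scsi%d : interrupt seen\n", host_no);	/* mask bit clear: folded away */
	return 0;
}
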
index 14b5f8d0e7f4748bbac375da3c9174956740dd60..cc9bd26f5d1abc01e92c30baadb1adfed1e84b85 100644 (file)
@@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
                for (bit = 0; bit < 8; bit++) {
 
                        if ((pci_status[i] & (0x1 << bit)) != 0) {
-                               static const char *s;
+                               const char *s;
 
                                s = pci_status_strings[bit];
                                if (i == 7/*TARG*/ && bit == 3)
@@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
 
                for (bit = 0; bit < 8; bit++) {
 
-                       if ((split_status[i] & (0x1 << bit)) != 0) {
-                               static const char *s;
-
-                               s = split_status_strings[bit];
-                               printk(s, ahd_name(ahd),
+                       if ((split_status[i] & (0x1 << bit)) != 0)
+                               printk(split_status_strings[bit], ahd_name(ahd),
                                       split_status_source[i]);
-                       }
 
                        if (i > 1)
                                continue;
 
-                       if ((sg_split_status[i] & (0x1 << bit)) != 0) {
-                               static const char *s;
-
-                               s = split_status_strings[bit];
-                               printk(s, ahd_name(ahd), "SG");
-                       }
+                       if ((sg_split_status[i] & (0x1 << bit)) != 0)
+                               printk(split_status_strings[bit], ahd_name(ahd), "SG");
                }
        }
        /*
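
The aic79xx hunks above delete locals that were declared static yet reassigned on every loop pass, and instead feed the per-bit format string from the table straight to printk(). A tiny sketch of that table-of-format-strings loop, with invented status names rather than the real aic79xx tables:

/* pci_status_table.c - print messages from a table of format strings,
 * as the simplified aic79xx loop above now does.  Bit meanings and
 * messages are invented for illustration. */
#include <stdio.h>

static const char *const status_strings[] = {
	"%s: data parity error detected\n",
	"%s: signalled system error\n",
	"%s: received target abort\n",
};

int main(void)
{
	unsigned int status = 0x5;	/* bits 0 and 2 set */
	unsigned int bit;

	for (bit = 0; bit < 3; bit++) {
		/* No intermediate (let alone static) pointer needed:
		 * index the table and hand the format string to printf. */
		if (status & (1u << bit))
			printf(status_strings[bit], "ahd0");
	}
	return 0;
}
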
index 059ff477a398290091ef35c8215a806799169e67..2e797a36760879a685949fbdcb7671e9bb496036 100644 (file)
  * comment out the undef.
  */
 #undef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
-/*
- * SCSI-II Linked command support.
- *
- * The higher level code doesn't support linked commands yet, and so the option
- * is undef'd here.
- */
-#undef CONFIG_SCSI_ACORNSCSI_LINK
 /*
  * SCSI-II Synchronous transfer support.
  *
 #error "Yippee!  ABORT TAG is now defined!  Remove this error!"
 #endif
 
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-#error SCSI2 LINKed commands not supported (yet)!
-#endif
-
 #ifdef USE_DMAC
 /*
  * DMAC setup parameters
@@ -1668,42 +1657,6 @@ void acornscsi_message(AS_Host *host)
        }
        break;
 
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-    case LINKED_CMD_COMPLETE:
-    case LINKED_FLG_CMD_COMPLETE:
-       /*
-        * We don't support linked commands yet
-        */
-       if (0) {
-#if (DEBUG & DEBUG_LINK)
-           printk("scsi%d.%c: lun %d tag %d linked command complete\n",
-                   host->host->host_no, acornscsi_target(host), host->SCpnt->tag);
-#endif
-           /*
-            * A linked command should only terminate with one of these messages
-            * if there are more linked commands available.
-            */
-           if (!host->SCpnt->next_link) {
-               printk(KERN_WARNING "scsi%d.%c: lun %d tag %d linked command complete, but no next_link\n",
-                       instance->host_no, acornscsi_target(host), host->SCpnt->tag);
-               acornscsi_sbic_issuecmd(host, CMND_ASSERTATN);
-               msgqueue_addmsg(&host->scsi.msgs, 1, ABORT);
-           } else {
-               struct scsi_cmnd *SCpnt = host->SCpnt;
-
-               acornscsi_dma_cleanup(host);
-
-               host->SCpnt = host->SCpnt->next_link;
-               host->SCpnt->tag = SCpnt->tag;
-               SCpnt->result = DID_OK | host->scsi.SCp.Message << 8 | host->Scsi.SCp.Status;
-               SCpnt->done(SCpnt);
-
-               /* initialise host->SCpnt->SCp */
-           }
-           break;
-       }
-#endif
-
     default: /* reject message */
        printk(KERN_ERR "scsi%d.%c: unrecognised message %02X, rejecting\n",
                host->host->host_no, acornscsi_target(host),
@@ -2825,9 +2778,6 @@ char *acornscsi_info(struct Scsi_Host *host)
 #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
     " TAG"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-    " LINK"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
     " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
@@ -2851,9 +2801,6 @@ static int acornscsi_show_info(struct seq_file *m, struct Scsi_Host *instance)
 #ifdef CONFIG_SCSI_ACORNSCSI_TAGGED_QUEUE
     " TAG"
 #endif
-#ifdef CONFIG_SCSI_ACORNSCSI_LINK
-    " LINK"
-#endif
 #if (DEBUG & DEBUG_NO_WRITE)
     " NOWRITE (" __stringify(NO_WRITE) ")"
 #endif
index f8e0609000521fd734958538369bce29a7c1f913..8ef810a4476ec15f163069005ceae6b3f6b0f089 100644 (file)
@@ -36,9 +36,6 @@
        void __iomem *base;             \
        void __iomem *dma
 
-#define BOARD_NORMAL   0
-#define BOARD_NCR53C400        1
-
 #include "../NCR5380.h"
 
 void cumanascsi_setup(char *str, int *ints)
index 4266eef8aca12256b469b0983e39c652ac43f7c0..188e734c7ff006bb9b7e891ddca23ae00c884b3d 100644 (file)
@@ -37,9 +37,6 @@
 #define NCR5380_implementation_fields  \
        void __iomem *base
 
-#define BOARD_NORMAL   0
-#define BOARD_NCR53C400        1
-
 #include "../NCR5380.h"
 
 #undef START_DMA_INITIATOR_RECEIVE_REG
index 0f3cdbc80ba63a47d59c2e7d07a2cbf3ed67c8a9..1814aa20b7243f0f50f72b065e91b8a2172d9a94 100644 (file)
@@ -370,7 +370,7 @@ static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
                return 0;
        if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
            TagAlloc[cmd->device->id][cmd->device->lun].queue_size) {
-               TAG_PRINTK("scsi%d: target %d lun %d: no free tags\n",
+               dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d: no free tags\n",
                           H_NO(cmd), cmd->device->id, cmd->device->lun);
                return 1;
        }
@@ -394,7 +394,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
            !setup_use_tagged_queuing || !cmd->device->tagged_supported) {
                cmd->tag = TAG_NONE;
                hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
-               TAG_PRINTK("scsi%d: target %d lun %d now allocated by untagged "
+               dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d now allocated by untagged "
                           "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun);
        } else {
                TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
@@ -402,7 +402,7 @@ static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
                cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
                set_bit(cmd->tag, ta->allocated);
                ta->nr_allocated++;
-               TAG_PRINTK("scsi%d: using tag %d for target %d lun %d "
+               dprintk(NDEBUG_TAGS, "scsi%d: using tag %d for target %d lun %d "
                           "(now %d tags in use)\n",
                           H_NO(cmd), cmd->tag, cmd->device->id,
                           cmd->device->lun, ta->nr_allocated);
@@ -420,7 +420,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)
 
        if (cmd->tag == TAG_NONE) {
                hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
-               TAG_PRINTK("scsi%d: target %d lun %d untagged cmd finished\n",
+               dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d untagged cmd finished\n",
                           H_NO(cmd), cmd->device->id, cmd->device->lun);
        } else if (cmd->tag >= MAX_TAGS) {
                printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
@@ -429,7 +429,7 @@ static void cmd_free_tag(Scsi_Cmnd *cmd)
                TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
                clear_bit(cmd->tag, ta->allocated);
                ta->nr_allocated--;
-               TAG_PRINTK("scsi%d: freed tag %d for target %d lun %d\n",
+               dprintk(NDEBUG_TAGS, "scsi%d: freed tag %d for target %d lun %d\n",
                           H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun);
        }
 }
@@ -478,7 +478,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
        for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
             cmd->SCp.buffers_residual &&
             virt_to_phys(sg_virt(&cmd->SCp.buffer[1])) == endaddr;) {
-               MER_PRINTK("VTOP(%p) == %08lx -> merging\n",
+               dprintk(NDEBUG_MERGING, "VTOP(%p) == %08lx -> merging\n",
                           page_address(sg_page(&cmd->SCp.buffer[1])), endaddr);
 #if (NDEBUG & NDEBUG_MERGING)
                ++cnt;
@@ -490,7 +490,7 @@ static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
        }
 #if (NDEBUG & NDEBUG_MERGING)
        if (oldlen != cmd->SCp.this_residual)
-               MER_PRINTK("merged %d buffers from %p, new length %08x\n",
+               dprintk(NDEBUG_MERGING, "merged %d buffers from %p, new length %08x\n",
                           cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
 #endif
 }
@@ -626,16 +626,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
        }
 }
 
-#else /* !NDEBUG */
-
-/* dummies... */
-static inline void NCR5380_print(struct Scsi_Host *instance)
-{
-};
-static inline void NCR5380_print_phase(struct Scsi_Host *instance)
-{
-};
-
 #endif
 
 /*
@@ -676,7 +666,7 @@ static inline void NCR5380_all_init(void)
 {
        static int done = 0;
        if (!done) {
-               INI_PRINTK("scsi : NCR5380_all_init()\n");
+               dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");
                done = 1;
        }
 }
@@ -739,8 +729,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
        Scsi_Cmnd *ptr;
        unsigned long flags;
 
-       NCR_PRINT(NDEBUG_ANY);
-       NCR_PRINT_PHASE(NDEBUG_ANY);
+       NCR5380_dprint(NDEBUG_ANY, instance);
+       NCR5380_dprint_phase(NDEBUG_ANY, instance);
 
        hostdata = (struct NCR5380_hostdata *)instance->hostdata;
 
@@ -984,7 +974,7 @@ static int NCR5380_queue_command_lck(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *))
        }
        local_irq_restore(flags);
 
-       QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd),
+       dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),
                  (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
 
        /* If queue_command() is called from an interrupt (real one or bottom
@@ -1054,7 +1044,7 @@ static void NCR5380_main(struct work_struct *work)
                done = 1;
 
                if (!hostdata->connected) {
-                       MAIN_PRINTK("scsi%d: not connected\n", HOSTNO);
+                       dprintk(NDEBUG_MAIN, "scsi%d: not connected\n", HOSTNO);
                        /*
                         * Search through the issue_queue for a command destined
                         * for a target that's not busy.
@@ -1107,7 +1097,7 @@ static void NCR5380_main(struct work_struct *work)
                                         * On failure, we must add the command back to the
                                         *   issue queue so we can keep trying.
                                         */
-                                       MAIN_PRINTK("scsi%d: main(): command for target %d "
+                                       dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
                                                    "lun %d removed from issue_queue\n",
                                                    HOSTNO, tmp->device->id, tmp->device->lun);
                                        /*
@@ -1140,7 +1130,7 @@ static void NCR5380_main(struct work_struct *work)
 #endif
                                                falcon_dont_release--;
                                                local_irq_restore(flags);
-                                               MAIN_PRINTK("scsi%d: main(): select() failed, "
+                                               dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
                                                            "returned to issue_queue\n", HOSTNO);
                                                if (hostdata->connected)
                                                        break;
@@ -1155,10 +1145,10 @@ static void NCR5380_main(struct work_struct *work)
 #endif
                    ) {
                        local_irq_restore(flags);
-                       MAIN_PRINTK("scsi%d: main: performing information transfer\n",
+                       dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
                                    HOSTNO);
                        NCR5380_information_transfer(instance);
-                       MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO);
+                       dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
                        done = 0;
                }
        } while (!done);
@@ -1204,12 +1194,12 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
                            (BASR_PHASE_MATCH|BASR_ACK)) {
                                saved_data = NCR5380_read(INPUT_DATA_REG);
                                overrun = 1;
-                               DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO);
+                               dprintk(NDEBUG_DMA, "scsi%d: read overrun handled\n", HOSTNO);
                        }
                }
        }
 
-       DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
+       dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
                   HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
                   NCR5380_read(STATUS_REG));
 
@@ -1229,13 +1219,13 @@ static void NCR5380_dma_complete(struct Scsi_Host *instance)
                if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) {
                        cnt = toPIO = atari_read_overruns;
                        if (overrun) {
-                               DMA_PRINTK("Got an input overrun, using saved byte\n");
+                               dprintk(NDEBUG_DMA, "Got an input overrun, using saved byte\n");
                                *(*data)++ = saved_data;
                                (*count)--;
                                cnt--;
                                toPIO--;
                        }
-                       DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
+                       dprintk(NDEBUG_DMA, "Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data);
                        NCR5380_transfer_pio(instance, &p, &cnt, data);
                        *count -= toPIO - cnt;
                }
@@ -1261,25 +1251,25 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
        int done = 1, handled = 0;
        unsigned char basr;
 
-       INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO);
+       dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);
 
        /* Look for pending interrupts */
        basr = NCR5380_read(BUS_AND_STATUS_REG);
-       INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr);
+       dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
        /* dispatch to appropriate routine if found and done=0 */
        if (basr & BASR_IRQ) {
-               NCR_PRINT(NDEBUG_INTR);
+               NCR5380_dprint(NDEBUG_INTR, instance);
                if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
                        done = 0;
                        ENABLE_IRQ();
-                       INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO);
+                       dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
                        NCR5380_reselect(instance);
                        (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
                } else if (basr & BASR_PARITY_ERROR) {
-                       INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO);
+                       dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
                        (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
                } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
-                       INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO);
+                       dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
                        (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
                } else {
                        /*
@@ -1298,7 +1288,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
                            ((basr & BASR_END_DMA_TRANSFER) ||
                             !(basr & BASR_PHASE_MATCH))) {
 
-                               INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
+                               dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
                                NCR5380_dma_complete( instance );
                                done = 0;
                                ENABLE_IRQ();
@@ -1323,7 +1313,7 @@ static irqreturn_t NCR5380_intr(int irq, void *dev_id)
        }
 
        if (!done) {
-               INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO);
+               dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
                /* Put a call to NCR5380_main() on the queue... */
                queue_main();
        }
@@ -1396,8 +1386,8 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
        unsigned long flags;
 
        hostdata->restart_select = 0;
-       NCR_PRINT(NDEBUG_ARBITRATION);
-       ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO,
+       NCR5380_dprint(NDEBUG_ARBITRATION, instance);
+       dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
                   instance->this_id);
 
        /*
@@ -1442,7 +1432,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
                ;
 #endif
 
-       ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO);
+       dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);
 
        if (hostdata->connected) {
                NCR5380_write(MODE_REG, MR_BASE);
@@ -1463,7 +1453,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
            (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
            hostdata->connected) {
                NCR5380_write(MODE_REG, MR_BASE);
-               ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
+               dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
                           HOSTNO);
                return -1;
        }
@@ -1478,7 +1468,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
            hostdata->connected) {
                NCR5380_write(MODE_REG, MR_BASE);
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-               ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
+               dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
                           HOSTNO);
                return -1;
        }
@@ -1501,7 +1491,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
                return -1;
        }
 
-       ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO);
+       dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);
 
        /*
         * Now that we have won arbitration, start Selection process, asserting
@@ -1561,7 +1551,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
 
        udelay(1);
 
-       SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
+       dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
 
        /*
         * The SCSI specification calls for a 250 ms timeout for the actual
@@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
                        printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
                        if (hostdata->restart_select)
                                printk(KERN_NOTICE "\trestart select\n");
-                       NCR_PRINT(NDEBUG_ANY);
+                       NCR5380_dprint(NDEBUG_ANY, instance);
                        NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
                        return -1;
                }
@@ -1630,7 +1620,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
 #endif
                cmd->scsi_done(cmd);
                NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-               SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO);
+               dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
                NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
                return 0;
        }
@@ -1656,7 +1646,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
        while (!(NCR5380_read(STATUS_REG) & SR_REQ))
                ;
 
-       SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
+       dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
                   HOSTNO, cmd->device->id);
        tmp[0] = IDENTIFY(1, cmd->device->lun);
 
@@ -1676,7 +1666,7 @@ static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag)
        data = tmp;
        phase = PHASE_MSGOUT;
        NCR5380_transfer_pio(instance, &phase, &len, &data);
-       SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO);
+       dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
        /* XXX need to handle errors here */
        hostdata->connected = cmd;
 #ifndef SUPPORT_TAGS
@@ -1737,12 +1727,12 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
                while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ))
                        ;
 
-               HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO);
+               dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);
 
                /* Check for phase mismatch */
                if ((tmp & PHASE_MASK) != p) {
-                       PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO);
-                       NCR_PRINT_PHASE(NDEBUG_PIO);
+                       dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
+                       NCR5380_dprint_phase(NDEBUG_PIO, instance);
                        break;
                }
 
@@ -1764,25 +1754,25 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
                if (!(p & SR_IO)) {
                        if (!((p & SR_MSG) && c > 1)) {
                                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA);
-                               NCR_PRINT(NDEBUG_PIO);
+                               NCR5380_dprint(NDEBUG_PIO, instance);
                                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
                                              ICR_ASSERT_DATA | ICR_ASSERT_ACK);
                        } else {
                                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
                                              ICR_ASSERT_DATA | ICR_ASSERT_ATN);
-                               NCR_PRINT(NDEBUG_PIO);
+                               NCR5380_dprint(NDEBUG_PIO, instance);
                                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
                                              ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
                        }
                } else {
-                       NCR_PRINT(NDEBUG_PIO);
+                       NCR5380_dprint(NDEBUG_PIO, instance);
                        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
                }
 
                while (NCR5380_read(STATUS_REG) & SR_REQ)
                        ;
 
-               HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO);
+               dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);
 
                /*
                 * We have several special cases to consider during REQ/ACK handshaking :
@@ -1803,7 +1793,7 @@ static int NCR5380_transfer_pio(struct Scsi_Host *instance,
                }
        } while (--c);
 
-       PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c);
+       dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);
 
        *count = c;
        *data = d;
@@ -1917,7 +1907,7 @@ static int NCR5380_transfer_dma(struct Scsi_Host *instance,
        if (atari_read_overruns && (p & SR_IO))
                c -= atari_read_overruns;
 
-       DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n",
+       dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
                   HOSTNO, (p & SR_IO) ? "reading" : "writing",
                   c, (p & SR_IO) ? "to" : "from", d);
 
@@ -1997,7 +1987,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                        phase = (tmp & PHASE_MASK);
                        if (phase != old_phase) {
                                old_phase = phase;
-                               NCR_PRINT_PHASE(NDEBUG_INFORMATION);
+                               NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
                        }
 
                        if (sink && (phase != PHASE_MSGOUT)) {
@@ -2039,7 +2029,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                         * they are at contiguous physical addresses.
                                         */
                                        merge_contiguous_buffers(cmd);
-                                       INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
+                                       dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",
                                                   HOSTNO, cmd->SCp.this_residual,
                                                   cmd->SCp.buffers_residual);
                                }
@@ -2123,7 +2113,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                        /* Accept message by clearing ACK */
                                        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 
-                                       LNK_PRINTK("scsi%d: target %d lun %d linked command "
+                                       dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command "
                                                   "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);
 
                                        /* Enable reselect interrupts */
@@ -2148,7 +2138,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                         * and don't free it! */
                                        cmd->next_link->tag = cmd->tag;
                                        cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8);
-                                       LNK_PRINTK("scsi%d: target %d lun %d linked request "
+                                       dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "
                                                   "done, calling scsi_done().\n",
                                                   HOSTNO, cmd->device->id, cmd->device->lun);
 #ifdef NCR5380_STATS
@@ -2165,7 +2155,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                        /* ++guenther: possible race with Falcon locking */
                                        falcon_dont_release++;
                                        hostdata->connected = NULL;
-                                       QU_PRINTK("scsi%d: command for target %d, lun %d "
+                                       dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "
                                                  "completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
 #ifdef SUPPORT_TAGS
                                        cmd_free_tag(cmd);
@@ -2179,7 +2169,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                                /* ++Andreas: the mid level code knows about
                                                   QUEUE_FULL now. */
                                                TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
-                                               TAG_PRINTK("scsi%d: target %d lun %d returned "
+                                               dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "
                                                           "QUEUE_FULL after %d commands\n",
                                                           HOSTNO, cmd->device->id, cmd->device->lun,
                                                           ta->nr_allocated);
@@ -2224,14 +2214,14 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                            (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
                                                scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
 
-                                               ASEN_PRINTK("scsi%d: performing request sense\n", HOSTNO);
+                                               dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n", HOSTNO);
 
                                                local_irq_save(flags);
                                                LIST(cmd,hostdata->issue_queue);
                                                SET_NEXT(cmd, hostdata->issue_queue);
                                                hostdata->issue_queue = (Scsi_Cmnd *) cmd;
                                                local_irq_restore(flags);
-                                               QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
+                                               dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
                                                          "issue queue\n", H_NO(cmd));
                                        } else
 #endif /* def AUTOSENSE */
@@ -2277,7 +2267,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                                cmd->device->tagged_supported = 0;
                                                hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
                                                cmd->tag = TAG_NONE;
-                                               TAG_PRINTK("scsi%d: target %d lun %d rejected "
+                                               dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "
                                                           "QUEUE_TAG message; tagged queuing "
                                                           "disabled\n",
                                                           HOSTNO, cmd->device->id, cmd->device->lun);
@@ -2294,7 +2284,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                        hostdata->connected = NULL;
                                        hostdata->disconnected_queue = cmd;
                                        local_irq_restore(flags);
-                                       QU_PRINTK("scsi%d: command for target %d lun %d was "
+                                       dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "
                                                  "moved from connected to the "
                                                  "disconnected_queue\n", HOSTNO,
                                                  cmd->device->id, cmd->device->lun);
@@ -2344,13 +2334,13 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                        /* Accept first byte by clearing ACK */
                                        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 
-                                       EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO);
+                                       dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);
 
                                        len = 2;
                                        data = extended_msg + 1;
                                        phase = PHASE_MSGIN;
                                        NCR5380_transfer_pio(instance, &phase, &len, &data);
-                                       EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO,
+                                       dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
                                                   (int)extended_msg[1], (int)extended_msg[2]);
 
                                        if (!len && extended_msg[1] <=
@@ -2362,7 +2352,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                                phase = PHASE_MSGIN;
 
                                                NCR5380_transfer_pio(instance, &phase, &len, &data);
-                                               EXT_PRINTK("scsi%d: message received, residual %d\n",
+                                               dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
                                                           HOSTNO, len);
 
                                                switch (extended_msg[2]) {
@@ -2451,7 +2441,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
                                break;
                        default:
                                printk("scsi%d: unknown phase\n", HOSTNO);
-                               NCR_PRINT(NDEBUG_ANY);
+                               NCR5380_dprint(NDEBUG_ANY, instance);
                        } /* switch(phase) */
                } /* if (tmp * SR_REQ) */
        } /* while (1) */
@@ -2493,7 +2483,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
 
        target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
 
-       RSL_PRINTK("scsi%d: reselect\n", HOSTNO);
+       dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);
 
        /*
         * At this point, we have detected that our SCSI ID is on the bus,
@@ -2544,7 +2534,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
                if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
                    msg[1] == SIMPLE_QUEUE_TAG)
                        tag = msg[2];
-               TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at "
+               dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "
                           "reselection\n", HOSTNO, target_mask, lun, tag);
        }
 #endif
@@ -2598,7 +2588,7 @@ static void NCR5380_reselect(struct Scsi_Host *instance)
        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 
        hostdata->connected = tmp;
-       RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
+       dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
                   HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
        falcon_dont_release--;
 }
@@ -2640,7 +2630,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
                printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n",
                       HOSTNO);
 
-       ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
+       dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
                    NCR5380_read(BUS_AND_STATUS_REG),
                    NCR5380_read(STATUS_REG));
 
@@ -2653,7 +2643,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
 
        if (hostdata->connected == cmd) {
 
-               ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO);
+               dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
                /*
                 * We should perform BSY checking, and make sure we haven't slipped
                 * into BUS FREE.
@@ -2683,11 +2673,11 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
                        local_irq_restore(flags);
                        cmd->scsi_done(cmd);
                        falcon_release_lock_if_possible(hostdata);
-                       return SCSI_ABORT_SUCCESS;
+                       return SUCCESS;
                } else {
 /*                     local_irq_restore(flags); */
                        printk("scsi%d: abort of connected command failed!\n", HOSTNO);
-                       return SCSI_ABORT_ERROR;
+                       return FAILED;
                }
        }
 #endif
@@ -2705,13 +2695,13 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
                        SET_NEXT(tmp, NULL);
                        tmp->result = DID_ABORT << 16;
                        local_irq_restore(flags);
-                       ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
+                       dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
                                    HOSTNO);
                        /* Tagged queuing note: no tag to free here, hasn't been assigned
                         * yet... */
                        tmp->scsi_done(tmp);
                        falcon_release_lock_if_possible(hostdata);
-                       return SCSI_ABORT_SUCCESS;
+                       return SUCCESS;
                }
        }
 
@@ -2728,8 +2718,8 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
 
        if (hostdata->connected) {
                local_irq_restore(flags);
-               ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO);
-               return SCSI_ABORT_SNOOZE;
+               dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
+               return FAILED;
        }
 
        /*
@@ -2761,12 +2751,12 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
             tmp = NEXT(tmp)) {
                if (cmd == tmp) {
                        local_irq_restore(flags);
-                       ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO);
+                       dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);
 
                        if (NCR5380_select(instance, cmd, (int)cmd->tag))
-                               return SCSI_ABORT_BUSY;
+                               return FAILED;
 
-                       ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO);
+                       dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);
 
                        do_abort(instance);
 
@@ -2791,7 +2781,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
                                        local_irq_restore(flags);
                                        tmp->scsi_done(tmp);
                                        falcon_release_lock_if_possible(hostdata);
-                                       return SCSI_ABORT_SUCCESS;
+                                       return SUCCESS;
                                }
                        }
                }
@@ -2816,7 +2806,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
         */
        falcon_release_lock_if_possible(hostdata);
 
-       return SCSI_ABORT_NOT_RUNNING;
+       return FAILED;
 }
 
 
@@ -2825,7 +2815,7 @@ int NCR5380_abort(Scsi_Cmnd *cmd)
  *
  * Purpose : reset the SCSI bus.
  *
- * Returns : SCSI_RESET_WAKEUP
+ * Returns : SUCCESS or FAILURE
  *
  */
 
@@ -2834,7 +2824,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
        SETUP_HOSTDATA(cmd->device->host);
        int i;
        unsigned long flags;
-#if 1
+#if defined(RESET_RUN_DONE)
        Scsi_Cmnd *connected, *disconnected_queue;
 #endif
 
@@ -2859,7 +2849,14 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
         * through anymore ... */
        (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
 
-#if 1  /* XXX Should now be done by midlevel code, but it's broken XXX */
+       /* MSch 20140115 - looking at the generic NCR5380 driver, all of this
+        * should go.
+        * Catch-22: if we don't clear all queues, the SCSI driver lock will
+        * not be reset by atari_scsi_reset()!
+        */
+
+#if defined(RESET_RUN_DONE)
+       /* XXX Should now be done by midlevel code, but it's broken XXX */
        /* XXX see below                                            XXX */
 
        /* MSch: old-style reset: actually abort all command processing here */
@@ -2890,7 +2887,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
         */
 
        if ((cmd = connected)) {
-               ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
+               dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
                cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
                cmd->scsi_done(cmd);
        }
@@ -2902,7 +2899,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
                cmd->scsi_done(cmd);
        }
        if (i > 0)
-               ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i);
+               dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);
 
        /* The Falcon lock should be released after a reset...
         */
@@ -2915,7 +2912,7 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
         * the midlevel code that the reset was SUCCESSFUL, and there is no
         * need to 'wake up' the commands by a request_sense
         */
-       return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
+       return SUCCESS;
 #else /* 1 */
 
        /* MSch: new-style reset handling: let the mid-level do what it can */
@@ -2942,11 +2939,11 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
         */
 
        if (hostdata->issue_queue)
-               ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
+               dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
        if (hostdata->connected)
-               ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
+               dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
        if (hostdata->disconnected_queue)
-               ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
+               dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
 
        local_irq_save(flags);
        hostdata->issue_queue = NULL;
@@ -2963,6 +2960,6 @@ static int NCR5380_bus_reset(Scsi_Cmnd *cmd)
        local_irq_restore(flags);
 
        /* we did no complete reset of all commands, so a wakeup is required */
-       return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET;
+       return SUCCESS;
 #endif /* 1 */
 }
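The abort and bus-reset hunks above replace the driver-private SCSI_ABORT_*/SCSI_RESET_* result codes with the SUCCESS/FAILED values the current midlayer error handler expects. A rough sketch of how the old abort results collapse onto the new ones; the mapping is assumed for illustration and is not part of the patch:

/* old driver-private codes, as removed from atari_scsi.h further below */
enum { SCSI_ABORT_SNOOZE, SCSI_ABORT_SUCCESS, SCSI_ABORT_PENDING,
       SCSI_ABORT_BUSY, SCSI_ABORT_NOT_RUNNING, SCSI_ABORT_ERROR };

/* mirrors the error-handler values from include/scsi/scsi.h */
#define SUCCESS 0x2002
#define FAILED  0x2003

static int legacy_abort_result_to_eh(int legacy)
{
	switch (legacy) {
	case SCSI_ABORT_SUCCESS:
		return SUCCESS;		/* command was removed from the queues */
	default:
		return FAILED;		/* anything else: let the EH core escalate */
	}
}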
index a8d721ff19ebff8b5f11e4cfc1b17e715defec30..b522134528d6185bb54ab4afbc3b9fcc02b881c4 100644 (file)
 
 #include <linux/module.h>
 
-#define NDEBUG (0)
-
-#define NDEBUG_ABORT           0x00100000
-#define NDEBUG_TAGS            0x00200000
-#define NDEBUG_MERGING         0x00400000
-
 #define AUTOSENSE
 /* For the Atari version, use only polled IO or REAL_DMA */
 #define        REAL_DMA
@@ -314,7 +308,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
 
        dma_stat = tt_scsi_dma.dma_ctrl;
 
-       INT_PRINTK("scsi%d: NCR5380 interrupt, DMA status = %02x\n",
+       dprintk(NDEBUG_INTR, "scsi%d: NCR5380 interrupt, DMA status = %02x\n",
                   atari_scsi_host->host_no, dma_stat & 0xff);
 
        /* Look if it was the DMA that has interrupted: First possibility
@@ -340,7 +334,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
        if ((dma_stat & 0x02) && !(dma_stat & 0x40)) {
                atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr);
 
-               DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n",
+               dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",
                           atari_dma_residual);
 
                if ((signed int)atari_dma_residual < 0)
@@ -371,7 +365,7 @@ static irqreturn_t scsi_tt_intr(int irq, void *dummy)
                         * other command.  These shouldn't disconnect anyway.
                         */
                        if (atari_dma_residual & 0x1ff) {
-                               DMA_PRINTK("SCSI DMA: DMA bug corrected, "
+                               dprintk(NDEBUG_DMA, "SCSI DMA: DMA bug corrected, "
                                           "difference %ld bytes\n",
                                           512 - (atari_dma_residual & 0x1ff));
                                atari_dma_residual = (atari_dma_residual + 511) & ~0x1ff;
@@ -438,7 +432,7 @@ static irqreturn_t scsi_falcon_intr(int irq, void *dummy)
                               "ST-DMA fifo\n", transferred & 15);
 
                atari_dma_residual = HOSTDATA_DMALEN - transferred;
-               DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n",
+               dprintk(NDEBUG_DMA, "SCSI DMA: There are %ld residual bytes.\n",
                           atari_dma_residual);
        } else
                atari_dma_residual = 0;
@@ -474,11 +468,11 @@ static void atari_scsi_fetch_restbytes(void)
                /* there are 'nr' bytes left for the last long address
                   before the DMA pointer */
                phys_dst ^= nr;
-               DMA_PRINTK("SCSI DMA: there are %d rest bytes for phys addr 0x%08lx",
+               dprintk(NDEBUG_DMA, "SCSI DMA: there are %d rest bytes for phys addr 0x%08lx",
                           nr, phys_dst);
                /* The content of the DMA pointer is a physical address!  */
                dst = phys_to_virt(phys_dst);
-               DMA_PRINTK(" = virt addr %p\n", dst);
+               dprintk(NDEBUG_DMA, " = virt addr %p\n", dst);
                for (src = (char *)&tt_scsi_dma.dma_restdata; nr != 0; --nr)
                        *dst++ = *src++;
        }
@@ -827,7 +821,7 @@ static int atari_scsi_bus_reset(Scsi_Cmnd *cmd)
        } else {
                atari_turnon_irq(IRQ_MFP_FSCSI);
        }
-       if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS)
+       if (rv == SUCCESS)
                falcon_release_lock_if_possible(hostdata);
 
        return rv;
@@ -883,7 +877,7 @@ static unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance,
 {
        unsigned long addr = virt_to_phys(data);
 
-       DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
+       dprintk(NDEBUG_DMA, "scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, "
                   "dir = %d\n", instance->host_no, data, addr, count, dir);
 
        if (!IS_A_TT() && !STRAM_ADDR(addr)) {
@@ -1063,7 +1057,7 @@ static unsigned long atari_dma_xfer_len(unsigned long wanted_len,
                possible_len = limit;
 
        if (possible_len != wanted_len)
-               DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes "
+               dprintk(NDEBUG_DMA, "Sorry, must cut DMA transfer size to %ld bytes "
                           "instead of %ld\n", possible_len, wanted_len);
 
        return possible_len;
index 11c624bb122dcd47078ac74ac4fd6eafbef9bbb6..3299d91d7336dc988d8e611e765cf918a7027274 100644 (file)
 #define        NCR5380_dma_xfer_len(i,cmd,phase) \
        atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1)
 
-/* former generic SCSI error handling stuff */
-
-#define SCSI_ABORT_SNOOZE 0
-#define SCSI_ABORT_SUCCESS 1
-#define SCSI_ABORT_PENDING 2
-#define SCSI_ABORT_BUSY 3
-#define SCSI_ABORT_NOT_RUNNING 4
-#define SCSI_ABORT_ERROR 5
-
-#define SCSI_RESET_SNOOZE 0
-#define SCSI_RESET_PUNT 1
-#define SCSI_RESET_SUCCESS 2
-#define SCSI_RESET_PENDING 3
-#define SCSI_RESET_WAKEUP 4
-#define SCSI_RESET_NOT_RUNNING 5
-#define SCSI_RESET_ERROR 6
-
-#define SCSI_RESET_SYNCHRONOUS         0x01
-#define SCSI_RESET_ASYNCHRONOUS                0x02
-#define SCSI_RESET_SUGGEST_BUS_RESET   0x04
-#define SCSI_RESET_SUGGEST_HOST_RESET  0x08
-
-#define SCSI_RESET_BUS_RESET 0x100
-#define SCSI_RESET_HOST_RESET 0x200
-#define SCSI_RESET_ACTION   0xff
-
-/* Debugging printk definitions:
- *
- *  ARB  -> arbitration
- *  ASEN -> auto-sense
- *  DMA  -> DMA
- *  HSH  -> PIO handshake
- *  INF  -> information transfer
- *  INI  -> initialization
- *  INT  -> interrupt
- *  LNK  -> linked commands
- *  MAIN -> NCR5380_main() control flow
- *  NDAT -> no data-out phase
- *  NWR  -> no write commands
- *  PIO  -> PIO transfers
- *  PDMA -> pseudo DMA (unused on Atari)
- *  QU   -> queues
- *  RSL  -> reselections
- *  SEL  -> selections
- *  USL  -> usleep cpde (unused on Atari)
- *  LBS  -> last byte sent (unused on Atari)
- *  RSS  -> restarting of selections
- *  EXT  -> extended messages
- *  ABRT -> aborting and resetting
- *  TAG  -> queue tag handling
- *  MER  -> merging of consec. buffers
- *
- */
-
-#define dprint(flg, format...)                 \
-({                                             \
-       if (NDEBUG & (flg))                     \
-               printk(KERN_DEBUG format);      \
-})
-
-#define ARB_PRINTK(format, args...) \
-       dprint(NDEBUG_ARBITRATION, format , ## args)
-#define ASEN_PRINTK(format, args...) \
-       dprint(NDEBUG_AUTOSENSE, format , ## args)
-#define DMA_PRINTK(format, args...) \
-       dprint(NDEBUG_DMA, format , ## args)
-#define HSH_PRINTK(format, args...) \
-       dprint(NDEBUG_HANDSHAKE, format , ## args)
-#define INF_PRINTK(format, args...) \
-       dprint(NDEBUG_INFORMATION, format , ## args)
-#define INI_PRINTK(format, args...) \
-       dprint(NDEBUG_INIT, format , ## args)
-#define INT_PRINTK(format, args...) \
-       dprint(NDEBUG_INTR, format , ## args)
-#define LNK_PRINTK(format, args...) \
-       dprint(NDEBUG_LINKED, format , ## args)
-#define MAIN_PRINTK(format, args...) \
-       dprint(NDEBUG_MAIN, format , ## args)
-#define NDAT_PRINTK(format, args...) \
-       dprint(NDEBUG_NO_DATAOUT, format , ## args)
-#define NWR_PRINTK(format, args...) \
-       dprint(NDEBUG_NO_WRITE, format , ## args)
-#define PIO_PRINTK(format, args...) \
-       dprint(NDEBUG_PIO, format , ## args)
-#define PDMA_PRINTK(format, args...) \
-       dprint(NDEBUG_PSEUDO_DMA, format , ## args)
-#define QU_PRINTK(format, args...) \
-       dprint(NDEBUG_QUEUES, format , ## args)
-#define RSL_PRINTK(format, args...) \
-       dprint(NDEBUG_RESELECTION, format , ## args)
-#define SEL_PRINTK(format, args...) \
-       dprint(NDEBUG_SELECTION, format , ## args)
-#define USL_PRINTK(format, args...) \
-       dprint(NDEBUG_USLEEP, format , ## args)
-#define LBS_PRINTK(format, args...) \
-       dprint(NDEBUG_LAST_BYTE_SENT, format , ## args)
-#define RSS_PRINTK(format, args...) \
-       dprint(NDEBUG_RESTART_SELECT, format , ## args)
-#define EXT_PRINTK(format, args...) \
-       dprint(NDEBUG_EXTENDED, format , ## args)
-#define ABRT_PRINTK(format, args...) \
-       dprint(NDEBUG_ABORT, format , ## args)
-#define TAG_PRINTK(format, args...) \
-       dprint(NDEBUG_TAGS, format , ## args)
-#define MER_PRINTK(format, args...) \
-       dprint(NDEBUG_MERGING, format , ## args)
-
-/* conditional macros for NCR5380_print_{,phase,status} */
-
-#define NCR_PRINT(mask)        \
-       ((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0)
-
-#define NCR_PRINT_PHASE(mask) \
-       ((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0)
-
-#define NCR_PRINT_STATUS(mask) \
-       ((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0)
-
-
 #endif /* ndef ASM */
 #endif /* ATARI_SCSI_H */
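All of the removed *_PRINTK wrappers above funnel into the shared dprintk() helper that the converted call sites now use directly, passing the debug-area flag inline. A minimal sketch of the shape of that macro, assuming the definition provided by the NCR5380 core headers in this series:

/* sketch only; the real definition lives in the shared NCR5380 headers */
#define dprintk(flg, fmt, ...)						\
	do {								\
		if ((NDEBUG) & (flg))					\
			printk(KERN_DEBUG fmt, ##__VA_ARGS__);		\
	} while (0)

/* so a former TAG_PRINTK("scsi%d: ...", ...) call site becomes      */
/* dprintk(NDEBUG_TAGS, "scsi%d: ...", ...);                          */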
 
index 1bfb0bd01198679fac77fb0c2c73a6e8d4ef57e9..860f527d8f26bd4e3302e5e77ab02cb5ba74f8b8 100644 (file)
@@ -83,9 +83,20 @@ static inline void queue_tail_inc(struct be_queue_info *q)
 
 /*ISCSI */
 
+struct be_aic_obj {            /* Adaptive interrupt coalescing (AIC) info */
+       bool enable;
+       u32 min_eqd;            /* in usecs */
+       u32 max_eqd;            /* in usecs */
+       u32 prev_eqd;           /* in usecs */
+       u32 et_eqd;             /* configured val when aic is off */
+       ulong jiffs;
+       u64 eq_prev;            /* Used to calculate eqe */
+};
+
 struct be_eq_obj {
        bool todo_mcc_cq;
        bool todo_cq;
+       u32 cq_count;
        struct be_queue_info q;
        struct beiscsi_hba *phba;
        struct be_queue_info *cq;
index 7cf7f99ee44238a874e193a09d24d13f69db759b..cc7405c0eca085852adf3142c18759914538d3a0 100644 (file)
@@ -71,6 +71,7 @@ struct be_mcc_wrb {
 #define BEISCSI_FW_MBX_TIMEOUT 100
 
 /* MBOX Command VER */
+#define MBX_CMD_VER1   0x01
 #define MBX_CMD_VER2   0x02
 
 struct be_mcc_compl {
@@ -271,6 +272,12 @@ struct be_cmd_resp_eq_create {
        u16 rsvd0;              /* sword */
 } __packed;
 
+struct be_set_eqd {
+       u32 eq_id;
+       u32 phase;
+       u32 delay_multiplier;
+} __packed;
+
 struct mgmt_chap_format {
        u32 flags;
        u8  intr_chap_name[256];
@@ -622,7 +629,7 @@ struct be_cmd_req_modify_eq_delay {
                u32 eq_id;
                u32 phase;
                u32 delay_multiplier;
-       } delay[8];
+       } delay[MAX_CPUS];
 } __packed;
 
 /******************** Get MAC ADDR *******************/
@@ -708,6 +715,8 @@ unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
 
 void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
 
+int be_cmd_modify_eq_delay(struct beiscsi_hba *phba, struct be_set_eqd *,
+                           int num);
 int beiscsi_mccq_compl(struct beiscsi_hba *phba,
                        uint32_t tag, struct be_mcc_wrb **wrb,
                        struct be_dma_mem *mbx_cmd_mem);
@@ -1005,6 +1014,26 @@ struct tcp_connect_and_offload_in {
        u8 rsvd0[3];
 } __packed;
 
+struct tcp_connect_and_offload_in_v1 {
+       struct be_cmd_req_hdr hdr;
+       struct ip_addr_format ip_address;
+       u16 tcp_port;
+       u16 cid;
+       u16 cq_id;
+       u16 defq_id;
+       struct phys_addr dataout_template_pa;
+       u16 hdr_ring_id;
+       u16 data_ring_id;
+       u8 do_offload;
+       u8 ifd_state;
+       u8 rsvd0[2];
+       u16 tcp_window_size;
+       u8 tcp_window_scale_count;
+       u8 rsvd1;
+       u32 tcp_mss:24;
+       u8 rsvd2;
+} __packed;
+
 struct tcp_connect_and_offload_out {
        struct be_cmd_resp_hdr hdr;
        u32 connection_handle;
index a3df43324c9834816d20eab213f2b08465078f35..fd284ff36ecf28fb05581619b1c8ca0e34eac018 100644 (file)
@@ -1106,7 +1106,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
        struct beiscsi_hba *phba = beiscsi_ep->phba;
        struct tcp_connect_and_offload_out *ptcpcnct_out;
        struct be_dma_mem nonemb_cmd;
-       unsigned int tag;
+       unsigned int tag, req_memsize;
        int ret = -ENOMEM;
 
        beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
@@ -1127,8 +1127,14 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                       (beiscsi_ep->ep_cid)] = ep;
 
        beiscsi_ep->cid_vld = 0;
+
+       if (is_chip_be2_be3r(phba))
+               req_memsize = sizeof(struct tcp_connect_and_offload_in);
+       else
+               req_memsize = sizeof(struct tcp_connect_and_offload_in_v1);
+
        nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
-                               sizeof(struct tcp_connect_and_offload_in),
+                               req_memsize,
                                &nonemb_cmd.dma);
        if (nonemb_cmd.va == NULL) {
 
@@ -1139,7 +1145,7 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
                beiscsi_free_ep(beiscsi_ep);
                return -ENOMEM;
        }
-       nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in);
+       nonemb_cmd.size = req_memsize;
        memset(nonemb_cmd.va, 0, nonemb_cmd.size);
        tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd);
        if (tag <= 0) {
index 0d822297aa80d4bbc0c665f903ea1bf975ae5203..554349029628417d03b163cb066b94b6b0552c05 100644 (file)
@@ -599,15 +599,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
        pci_set_drvdata(pcidev, phba);
        phba->interface_handle = 0xFFFFFFFF;
 
-       if (iscsi_host_add(shost, &phba->pcidev->dev))
-               goto free_devices;
-
        return phba;
-
-free_devices:
-       pci_dev_put(phba->pcidev);
-       iscsi_host_free(phba->shost);
-       return NULL;
 }
 
 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
@@ -2279,6 +2271,7 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
 
        pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
        ret = beiscsi_process_cq(pbe_eq);
+       pbe_eq->cq_count += ret;
        if (ret < budget) {
                phba = pbe_eq->phba;
                blk_iopoll_complete(iop);
@@ -3692,7 +3685,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
        struct hwi_controller *phwi_ctrlr;
        struct hwi_context_memory *phwi_context;
        struct hwi_async_pdu_context *pasync_ctx;
-       int i, eq_num, ulp_num;
+       int i, eq_for_mcc, ulp_num;
 
        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -3729,16 +3722,17 @@ static void hwi_cleanup(struct beiscsi_hba *phba)
                if (q->created)
                        beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
        }
+
+       be_mcc_queues_destroy(phba);
        if (phba->msix_enabled)
-               eq_num = 1;
+               eq_for_mcc = 1;
        else
-               eq_num = 0;
-       for (i = 0; i < (phba->num_cpus + eq_num); i++) {
+               eq_for_mcc = 0;
+       for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
                q = &phwi_context->be_eq[i].q;
                if (q->created)
                        beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
        }
-       be_mcc_queues_destroy(phba);
        be_cmd_fw_uninit(ctrl);
 }
 
@@ -3833,9 +3827,9 @@ static int hwi_init_port(struct beiscsi_hba *phba)
 
        phwi_ctrlr = phba->phwi_ctrlr;
        phwi_context = phwi_ctrlr->phwi_ctxt;
-       phwi_context->max_eqd = 0;
+       phwi_context->max_eqd = 128;
        phwi_context->min_eqd = 0;
-       phwi_context->cur_eqd = 64;
+       phwi_context->cur_eqd = 0;
        be_cmd_fw_initialize(&phba->ctrl);
 
        status = beiscsi_create_eqs(phba, phwi_context);
@@ -5290,6 +5284,57 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba)
        return;
 }
 
+static void be_eqd_update(struct beiscsi_hba *phba)
+{
+       struct be_set_eqd set_eqd[MAX_CPUS];
+       struct be_aic_obj *aic;
+       struct be_eq_obj *pbe_eq;
+       struct hwi_controller *phwi_ctrlr;
+       struct hwi_context_memory *phwi_context;
+       int eqd, i, num = 0;
+       ulong now;
+       u32 pps, delta;
+       unsigned int tag;
+
+       phwi_ctrlr = phba->phwi_ctrlr;
+       phwi_context = phwi_ctrlr->phwi_ctxt;
+
+       for (i = 0; i <= phba->num_cpus; i++) {
+               aic = &phba->aic_obj[i];
+               pbe_eq = &phwi_context->be_eq[i];
+               now = jiffies;
+               if (!aic->jiffs || time_before(now, aic->jiffs) ||
+                   pbe_eq->cq_count < aic->eq_prev) {
+                       aic->jiffs = now;
+                       aic->eq_prev = pbe_eq->cq_count;
+                       continue;
+               }
+               delta = jiffies_to_msecs(now - aic->jiffs);
+               pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
+               eqd = (pps / 1500) << 2;
+
+               if (eqd < 8)
+                       eqd = 0;
+               eqd = min_t(u32, eqd, phwi_context->max_eqd);
+               eqd = max_t(u32, eqd, phwi_context->min_eqd);
+
+               aic->jiffs = now;
+               aic->eq_prev = pbe_eq->cq_count;
+
+               if (eqd != aic->prev_eqd) {
+                       set_eqd[num].delay_multiplier = (eqd * 65)/100;
+                       set_eqd[num].eq_id = pbe_eq->q.id;
+                       aic->prev_eqd = eqd;
+                       num++;
+               }
+       }
+       if (num) {
+               tag = be_cmd_modify_eq_delay(phba, set_eqd, num);
+               if (tag)
+                       beiscsi_mccq_compl(phba, tag, NULL, NULL);
+       }
+}
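The added be_eqd_update() derives a new interrupt-coalescing delay from the completion rate: events per second from the cq_count delta over the elapsed milliseconds, divided by 1500 and scaled by four, forced to zero below a small threshold, then clamped to the min/max configured in hwi_init_port(). A small standalone sketch of that arithmetic, with numbers chosen purely for illustration:

#include <stdio.h>

static unsigned int eqd_from_rate(unsigned int completions, unsigned int delta_ms,
				  unsigned int min_eqd, unsigned int max_eqd)
{
	unsigned int pps = completions * 1000u / delta_ms;	/* events per second */
	unsigned int eqd = (pps / 1500u) << 2;			/* delay in usecs */

	if (eqd < 8)
		eqd = 0;		/* rate too low: switch coalescing off */
	if (eqd > max_eqd)
		eqd = max_eqd;
	if (eqd < min_eqd)
		eqd = min_eqd;
	return eqd;
}

int main(void)
{
	/* 30000 CQ events in one second -> eqd 80 us, delay multiplier 52 */
	unsigned int eqd = eqd_from_rate(30000, 1000, 0, 128);

	printf("eqd=%u multiplier=%u\n", eqd, eqd * 65 / 100);
	return 0;
}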
+
 /*
  * beiscsi_hw_health_check()- Check adapter health
  * @work: work item to check HW health
@@ -5303,6 +5348,8 @@ beiscsi_hw_health_check(struct work_struct *work)
                container_of(work, struct beiscsi_hba,
                             beiscsi_hw_check_task.work);
 
+       be_eqd_update(phba);
+
        beiscsi_ue_detect(phba);
 
        schedule_delayed_work(&phba->beiscsi_hw_check_task,
@@ -5579,7 +5626,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
                phba->ctrl.mcc_numtag[i + 1] = 0;
                phba->ctrl.mcc_tag_available++;
                memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
-                      sizeof(struct beiscsi_mcc_tag_state));
+                      sizeof(struct be_dma_mem));
        }
 
        phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
@@ -5621,6 +5668,9 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev,
        }
        hwi_enable_intr(phba);
 
+       if (iscsi_host_add(phba->shost, &phba->pcidev->dev))
+               goto free_blkenbld;
+
        if (beiscsi_setup_boot_info(phba))
                /*
                 * log error but continue, because we may not be using
index 9380b55bdeaf754ec0df495b4db87e2872bc511f..9ceab426eec97ca718d67e9505b43e4acb78fb6c 100644 (file)
@@ -36,7 +36,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 
 #define DRV_NAME               "be2iscsi"
-#define BUILD_STR              "10.2.125.0"
+#define BUILD_STR              "10.2.273.0"
 #define BE_NAME                        "Emulex OneConnect" \
                                "Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC               BE_NAME " " "Driver"
@@ -71,8 +71,8 @@
 
 #define BEISCSI_SGLIST_ELEMENTS        30
 
-#define BEISCSI_CMD_PER_LUN    128     /* scsi_host->cmd_per_lun */
-#define BEISCSI_MAX_SECTORS    2048    /* scsi_host->max_sectors */
+#define BEISCSI_CMD_PER_LUN    128 /* scsi_host->cmd_per_lun */
+#define BEISCSI_MAX_SECTORS    1024 /* scsi_host->max_sectors */
 #define BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE 128 /* Template size per cxn */
 
 #define BEISCSI_MAX_CMD_LEN    16      /* scsi_host->max_cmd_len */
@@ -427,6 +427,7 @@ struct beiscsi_hba {
        struct mgmt_session_info boot_sess;
        struct invalidate_command_table inv_tbl[128];
 
+       struct be_aic_obj aic_obj[MAX_CPUS];
        unsigned int attr_log_enable;
        int (*iotask_fn)(struct iscsi_task *,
                        struct scatterlist *sg,
index 088bdf752cfafac10630ce830e4b12f6602dbf4a..6045aa78986ac1e7ee828e4aab859a457f52faff 100644 (file)
@@ -155,6 +155,43 @@ void beiscsi_ue_detect(struct beiscsi_hba *phba)
        }
 }
 
+int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
+                struct be_set_eqd *set_eqd, int num)
+{
+       struct be_ctrl_info *ctrl = &phba->ctrl;
+       struct be_mcc_wrb *wrb;
+       struct be_cmd_req_modify_eq_delay *req;
+       unsigned int tag = 0;
+       int i;
+
+       spin_lock(&ctrl->mbox_lock);
+       tag = alloc_mcc_tag(phba);
+       if (!tag) {
+               spin_unlock(&ctrl->mbox_lock);
+               return tag;
+       }
+
+       wrb = wrb_from_mccq(phba);
+       req = embedded_payload(wrb);
+
+       wrb->tag0 |= tag;
+       be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
+       be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+               OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
+
+       req->num_eq = cpu_to_le32(num);
+       for (i = 0; i < num; i++) {
+               req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
+               req->delay[i].phase = 0;
+               req->delay[i].delay_multiplier =
+                               cpu_to_le32(set_eqd[i].delay_multiplier);
+       }
+
+       be_mcc_notify(phba);
+       spin_unlock(&ctrl->mbox_lock);
+       return tag;
+}
+
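
be_cmd_modify_eq_delay() above also shows the MCC submission ordering the rest of this file moves to: take mbox_lock, allocate the completion tag first, and fetch a WRB from the MCC queue only once a tag is guaranteed, so a failed allocation never consumes or clears a WRB; that is why the later hunks drop the early wrb_from_mccq() calls and the memset of the WRB. A condensed sketch of the pattern, with the command-specific payload elided (types and helpers as used in the driver above):

static unsigned int mcc_cmd_submit_pattern(struct beiscsi_hba *phba)
{
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_mcc_wrb *wrb;
        unsigned int tag;

        spin_lock(&ctrl->mbox_lock);
        tag = alloc_mcc_tag(phba);      /* reserve a completion tag first */
        if (!tag) {
                spin_unlock(&ctrl->mbox_lock);
                return 0;               /* nothing consumed on failure */
        }

        wrb = wrb_from_mccq(phba);      /* WRB taken only once the tag exists */
        wrb->tag0 |= tag;
        /* ... be_wrb_hdr_prepare(), be_cmd_hdr_prepare(), payload setup ... */
        be_mcc_notify(phba);
        spin_unlock(&ctrl->mbox_lock);
        return tag;
}
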
 /**
  * mgmt_reopen_session()- Reopen a session based on reopen_type
  * @phba: Device priv structure instance
@@ -447,8 +484,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
                                         struct be_dma_mem *nonemb_cmd)
 {
        struct be_cmd_resp_hdr *resp;
-       struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
-       struct be_sge *mcc_sge = nonembedded_sgl(wrb);
+       struct be_mcc_wrb *wrb;
+       struct be_sge *mcc_sge;
        unsigned int tag = 0;
        struct iscsi_bsg_request *bsg_req = job->request;
        struct be_bsg_vendor_cmd *req = nonemb_cmd->va;
@@ -465,7 +502,6 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
        req->sector = sector;
        req->offset = offset;
        spin_lock(&ctrl->mbox_lock);
-       memset(wrb, 0, sizeof(*wrb));
 
        switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
        case BEISCSI_WRITE_FLASH:
@@ -495,6 +531,8 @@ unsigned int mgmt_vendor_specific_fw_cmd(struct be_ctrl_info *ctrl,
                return tag;
        }
 
+       wrb = wrb_from_mccq(phba);
+       mcc_sge = nonembedded_sgl(wrb);
        be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false,
                           job->request_payload.sg_cnt);
        mcc_sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
@@ -525,7 +563,6 @@ int mgmt_epfw_cleanup(struct beiscsi_hba *phba, unsigned short ulp_num)
        int status = 0;
 
        spin_lock(&ctrl->mbox_lock);
-       memset(wrb, 0, sizeof(*wrb));
 
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
@@ -675,7 +712,7 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
        struct sockaddr_in6 *daddr_in6 = (struct sockaddr_in6 *)dst_addr;
        struct be_ctrl_info *ctrl = &phba->ctrl;
        struct be_mcc_wrb *wrb;
-       struct tcp_connect_and_offload_in *req;
+       struct tcp_connect_and_offload_in_v1 *req;
        unsigned short def_hdr_id;
        unsigned short def_data_id;
        struct phys_addr template_address = { 0, 0 };
@@ -702,17 +739,16 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
                return tag;
        }
        wrb = wrb_from_mccq(phba);
-       memset(wrb, 0, sizeof(*wrb));
        sge = nonembedded_sgl(wrb);
 
        req = nonemb_cmd->va;
        memset(req, 0, sizeof(*req));
        wrb->tag0 |= tag;
 
-       be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1);
+       be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_TCP_CONNECT_AND_OFFLOAD,
-                          sizeof(*req));
+                          nonemb_cmd->size);
        if (dst_addr->sa_family == PF_INET) {
                __be32 s_addr = daddr_in->sin_addr.s_addr;
                req->ip_address.ip_type = BE2_IPV4;
@@ -758,6 +794,13 @@ int mgmt_open_connection(struct beiscsi_hba *phba,
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);
+
+       if (!is_chip_be2_be3r(phba)) {
+               req->hdr.version = MBX_CMD_VER1;
+               req->tcp_window_size = 0;
+               req->tcp_window_scale_count = 2;
+       }
+
        be_mcc_notify(phba);
        spin_unlock(&ctrl->mbox_lock);
        return tag;
@@ -804,7 +847,7 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
                                int resp_buf_len)
 {
        struct be_ctrl_info *ctrl = &phba->ctrl;
-       struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
+       struct be_mcc_wrb *wrb;
        struct be_sge *sge;
        unsigned int tag;
        int rc = 0;
@@ -816,7 +859,8 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
                rc = -ENOMEM;
                goto free_cmd;
        }
-       memset(wrb, 0, sizeof(*wrb));
+
+       wrb = wrb_from_mccq(phba);
        wrb->tag0 |= tag;
        sge = nonembedded_sgl(wrb);
 
index 01b8c97284c0653753f6bc8085c47f799ce09f79..24a8fc577477394b2c674905eb3227d12bd9feb1 100644 (file)
@@ -335,5 +335,7 @@ void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
 void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
                             struct wrb_handle *pwrb_handle);
 void beiscsi_ue_detect(struct beiscsi_hba *phba);
+int be_cmd_modify_eq_delay(struct beiscsi_hba *phba,
+                        struct be_set_eqd *, int num);
 
 #endif
index cc0fbcdc5192a81d8a7a23038a3f42d7a990ecaf..7593b7c1d3367e81a5a323cf5feb7805e1846620 100644 (file)
@@ -507,7 +507,7 @@ bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
        struct bfad_vport_s   *vport;
        int rc;
 
-       vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
+       vport = kzalloc(sizeof(struct bfad_vport_s), GFP_ATOMIC);
        if (!vport) {
                bfa_trc(bfad, 0);
                return;
index 46a37657307fd9dae74e800709f80f2140da65a5..512aed3ae4f1c70ba856908e444890f945be784f 100644 (file)
@@ -1966,26 +1966,29 @@ static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
 {
        int i;
        int segment_count;
-       int hash_table_size;
        u32 *pbl;
 
-       segment_count = hba->hash_tbl_segment_count;
-       hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
-               sizeof(struct fcoe_hash_table_entry);
+       if (hba->hash_tbl_segments) {
 
-       pbl = hba->hash_tbl_pbl;
-       for (i = 0; i < segment_count; ++i) {
-               dma_addr_t dma_address;
+               pbl = hba->hash_tbl_pbl;
+               if (pbl) {
+                       segment_count = hba->hash_tbl_segment_count;
+                       for (i = 0; i < segment_count; ++i) {
+                               dma_addr_t dma_address;
 
-               dma_address = le32_to_cpu(*pbl);
-               ++pbl;
-               dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
-               ++pbl;
-               dma_free_coherent(&hba->pcidev->dev,
-                                 BNX2FC_HASH_TBL_CHUNK_SIZE,
-                                 hba->hash_tbl_segments[i],
-                                 dma_address);
+                               dma_address = le32_to_cpu(*pbl);
+                               ++pbl;
+                               dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
+                               ++pbl;
+                               dma_free_coherent(&hba->pcidev->dev,
+                                                 BNX2FC_HASH_TBL_CHUNK_SIZE,
+                                                 hba->hash_tbl_segments[i],
+                                                 dma_address);
+                       }
+               }
 
+               kfree(hba->hash_tbl_segments);
+               hba->hash_tbl_segments = NULL;
        }
 
        if (hba->hash_tbl_pbl) {
@@ -2023,7 +2026,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
        dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
        if (!dma_segment_array) {
                printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
-               return -ENOMEM;
+               goto cleanup_ht;
        }
 
        for (i = 0; i < segment_count; ++i) {
@@ -2034,15 +2037,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
                                           GFP_KERNEL);
                if (!hba->hash_tbl_segments[i]) {
                        printk(KERN_ERR PFX "hash segment alloc failed\n");
-                       while (--i >= 0) {
-                               dma_free_coherent(&hba->pcidev->dev,
-                                                   BNX2FC_HASH_TBL_CHUNK_SIZE,
-                                                   hba->hash_tbl_segments[i],
-                                                   dma_segment_array[i]);
-                               hba->hash_tbl_segments[i] = NULL;
-                       }
-                       kfree(dma_segment_array);
-                       return -ENOMEM;
+                       goto cleanup_dma;
                }
                memset(hba->hash_tbl_segments[i], 0,
                       BNX2FC_HASH_TBL_CHUNK_SIZE);
@@ -2054,8 +2049,7 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
                                               GFP_KERNEL);
        if (!hba->hash_tbl_pbl) {
                printk(KERN_ERR PFX "hash table pbl alloc failed\n");
-               kfree(dma_segment_array);
-               return -ENOMEM;
+               goto cleanup_dma;
        }
        memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
 
@@ -2080,6 +2074,22 @@ static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
        }
        kfree(dma_segment_array);
        return 0;
+
+cleanup_dma:
+       for (i = 0; i < segment_count; ++i) {
+               if (hba->hash_tbl_segments[i])
+                       dma_free_coherent(&hba->pcidev->dev,
+                                           BNX2FC_HASH_TBL_CHUNK_SIZE,
+                                           hba->hash_tbl_segments[i],
+                                           dma_segment_array[i]);
+       }
+
+       kfree(dma_segment_array);
+
+cleanup_ht:
+       kfree(hba->hash_tbl_segments);
+       hba->hash_tbl_segments = NULL;
+       return -ENOMEM;
 }
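
The reworked bnx2fc_allocate_hash_table() above replaces the open-coded unwind in each failure branch with a centralized goto cleanup, and bnx2fc_free_hash_table() now tolerates a partially constructed table. A generic sketch of that allocate-or-unwind shape, with hypothetical names (not driver code):

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int alloc_segments(struct device *dev, void **segs, dma_addr_t *dmas,
                          int count, size_t chunk)
{
        int i;

        for (i = 0; i < count; i++) {
                segs[i] = dma_alloc_coherent(dev, chunk, &dmas[i], GFP_KERNEL);
                if (!segs[i])
                        goto cleanup;           /* unwind what is already allocated */
        }
        return 0;

cleanup:
        while (--i >= 0)
                dma_free_coherent(dev, chunk, segs[i], dmas[i]);
        return -ENOMEM;
}
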
 
 /**
index eb29fe7eaf49b2ff84d2219c349bc5ac80a12529..0a667fe05006aa29046e00b3e84bcd375afbc91a 100644 (file)
@@ -3,8 +3,6 @@
 #define PSEUDO_DMA
 #define DONT_USE_INTR
 #define UNSAFE                 /* Leave interrupts enabled during pseudo-dma I/O */
-#define xNDEBUG (NDEBUG_INTR+NDEBUG_RESELECTION+\
-                NDEBUG_SELECTION+NDEBUG_ARBITRATION)
 #define DMA_WORKS_RIGHT
 
 
index f37f3e3dd5d5af98a3744f74b93761183d847f29..6504a195c874fbae42d49e60f8bc9cf56682a3b6 100644 (file)
@@ -390,7 +390,7 @@ static int esas2r_probe(struct pci_dev *pcid,
        esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
                       "pci_enable_device() OK");
        esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
-                      "after pci_device_enable() enable_cnt: %d",
+                      "after pci_enable_device() enable_cnt: %d",
                       pcid->enable_cnt.counter);
 
        host = scsi_host_alloc(&driver_template, host_alloc_size);
index 528d43b7b569eceee49b65a6a19d06150c55283c..1d3521e13d77b5322d3ed2bf2fd20247a2f73989 100644 (file)
 
 #define DRV_NAME               "fnic"
 #define DRV_DESCRIPTION                "Cisco FCoE HBA Driver"
-#define DRV_VERSION            "1.5.0.45"
+#define DRV_VERSION            "1.6.0.10"
 #define PFX                    DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
 #define DESC_CLEAN_LOW_WATERMARK 8
 #define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD        16 /* UCSM default throttle count */
 #define FNIC_MIN_IO_REQ                        256 /* Min IO throttle count */
-#define FNIC_MAX_IO_REQ                2048 /* scsi_cmnd tag map entries */
+#define FNIC_MAX_IO_REQ                1024 /* scsi_cmnd tag map entries */
+#define FNIC_DFLT_IO_REQ        256 /* Default scsi_cmnd tag map entries */
 #define        FNIC_IO_LOCKS           64 /* IO locks: power of 2 */
 #define FNIC_DFLT_QUEUE_DEPTH  32
 #define        FNIC_STATS_RATE_LIMIT   4 /* limit rate at which stats are pulled up */
index b6073f875761bf49b4828decd33bc1a8aafe8ead..2c613bdea78f9b2ed400acb25ad5416a01674177 100644 (file)
@@ -25,6 +25,21 @@ static struct dentry *fnic_trace_debugfs_file;
 static struct dentry *fnic_trace_enable;
 static struct dentry *fnic_stats_debugfs_root;
 
+static struct dentry *fnic_fc_trace_debugfs_file;
+static struct dentry *fnic_fc_rdata_trace_debugfs_file;
+static struct dentry *fnic_fc_trace_enable;
+static struct dentry *fnic_fc_trace_clear;
+
+struct fc_trace_flag_type {
+       u8 fc_row_file;
+       u8 fc_normal_file;
+       u8 fnic_trace;
+       u8 fc_trace;
+       u8 fc_clear;
+};
+
+static struct fc_trace_flag_type *fc_trc_flag;
+
 /*
  * fnic_debugfs_init - Initialize debugfs for fnic debug logging
  *
@@ -56,6 +71,18 @@ int fnic_debugfs_init(void)
                return rc;
        }
 
+       /* Allocate memory to structure */
+       fc_trc_flag = (struct fc_trace_flag_type *)
+               vmalloc(sizeof(struct fc_trace_flag_type));
+
+       if (fc_trc_flag) {
+               fc_trc_flag->fc_row_file = 0;
+               fc_trc_flag->fc_normal_file = 1;
+               fc_trc_flag->fnic_trace = 2;
+               fc_trc_flag->fc_trace = 3;
+               fc_trc_flag->fc_clear = 4;
+       }
+
        rc = 0;
        return rc;
 }
@@ -74,15 +101,19 @@ void fnic_debugfs_terminate(void)
 
        debugfs_remove(fnic_trace_debugfs_root);
        fnic_trace_debugfs_root = NULL;
+
+       if (fc_trc_flag)
+               vfree(fc_trc_flag);
 }
 
 /*
- * fnic_trace_ctrl_open - Open the trace_enable file
+ * fnic_trace_ctrl_open - Open the trace_enable file for fnic_trace
+ *               or open the fc_trace_enable file for fc_trace
  * @inode: The inode pointer.
  * @file: The file pointer to attach the trace enable/disable flag.
  *
  * Description:
- * This routine opens a debugsfs file trace_enable.
+ * This routine opens a debugfs file, trace_enable or fc_trace_enable.
  *
  * Returns:
  * This function returns zero if successful.
@@ -94,15 +125,19 @@ static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
 }
 
 /*
- * fnic_trace_ctrl_read - Read a trace_enable debugfs file
+ * fnic_trace_ctrl_read -
+ *          Read trace_enable, fc_trace_enable
+ *              or fc_trace_clear debugfs file
  * @filp: The file pointer to read from.
  * @ubuf: The buffer to copy the data to.
  * @cnt: The number of bytes to read.
  * @ppos: The position in the file to start reading from.
  *
  * Description:
- * This routine reads value of variable fnic_tracing_enabled
- * and stores into local @buf. It will start reading file at @ppos and
+ * This routine reads the value of fnic_tracing_enabled,
+ * fnic_fc_tracing_enabled or fnic_fc_trace_cleared
+ * and stores it into the local @buf.
+ * It will start reading file at @ppos and
  * copy up to @cnt of data to @ubuf from @buf.
  *
  * Returns:
@@ -114,13 +149,25 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
 {
        char buf[64];
        int len;
-       len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+       u8 *trace_type;
+       len = 0;
+       trace_type = (u8 *)filp->private_data;
+       if (*trace_type == fc_trc_flag->fnic_trace)
+               len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+       else if (*trace_type == fc_trc_flag->fc_trace)
+               len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled);
+       else if (*trace_type == fc_trc_flag->fc_clear)
+               len = sprintf(buf, "%u\n", fnic_fc_trace_cleared);
+       else
+               pr_err("fnic: Cannot read from any debugfs file\n");
 
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
 }
 
 /*
- * fnic_trace_ctrl_write - Write to trace_enable debugfs file
+ * fnic_trace_ctrl_write -
+ * Write to trace_enable, fc_trace_enable or
+ *         fc_trace_clear debugfs file
  * @filp: The file pointer to write from.
  * @ubuf: The buffer to copy the data from.
  * @cnt: The number of bytes to write.
@@ -128,7 +175,8 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
  *
  * Description:
  * This routine writes data from user buffer @ubuf to buffer @buf and
- * sets fnic_tracing_enabled value as per user input.
+ * sets fnic_tracing_enabled, fnic_fc_tracing_enabled or fnic_fc_trace_cleared
+ * value as per user input.
  *
  * Returns:
  * This function returns the amount of data that was written.
@@ -140,6 +188,8 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
        char buf[64];
        unsigned long val;
        int ret;
+       u8 *trace_type;
+       trace_type = (u8 *)filp->private_data;
 
        if (cnt >= sizeof(buf))
                return -EINVAL;
@@ -153,12 +203,27 @@ static ssize_t fnic_trace_ctrl_write(struct file *filp,
        if (ret < 0)
                return ret;
 
-       fnic_tracing_enabled = val;
+       if (*trace_type == fc_trc_flag->fnic_trace)
+               fnic_tracing_enabled = val;
+       else if (*trace_type == fc_trc_flag->fc_trace)
+               fnic_fc_tracing_enabled = val;
+       else if (*trace_type == fc_trc_flag->fc_clear)
+               fnic_fc_trace_cleared = val;
+       else
+               pr_err("fnic: cannot write to any debugfs file\n");
+
        (*ppos)++;
 
        return cnt;
 }
 
+static const struct file_operations fnic_trace_ctrl_fops = {
+       .owner = THIS_MODULE,
+       .open = fnic_trace_ctrl_open,
+       .read = fnic_trace_ctrl_read,
+       .write = fnic_trace_ctrl_write,
+};
+
 /*
  * fnic_trace_debugfs_open - Open the fnic trace log
  * @inode: The inode pointer
@@ -178,19 +243,36 @@ static int fnic_trace_debugfs_open(struct inode *inode,
                                  struct file *file)
 {
        fnic_dbgfs_t *fnic_dbg_prt;
+       u8 *rdata_ptr;
+       rdata_ptr = (u8 *)inode->i_private;
        fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
        if (!fnic_dbg_prt)
                return -ENOMEM;
 
-       fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE)));
-       if (!fnic_dbg_prt->buffer) {
-               kfree(fnic_dbg_prt);
-               return -ENOMEM;
+       if (*rdata_ptr == fc_trc_flag->fnic_trace) {
+               fnic_dbg_prt->buffer = vmalloc(3 *
+                                       (trace_max_pages * PAGE_SIZE));
+               if (!fnic_dbg_prt->buffer) {
+                       kfree(fnic_dbg_prt);
+                       return -ENOMEM;
+               }
+               memset((void *)fnic_dbg_prt->buffer, 0,
+               3 * (trace_max_pages * PAGE_SIZE));
+               fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
+       } else {
+               fnic_dbg_prt->buffer =
+                       vmalloc(3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
+               if (!fnic_dbg_prt->buffer) {
+                       kfree(fnic_dbg_prt);
+                       return -ENOMEM;
+               }
+               memset((void *)fnic_dbg_prt->buffer, 0,
+                       3 * (fnic_fc_trace_max_pages * PAGE_SIZE));
+               fnic_dbg_prt->buffer_len =
+                       fnic_fc_trace_get_data(fnic_dbg_prt, *rdata_ptr);
        }
-       memset((void *)fnic_dbg_prt->buffer, 0,
-                         (3*(trace_max_pages * PAGE_SIZE)));
-       fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
        file->private_data = fnic_dbg_prt;
+
        return 0;
 }
 
@@ -272,13 +354,6 @@ static int fnic_trace_debugfs_release(struct inode *inode,
        return 0;
 }
 
-static const struct file_operations fnic_trace_ctrl_fops = {
-       .owner = THIS_MODULE,
-       .open = fnic_trace_ctrl_open,
-       .read = fnic_trace_ctrl_read,
-       .write = fnic_trace_ctrl_write,
-};
-
 static const struct file_operations fnic_trace_debugfs_fops = {
        .owner = THIS_MODULE,
        .open = fnic_trace_debugfs_open,
@@ -306,9 +381,10 @@ int fnic_trace_debugfs_init(void)
                return rc;
        }
        fnic_trace_enable = debugfs_create_file("tracing_enable",
-                                         S_IFREG|S_IRUGO|S_IWUSR,
-                                         fnic_trace_debugfs_root,
-                                         NULL, &fnic_trace_ctrl_fops);
+                                       S_IFREG|S_IRUGO|S_IWUSR,
+                                       fnic_trace_debugfs_root,
+                                       &(fc_trc_flag->fnic_trace),
+                                       &fnic_trace_ctrl_fops);
 
        if (!fnic_trace_enable) {
                printk(KERN_DEBUG
@@ -317,10 +393,10 @@ int fnic_trace_debugfs_init(void)
        }
 
        fnic_trace_debugfs_file = debugfs_create_file("trace",
-                                                 S_IFREG|S_IRUGO|S_IWUSR,
-                                                 fnic_trace_debugfs_root,
-                                                 NULL,
-                                                 &fnic_trace_debugfs_fops);
+                                       S_IFREG|S_IRUGO|S_IWUSR,
+                                       fnic_trace_debugfs_root,
+                                       &(fc_trc_flag->fnic_trace),
+                                       &fnic_trace_debugfs_fops);
 
        if (!fnic_trace_debugfs_file) {
                printk(KERN_DEBUG
@@ -340,14 +416,104 @@ int fnic_trace_debugfs_init(void)
  */
 void fnic_trace_debugfs_terminate(void)
 {
-       if (fnic_trace_debugfs_file) {
-               debugfs_remove(fnic_trace_debugfs_file);
-               fnic_trace_debugfs_file = NULL;
+       debugfs_remove(fnic_trace_debugfs_file);
+       fnic_trace_debugfs_file = NULL;
+
+       debugfs_remove(fnic_trace_enable);
+       fnic_trace_enable = NULL;
+}
+
+/*
+ * fnic_fc_trace_debugfs_init -
+ * Initialize debugfs for fnic control frame trace logging
+ *
+ * Description:
+ * When Debugfs is configured this routine sets up the fnic_fc debugfs
+ * file system. If not already created, this routine will create the
+ * create file trace to log fnic fc trace buffer output into debugfs and
+ * it will also create file fc_trace_enable to control enable/disable of
+ * trace logging into trace buffer.
+ */
+
+int fnic_fc_trace_debugfs_init(void)
+{
+       int rc = -1;
+
+       if (!fnic_trace_debugfs_root) {
+               pr_err("fnic: Debugfs root directory doesn't exist\n");
+               return rc;
+       }
+
+       fnic_fc_trace_enable = debugfs_create_file("fc_trace_enable",
+                                       S_IFREG|S_IRUGO|S_IWUSR,
+                                       fnic_trace_debugfs_root,
+                                       &(fc_trc_flag->fc_trace),
+                                       &fnic_trace_ctrl_fops);
+
+       if (!fnic_fc_trace_enable) {
+               pr_err("fnic: Failed to create fc_trace_enable file\n");
+               return rc;
+       }
+
+       fnic_fc_trace_clear = debugfs_create_file("fc_trace_clear",
+                                       S_IFREG|S_IRUGO|S_IWUSR,
+                                       fnic_trace_debugfs_root,
+                                       &(fc_trc_flag->fc_clear),
+                                       &fnic_trace_ctrl_fops);
+
+       if (!fnic_fc_trace_clear) {
+               pr_err("fnic: Failed to create fc_trace_clear file\n");
+               return rc;
+       }
+
+       fnic_fc_rdata_trace_debugfs_file =
+               debugfs_create_file("fc_trace_rdata",
+                                   S_IFREG|S_IRUGO|S_IWUSR,
+                                   fnic_trace_debugfs_root,
+                                   &(fc_trc_flag->fc_normal_file),
+                                   &fnic_trace_debugfs_fops);
+
+       if (!fnic_fc_rdata_trace_debugfs_file) {
+               pr_err("fnic: Failed to create fc_trace_rdata file\n");
+               return rc;
        }
-       if (fnic_trace_enable) {
-               debugfs_remove(fnic_trace_enable);
-               fnic_trace_enable = NULL;
+
+       fnic_fc_trace_debugfs_file =
+               debugfs_create_file("fc_trace",
+                                   S_IFREG|S_IRUGO|S_IWUSR,
+                                   fnic_trace_debugfs_root,
+                                   &(fc_trc_flag->fc_row_file),
+                                   &fnic_trace_debugfs_fops);
+
+       if (!fnic_fc_trace_debugfs_file) {
+               pr_err("fnic: Failed to create fc_trace file\n");
+               return rc;
        }
+       rc = 0;
+       return rc;
+}
+
+/*
+ * fnic_fc_trace_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When Debugfs is configured, this routine removes the debugfs file system
+ * elements that are specific to fnic_fc trace logging.
+ */
+
+void fnic_fc_trace_debugfs_terminate(void)
+{
+       debugfs_remove(fnic_fc_trace_debugfs_file);
+       fnic_fc_trace_debugfs_file = NULL;
+
+       debugfs_remove(fnic_fc_rdata_trace_debugfs_file);
+       fnic_fc_rdata_trace_debugfs_file = NULL;
+
+       debugfs_remove(fnic_fc_trace_enable);
+       fnic_fc_trace_enable = NULL;
+
+       debugfs_remove(fnic_fc_trace_clear);
+       fnic_fc_trace_clear = NULL;
 }
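
The fnic trace control files above now share a single file_operations: debugfs_create_file() is handed the address of one of the u8 selectors in fc_trace_flag_type, and the read/write handlers pick the flag to act on from filp->private_data. An illustrative standalone sketch of that dispatch idiom, with hypothetical names, assuming the open handler copies inode->i_private into filp->private_data as fnic_trace_ctrl_open() does for the files above:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>

static u8 sel_a = 0, sel_b = 1;         /* per-file selectors */
static u32 flag_a, flag_b;              /* values the files expose */

static ssize_t ctrl_read(struct file *filp, char __user *ubuf,
                         size_t cnt, loff_t *ppos)
{
        u8 sel = *(u8 *)filp->private_data;
        u32 val = (sel == sel_a) ? flag_a : flag_b;
        char buf[16];
        int len = scnprintf(buf, sizeof(buf), "%u\n", val);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static const struct file_operations ctrl_fops = {
        .owner = THIS_MODULE,
        .open  = simple_open,   /* stores inode->i_private in filp->private_data */
        .read  = ctrl_read,
};

/* One fops, two files, distinguished only by their data pointer:
 *   debugfs_create_file("a_enable", 0644, root, &sel_a, &ctrl_fops);
 *   debugfs_create_file("b_enable", 0644, root, &sel_b, &ctrl_fops);
 */
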
 
 /*
index 1671325aec7f11bba5c5c557502006554457b937..1b948f633fc555f7b99a48cfe61941f0e6788f1a 100644 (file)
@@ -66,19 +66,35 @@ void fnic_handle_link(struct work_struct *work)
        fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev);
 
        if (old_link_status == fnic->link_status) {
-               if (!fnic->link_status)
+               if (!fnic->link_status) {
                        /* DOWN -> DOWN */
                        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
-               else {
+                       fnic_fc_trace_set_data(fnic->lport->host->host_no,
+                               FNIC_FC_LE, "Link Status: DOWN->DOWN",
+                               strlen("Link Status: DOWN->DOWN"));
+               } else {
                        if (old_link_down_cnt != fnic->link_down_cnt) {
                                /* UP -> DOWN -> UP */
                                fnic->lport->host_stats.link_failure_count++;
                                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                               fnic_fc_trace_set_data(
+                                       fnic->lport->host->host_no,
+                                       FNIC_FC_LE,
+                                       "Link Status:UP_DOWN_UP",
+                                       strlen("Link Status:UP_DOWN_UP")
+                                       );
                                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
                                             "link down\n");
                                fcoe_ctlr_link_down(&fnic->ctlr);
                                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                                        /* start FCoE VLAN discovery */
+                                       fnic_fc_trace_set_data(
+                                               fnic->lport->host->host_no,
+                                               FNIC_FC_LE,
+                                               "Link Status: UP_DOWN_UP_VLAN",
+                                               strlen(
+                                               "Link Status: UP_DOWN_UP_VLAN")
+                                               );
                                        fnic_fcoe_send_vlan_req(fnic);
                                        return;
                                }
@@ -88,22 +104,36 @@ void fnic_handle_link(struct work_struct *work)
                        } else
                                /* UP -> UP */
                                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+                               fnic_fc_trace_set_data(
+                                       fnic->lport->host->host_no, FNIC_FC_LE,
+                                       "Link Status: UP_UP",
+                                       strlen("Link Status: UP_UP"));
                }
        } else if (fnic->link_status) {
                /* DOWN -> UP */
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                if (fnic->config.flags & VFCF_FIP_CAPABLE) {
                        /* start FCoE VLAN discovery */
+                               fnic_fc_trace_set_data(
+                               fnic->lport->host->host_no,
+                               FNIC_FC_LE, "Link Status: DOWN_UP_VLAN",
+                               strlen("Link Status: DOWN_UP_VLAN"));
                        fnic_fcoe_send_vlan_req(fnic);
                        return;
                }
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n");
+               fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_LE,
+                       "Link Status: DOWN_UP", strlen("Link Status: DOWN_UP"));
                fcoe_ctlr_link_up(&fnic->ctlr);
        } else {
                /* UP -> DOWN */
                fnic->lport->host_stats.link_failure_count++;
                spin_unlock_irqrestore(&fnic->fnic_lock, flags);
                FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n");
+               fnic_fc_trace_set_data(
+                       fnic->lport->host->host_no, FNIC_FC_LE,
+                       "Link Status: UP_DOWN",
+                       strlen("Link Status: UP_DOWN"));
                fcoe_ctlr_link_down(&fnic->ctlr);
        }
 
@@ -267,11 +297,6 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
 
        if (desc->fip_dtype == FIP_DT_FLOGI) {
 
-               shost_printk(KERN_DEBUG, lport->host,
-                         " FIP TYPE FLOGI: fab name:%llx "
-                         "vfid:%d map:%x\n",
-                         fip->sel_fcf->fabric_name, fip->sel_fcf->vfid,
-                         fip->sel_fcf->fc_map);
                if (dlen < sizeof(*els) + sizeof(*fh) + 1)
                        return 0;
 
@@ -616,6 +641,10 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb)
                                        "using UCSM\n");
                        goto drop;
                }
+               if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
+                       FNIC_FC_RECV|0x80, (char *)skb->data, skb->len)) != 0) {
+                       printk(KERN_ERR "fnic ctlr frame trace error!!!");
+               }
                skb_queue_tail(&fnic->fip_frame_queue, skb);
                queue_work(fnic_fip_queue, &fnic->fip_frame_work);
                return 1;               /* let caller know packet was used */
@@ -844,6 +873,10 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
        }
        fr_dev(fp) = fnic->lport;
        spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+       if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_RECV,
+                                       (char *)skb->data, skb->len)) != 0) {
+               printk(KERN_ERR "fnic ctlr frame trace error!!!");
+       }
 
        skb_queue_tail(&fnic->frame_queue, skb);
        queue_work(fnic_event_queue, &fnic->frame_work);
@@ -951,6 +984,15 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
                vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
                vlan_hdr->h_vlan_encapsulated_proto = eth_hdr->h_proto;
                vlan_hdr->h_vlan_TCI = htons(fnic->vlan_id);
+               if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
+                       FNIC_FC_SEND|0x80, (char *)eth_hdr, skb->len)) != 0) {
+                       printk(KERN_ERR "fnic ctlr frame trace error!!!");
+               }
+       } else {
+               if ((fnic_fc_trace_set_data(fnic->lport->host->host_no,
+                       FNIC_FC_SEND|0x80, (char *)skb->data, skb->len)) != 0) {
+                       printk(KERN_ERR "fnic ctlr frame trace error!!!");
+               }
        }
 
        pa = pci_map_single(fnic->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
@@ -1023,6 +1065,11 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
 
        pa = pci_map_single(fnic->pdev, eth_hdr, tot_len, PCI_DMA_TODEVICE);
 
+       if ((fnic_fc_trace_set_data(fnic->lport->host->host_no, FNIC_FC_SEND,
+                               (char *)eth_hdr, tot_len)) != 0) {
+               printk(KERN_ERR "fnic ctlr frame trace error!!!");
+       }
+
        spin_lock_irqsave(&fnic->wq_lock[0], flags);
 
        if (!vnic_wq_desc_avail(wq)) {
index 33e4ec2bfe734eefda1cd049bfde7f1112cf0ea2..8c56fdc3a4560adf69a82332545a1ed768bde1e7 100644 (file)
@@ -74,6 +74,11 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
                                        "for fnic trace buffer");
 
+unsigned int fnic_fc_trace_max_pages = 64;
+module_param(fnic_fc_trace_max_pages, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_fc_trace_max_pages,
+                "Total allocated memory pages for fc trace buffer");
+
 static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH;
 module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN");
@@ -111,7 +116,7 @@ static struct scsi_host_template fnic_host_template = {
        .change_queue_type = fc_change_queue_type,
        .this_id = -1,
        .cmd_per_lun = 3,
-       .can_queue = FNIC_MAX_IO_REQ,
+       .can_queue = FNIC_DFLT_IO_REQ,
        .use_clustering = ENABLE_CLUSTERING,
        .sg_tablesize = FNIC_MAX_SG_DESC_CNT,
        .max_sectors = 0xffff,
@@ -773,6 +778,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                shost_printk(KERN_INFO, fnic->lport->host,
                             "firmware uses non-FIP mode\n");
                fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
+               fnic->ctlr.state = FIP_ST_NON_FIP;
        }
        fnic->state = FNIC_IN_FC_MODE;
 
@@ -1033,11 +1039,20 @@ static int __init fnic_init_module(void)
        /* Allocate memory for trace buffer */
        err = fnic_trace_buf_init();
        if (err < 0) {
-               printk(KERN_ERR PFX "Trace buffer initialization Failed "
-                                 "Fnic Tracing utility is disabled\n");
+               printk(KERN_ERR PFX
+                      "Trace buffer initialization Failed. "
+                      "Fnic Tracing utility is disabled\n");
                fnic_trace_free();
        }
 
+       /* Allocate memory for fc trace buffer */
+       err = fnic_fc_trace_init();
+       if (err < 0) {
+               printk(KERN_ERR PFX "FC trace buffer initialization Failed "
+                      "FC frame tracing utility is disabled\n");
+               fnic_fc_trace_free();
+       }
+
        /* Create a cache for allocation of default size sgls */
        len = sizeof(struct fnic_dflt_sgl_list);
        fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
@@ -1118,6 +1133,7 @@ err_create_fnic_sgl_slab_max:
        kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
 err_create_fnic_sgl_slab_dflt:
        fnic_trace_free();
+       fnic_fc_trace_free();
        fnic_debugfs_terminate();
        return err;
 }
@@ -1135,6 +1151,7 @@ static void __exit fnic_cleanup_module(void)
        kmem_cache_destroy(fnic_io_req_cache);
        fc_release_transport(fnic_fc_transport);
        fnic_trace_free();
+       fnic_fc_trace_free();
        fnic_debugfs_terminate();
 }
 
index 0521436d05d64b64e108416f2702141d2e564240..ea28b5ca4c734a10da01237fcb880e2a5874c890 100644 (file)
@@ -1312,8 +1312,9 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
 
 cleanup_scsi_cmd:
                sc->result = DID_TRANSPORT_DISRUPTED << 16;
-               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
-                             " DID_TRANSPORT_DISRUPTED\n");
+               FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+                             "%s: sc duration = %lu DID_TRANSPORT_DISRUPTED\n",
+                             __func__, (jiffies - start_time));
 
                if (atomic64_read(&fnic->io_cmpl_skip))
                        atomic64_dec(&fnic->io_cmpl_skip);
@@ -1733,6 +1734,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        struct fnic_stats *fnic_stats;
        struct abort_stats *abts_stats;
        struct terminate_stats *term_stats;
+       enum fnic_ioreq_state old_ioreq_state;
        int tag;
        DECLARE_COMPLETION_ONSTACK(tm_done);
 
@@ -1793,6 +1795,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
         * the completion wont be done till mid-layer, since abort
         * has already started.
         */
+       old_ioreq_state = CMD_STATE(sc);
        CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
        CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
 
@@ -1816,6 +1819,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
                                    fc_lun.scsi_lun, io_req)) {
                spin_lock_irqsave(io_lock, flags);
+               if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+                       CMD_STATE(sc) = old_ioreq_state;
                io_req = (struct fnic_io_req *)CMD_SP(sc);
                if (io_req)
                        io_req->abts_done = NULL;
@@ -1859,12 +1864,8 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
        if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) {
                spin_unlock_irqrestore(io_lock, flags);
                if (task_req == FCPIO_ITMF_ABT_TASK) {
-                       FNIC_SCSI_DBG(KERN_INFO,
-                               fnic->lport->host, "Abort Driver Timeout\n");
                        atomic64_inc(&abts_stats->abort_drv_timeouts);
                } else {
-                       FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
-                               "Terminate Driver Timeout\n");
                        atomic64_inc(&term_stats->terminate_drv_timeouts);
                }
                CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
index e002e7187dc0d14972b7146ce9ea15b1e5dbdd4e..c77285926827838d851114da691e09ec70ae3228 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/errno.h>
 #include <linux/spinlock.h>
 #include <linux/kallsyms.h>
+#include <linux/time.h>
 #include "fnic_io.h"
 #include "fnic.h"
 
@@ -32,6 +33,16 @@ static DEFINE_SPINLOCK(fnic_trace_lock);
 static fnic_trace_dbg_t fnic_trace_entries;
 int fnic_tracing_enabled = 1;
 
+/* static char *fnic_fc_ctlr_trace_buf_p; */
+
+static int fc_trace_max_entries;
+static unsigned long fnic_fc_ctlr_trace_buf_p;
+static fnic_trace_dbg_t fc_trace_entries;
+int fnic_fc_tracing_enabled = 1;
+int fnic_fc_trace_cleared = 1;
+static DEFINE_SPINLOCK(fnic_fc_trace_lock);
+
+
 /*
  * fnic_trace_get_buf - Give buffer pointer to user to fill up trace information
  *
@@ -428,10 +439,10 @@ int fnic_trace_buf_init(void)
        }
        err = fnic_trace_debugfs_init();
        if (err < 0) {
-               printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n");
+               pr_err("fnic: Failed to initialize debugfs for tracing\n");
                goto err_fnic_trace_debugfs_init;
        }
-       printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n");
+       pr_info("fnic: Successfully Initialized Trace Buffer\n");
        return err;
 err_fnic_trace_debugfs_init:
        fnic_trace_free();
@@ -456,3 +467,314 @@ void fnic_trace_free(void)
        }
        printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
 }
+
+/*
+ * fnic_fc_trace_init -
+ * Initialize trace buffer to log fnic control frames
+ * Description:
+ * Initialize trace buffer data structure by allocating
+ * required memory for trace data as well as for Indexes.
+ * Frame size is 256 bytes and
+ * memory is allocated for 1024 entries of 256 bytes.
+ * Page_offset(Index) is set to the address of trace entry
+ * and page_offset is initialized by adding frame size
+ * to the previous page_offset entry.
+ */
+
+int fnic_fc_trace_init(void)
+{
+       unsigned long fc_trace_buf_head;
+       int err = 0;
+       int i;
+
+       fc_trace_max_entries = (fnic_fc_trace_max_pages * PAGE_SIZE)/
+                               FC_TRC_SIZE_BYTES;
+       fnic_fc_ctlr_trace_buf_p = (unsigned long)vmalloc(
+                                       fnic_fc_trace_max_pages * PAGE_SIZE);
+       if (!fnic_fc_ctlr_trace_buf_p) {
+               pr_err("fnic: Failed to allocate memory for "
+                      "FC Control Trace Buf\n");
+               err = -ENOMEM;
+               goto err_fnic_fc_ctlr_trace_buf_init;
+       }
+
+       memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
+                       fnic_fc_trace_max_pages * PAGE_SIZE);
+
+       /* Allocate memory for page offset */
+       fc_trace_entries.page_offset = vmalloc(fc_trace_max_entries *
+                                               sizeof(unsigned long));
+       if (!fc_trace_entries.page_offset) {
+               pr_err("fnic: Failed to allocate memory for page_offset\n");
+               if (fnic_fc_ctlr_trace_buf_p) {
+                       pr_err("fnic: Freeing FC Control Trace Buf\n");
+                       vfree((void *)fnic_fc_ctlr_trace_buf_p);
+                       fnic_fc_ctlr_trace_buf_p = 0;
+               }
+               err = -ENOMEM;
+               goto err_fnic_fc_ctlr_trace_buf_init;
+       }
+       memset((void *)fc_trace_entries.page_offset, 0,
+              (fc_trace_max_entries * sizeof(unsigned long)));
+
+       fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
+       fc_trace_buf_head = fnic_fc_ctlr_trace_buf_p;
+
+       /*
+       * Set up fc_trace_entries.page_offset field with memory location
+       * for every trace entry
+       */
+       for (i = 0; i < fc_trace_max_entries; i++) {
+               fc_trace_entries.page_offset[i] = fc_trace_buf_head;
+               fc_trace_buf_head += FC_TRC_SIZE_BYTES;
+       }
+       err = fnic_fc_trace_debugfs_init();
+       if (err < 0) {
+               pr_err("fnic: Failed to initialize FC_CTLR tracing.\n");
+               goto err_fnic_fc_ctlr_trace_debugfs_init;
+       }
+       pr_info("fnic: Successfully Initialized FC_CTLR Trace Buffer\n");
+       return err;
+
+err_fnic_fc_ctlr_trace_debugfs_init:
+       fnic_fc_trace_free();
+err_fnic_fc_ctlr_trace_buf_init:
+       return err;
+}
+
+/*
+ * fnic_fc_trace_free - Free memory of fnic_fc trace data structures.
+ */
+void fnic_fc_trace_free(void)
+{
+       fnic_fc_tracing_enabled = 0;
+       fnic_fc_trace_debugfs_terminate();
+       if (fc_trace_entries.page_offset) {
+               vfree((void *)fc_trace_entries.page_offset);
+               fc_trace_entries.page_offset = NULL;
+       }
+       if (fnic_fc_ctlr_trace_buf_p) {
+               vfree((void *)fnic_fc_ctlr_trace_buf_p);
+               fnic_fc_ctlr_trace_buf_p = 0;
+       }
+       pr_info("fnic: Successfully Freed FC_CTLR Trace Buffer\n");
+}
+
+/*
+ * fnic_fc_trace_set_data:
+ *       Maintain rd & wr idx accordingly and set data
+ * Passed parameters:
+ *       host_no: host number associated with fnic
+ *       frame_type: send frame, receive frame or link event
+ *       fc_frame: pointer to fc_frame
+ *       frame_len: Length of the fc_frame
+ * Description:
+ *   This routine will get next available wr_idx and
+ *   copy all passed trace data to the buffer pointed by wr_idx
+ *   and increment wr_idx. It will also make sure that we don't
+ *   overwrite the entry which we are reading and also
+ *   wrap around if we reach the maximum entries.
+ * Returned Value:
+ *   It will return 0 for success or -1 for failure
+ */
+int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
+                               char *frame, u32 fc_trc_frame_len)
+{
+       unsigned long flags;
+       struct fc_trace_hdr *fc_buf;
+       unsigned long eth_fcoe_hdr_len;
+       char *fc_trace;
+
+       if (fnic_fc_tracing_enabled == 0)
+               return 0;
+
+       spin_lock_irqsave(&fnic_fc_trace_lock, flags);
+
+       if (fnic_fc_trace_cleared == 1) {
+               fc_trace_entries.rd_idx = fc_trace_entries.wr_idx = 0;
+               pr_info("fnic: Resetting the read idx\n");
+               memset((void *)fnic_fc_ctlr_trace_buf_p, 0,
+                               fnic_fc_trace_max_pages * PAGE_SIZE);
+               fnic_fc_trace_cleared = 0;
+       }
+
+       fc_buf = (struct fc_trace_hdr *)
+               fc_trace_entries.page_offset[fc_trace_entries.wr_idx];
+
+       fc_trace_entries.wr_idx++;
+
+       if (fc_trace_entries.wr_idx >= fc_trace_max_entries)
+               fc_trace_entries.wr_idx = 0;
+
+       if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
+               fc_trace_entries.rd_idx++;
+               if (fc_trace_entries.rd_idx >= fc_trace_max_entries)
+                       fc_trace_entries.rd_idx = 0;
+       }
+
+       fc_buf->time_stamp = CURRENT_TIME;
+       fc_buf->host_no = host_no;
+       fc_buf->frame_type = frame_type;
+
+       fc_trace = (char *)FC_TRACE_ADDRESS(fc_buf);
+
+       /* During the receive path, we do not have eth hdr as well as fcoe hdr
+        * at trace entry point so we will stuff 0xff just to make it generic.
+        */
+       if (frame_type == FNIC_FC_RECV) {
+               eth_fcoe_hdr_len = sizeof(struct ethhdr) +
+                                       sizeof(struct fcoe_hdr);
+               fc_trc_frame_len = fc_trc_frame_len + eth_fcoe_hdr_len;
+               memset((char *)fc_trace, 0xff, eth_fcoe_hdr_len);
+               /* Copy the rest of data frame */
+               memcpy((char *)(fc_trace + eth_fcoe_hdr_len), (void *)frame,
+               min_t(u8, fc_trc_frame_len,
+                       (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
+       } else {
+               memcpy((char *)fc_trace, (void *)frame,
+               min_t(u8, fc_trc_frame_len,
+                       (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)));
+       }
+
+       /* Store the actual received length */
+       fc_buf->frame_len = fc_trc_frame_len;
+
+       spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
+       return 0;
+}
+
+/*
+ * fnic_fc_trace_get_data: Copy trace buffer to a memory file
+ * Passed parameter:
+ *       @fnic_dbgfs_t: pointer to debugfs trace buffer
+ *       rdata_flag: 1 => Unformatted file
+ *                   0 => formatted file
+ * Description:
+ *       This routine will copy the trace data to the memory file with
+ *       proper formatting, or to another memory
+ *       file without formatting, for further processing.
+ * Return Value:
+ *       Number of bytes that were dumped into fnic_dbgfs_t
+ */
+
+int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag)
+{
+       int rd_idx, wr_idx;
+       unsigned long flags;
+       int len = 0, j;
+       struct fc_trace_hdr *tdata;
+       char *fc_trace;
+
+       spin_lock_irqsave(&fnic_fc_trace_lock, flags);
+       if (fc_trace_entries.wr_idx == fc_trace_entries.rd_idx) {
+               spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
+               pr_info("fnic: Buffer is empty\n");
+               return 0;
+       }
+       rd_idx = fc_trace_entries.rd_idx;
+       wr_idx = fc_trace_entries.wr_idx;
+       if (rdata_flag == 0) {
+               len += snprintf(fnic_dbgfs_prt->buffer + len,
+                       (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
+                       "Time Stamp (UTC)\t\t"
+                       "Host No:   F Type:  len:     FCoE_FRAME:\n");
+       }
+
+       while (rd_idx != wr_idx) {
+               tdata = (struct fc_trace_hdr *)
+                       fc_trace_entries.page_offset[rd_idx];
+               if (!tdata) {
+                       pr_info("fnic: Rd data is NULL\n");
+                       spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
+                       return 0;
+               }
+               if (rdata_flag == 0) {
+                       copy_and_format_trace_data(tdata,
+                               fnic_dbgfs_prt, &len, rdata_flag);
+               } else {
+                       fc_trace = (char *)tdata;
+                       for (j = 0; j < FC_TRC_SIZE_BYTES; j++) {
+                               len += snprintf(fnic_dbgfs_prt->buffer + len,
+                               (fnic_fc_trace_max_pages * PAGE_SIZE * 3)
+                               - len, "%02x", fc_trace[j] & 0xff);
+                       } /* for loop */
+                       len += snprintf(fnic_dbgfs_prt->buffer + len,
+                               (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
+                               "\n");
+               }
+               rd_idx++;
+               if (rd_idx > (fc_trace_max_entries - 1))
+                       rd_idx = 0;
+       }
+
+       spin_unlock_irqrestore(&fnic_fc_trace_lock, flags);
+       return len;
+}
+
+/*
+ * copy_and_format_trace_data: Copy formatted data to char * buffer
+ * Passed Parameter:
+ *      @fc_trace_hdr_t: pointer to trace data
+ *      @fnic_dbgfs_t: pointer to debugfs trace buffer
+ *      @orig_len: pointer to len
+ *      rdata_flag: 0 => Formatted file, 1 => Unformatted file
+ * Description:
+ *      This routine will format and copy the passed trace data
+ *      for a formatted file or an unformatted file accordingly.
+ */
+
+void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
+                               fnic_dbgfs_t *fnic_dbgfs_prt, int *orig_len,
+                               u8 rdata_flag)
+{
+       struct tm tm;
+       int j, i = 1, len;
+       char *fc_trace, *fmt;
+       int ethhdr_len = sizeof(struct ethhdr) - 1;
+       int fcoehdr_len = sizeof(struct fcoe_hdr);
+       int fchdr_len = sizeof(struct fc_frame_header);
+       int max_size = fnic_fc_trace_max_pages * PAGE_SIZE * 3;
+
+       tdata->frame_type = tdata->frame_type & 0x7F;
+
+       len = *orig_len;
+
+       time_to_tm(tdata->time_stamp.tv_sec, 0, &tm);
+
+       fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x       %c%8x\t";
+       len += snprintf(fnic_dbgfs_prt->buffer + len,
+               (fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
+               fmt,
+               tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
+               tm.tm_hour, tm.tm_min, tm.tm_sec,
+               tdata->time_stamp.tv_nsec, tdata->host_no,
+               tdata->frame_type, tdata->frame_len);
+
+       fc_trace = (char *)FC_TRACE_ADDRESS(tdata);
+
+       for (j = 0; j < min_t(u8, tdata->frame_len,
+               (u8)(FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE)); j++) {
+               if (tdata->frame_type == FNIC_FC_LE) {
+                       len += snprintf(fnic_dbgfs_prt->buffer + len,
+                               max_size - len, "%c", fc_trace[j]);
+               } else {
+                       len += snprintf(fnic_dbgfs_prt->buffer + len,
+                               max_size - len, "%02x", fc_trace[j] & 0xff);
+                       len += snprintf(fnic_dbgfs_prt->buffer + len,
+                               max_size - len, " ");
+                       if (j == ethhdr_len ||
+                               j == ethhdr_len + fcoehdr_len ||
+                               j == ethhdr_len + fcoehdr_len + fchdr_len ||
+                               (i > 3 && j%fchdr_len == 0)) {
+                               len += snprintf(fnic_dbgfs_prt->buffer
+                                       + len, (fnic_fc_trace_max_pages
+                                       * PAGE_SIZE * 3) - len,
+                                       "\n\t\t\t\t\t\t\t\t");
+                               i++;
+                       }
+               } /* end of else*/
+       } /* End of for loop*/
+       len += snprintf(fnic_dbgfs_prt->buffer + len,
+               max_size - len, "\n");
+       *orig_len = len;
+}
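
fnic_fc_trace_set_data() above keeps a simple overwrite-oldest ring under fnic_fc_trace_lock: wr_idx claims the next 256-byte slot, wraps at fc_trace_max_entries, and pushes rd_idx forward when the writer is about to lap the reader. A standalone sketch of just that index bookkeeping (illustrative only):

/* Claim the next slot in an overwrite-oldest ring of 'max' entries. */
static int trace_ring_claim(int *wr_idx, int *rd_idx, int max)
{
        int slot = *wr_idx;

        if (++(*wr_idx) >= max)
                *wr_idx = 0;                    /* wrap the writer */

        if (*wr_idx == *rd_idx) {               /* writer caught up with reader */
                if (++(*rd_idx) >= max)
                        *rd_idx = 0;            /* drop the oldest entry */
        }
        return slot;
}
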
index d412f2ee3c4fec83ee58b52ec5b7370658e39fdf..a8aa0578fcb0a1d9a50cb14384360f79bce4f23d 100644 (file)
 #define __FNIC_TRACE_H__
 
 #define FNIC_ENTRY_SIZE_BYTES 64
+#define FC_TRC_SIZE_BYTES 256
+#define FC_TRC_HEADER_SIZE sizeof(struct fc_trace_hdr)
+
+/*
+ * The most significant bit of FNIC_FC_RECV and FNIC_FC_SEND is used to
+ * represent the type of frame: 1 => Eth frame, 0 => FC frame
+ */
+
+#define FNIC_FC_RECV 0x52 /* Character R */
+#define FNIC_FC_SEND 0x54 /* Character T */
+#define FNIC_FC_LE 0x4C /* Character L */
 
 extern ssize_t simple_read_from_buffer(void __user *to,
                                          size_t count,
@@ -30,6 +41,10 @@ extern unsigned int fnic_trace_max_pages;
 extern int fnic_tracing_enabled;
 extern unsigned int trace_max_pages;
 
+extern unsigned int fnic_fc_trace_max_pages;
+extern int fnic_fc_tracing_enabled;
+extern int fnic_fc_trace_cleared;
+
 typedef struct fnic_trace_dbg {
        int wr_idx;
        int rd_idx;
@@ -56,6 +71,16 @@ struct fnic_trace_data {
 
 typedef struct fnic_trace_data fnic_trace_data_t;
 
+struct fc_trace_hdr {
+       struct timespec time_stamp;
+       u32 host_no;
+       u8 frame_type;
+       u8 frame_len;
+} __attribute__((__packed__));
+
+#define FC_TRACE_ADDRESS(a) \
+       ((unsigned long)(a) + sizeof(struct fc_trace_hdr))
+
 #define FNIC_TRACE_ENTRY_SIZE \
                  (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
 
@@ -88,4 +113,17 @@ int fnic_debugfs_init(void);
 void fnic_debugfs_terminate(void);
 int fnic_trace_debugfs_init(void);
 void fnic_trace_debugfs_terminate(void);
+
+/* Fnic FC CTLR Trace related functions */
+int fnic_fc_trace_init(void);
+void fnic_fc_trace_free(void);
+int fnic_fc_trace_set_data(u32 host_no, u8 frame_type,
+                               char *frame, u32 fc_frame_len);
+int fnic_fc_trace_get_data(fnic_dbgfs_t *fnic_dbgfs_prt, u8 rdata_flag);
+void copy_and_format_trace_data(struct fc_trace_hdr *tdata,
+                               fnic_dbgfs_t *fnic_dbgfs_prt,
+                               int *len, u8 rdata_flag);
+int fnic_fc_trace_debugfs_init(void);
+void fnic_fc_trace_debugfs_terminate(void);
+
 #endif
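
Taken together, the definitions above lay each trace slot out as a packed fc_trace_hdr followed by up to FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE bytes of frame data, which FC_TRACE_ADDRESS() points at; the top bit of frame_type marks an Ethernet-encapsulated frame (FNIC_FC_SEND|0x80, FNIC_FC_RECV|0x80) and the low bits carry 'T', 'R' or 'L'. A short sketch of decoding one slot with these macros (illustrative only, assumes this header is included):

/* Decode one trace slot laid out by fnic_fc_trace_set_data(). */
static void decode_trace_slot(void *slot)
{
        struct fc_trace_hdr *hdr = slot;
        char *payload = (char *)FC_TRACE_ADDRESS(hdr);
        size_t payload_max = FC_TRC_SIZE_BYTES - FC_TRC_HEADER_SIZE;
        bool is_eth = hdr->frame_type & 0x80;   /* Ethernet vs raw FC frame */
        u8 kind = hdr->frame_type & 0x7F;       /* 'R', 'T' or 'L' */

        pr_debug("host %u type %c len %u eth=%d (payload %p, max %zu)\n",
                 hdr->host_no, kind, hdr->frame_len, is_eth,
                 payload, payload_max);
}
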
index 7176365e916b7aff6ca310baa9e75044dab9cf11..a1bc8ca958e11341c9995b2918475d56141afbcd 100644 (file)
  *     
  */
 
-/*
- * $Log: generic_NCR5380.c,v $
- */
-
 /* settings for DTC3181E card with only Mustek scanner attached */
 #define USLEEP
 #define USLEEP_POLL    1
index 1bcdb7beb77b37c08a6508318857e4f92363f0a9..703adf78e0b2578532e899ca778044162c2eaec3 100644 (file)
  * 1+ (800) 334-5454
  */
 
-/*
- * $Log: generic_NCR5380.h,v $
- */
-
 #ifndef GENERIC_NCR5380_H
 #define GENERIC_NCR5380_H
 
@@ -58,8 +54,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
 #define CAN_QUEUE 16
 #endif
 
-#ifndef HOSTS_C
-
 #define __STRVAL(x) #x
 #define STRVAL(x) __STRVAL(x)
 
@@ -131,7 +125,6 @@ static const char* generic_NCR5380_info(struct Scsi_Host *);
 #define BOARD_NCR53C400A 2
 #define BOARD_DTC3181E 3
 
-#endif /* else def HOSTS_C */
 #endif /* ndef ASM */
 #endif /* GENERIC_NCR5380_H */
 
index 9a6e4a2cd072421df1980edfa4c8f914398b3991..5858600bfe593255592dd009fcdb02b82463f04d 100644 (file)
@@ -115,9 +115,15 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
+       {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
@@ -165,9 +171,15 @@ static struct board_type products[] = {
        {0x21C3103C, "Smart Array", &SA5_access},
        {0x21C4103C, "Smart Array", &SA5_access},
        {0x21C5103C, "Smart Array", &SA5_access},
+       {0x21C6103C, "Smart Array", &SA5_access},
        {0x21C7103C, "Smart Array", &SA5_access},
        {0x21C8103C, "Smart Array", &SA5_access},
        {0x21C9103C, "Smart Array", &SA5_access},
+       {0x21CA103C, "Smart Array", &SA5_access},
+       {0x21CB103C, "Smart Array", &SA5_access},
+       {0x21CC103C, "Smart Array", &SA5_access},
+       {0x21CD103C, "Smart Array", &SA5_access},
+       {0x21CE103C, "Smart Array", &SA5_access},
        {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
        {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
        {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
@@ -2836,6 +2848,8 @@ static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
 
        /* Get the list of physical devices */
        physicals = kzalloc(reportsize, GFP_KERNEL);
+       if (physicals == NULL)
+               return 0;
        if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
                reportsize, extended)) {
                dev_err(&h->pdev->dev,
@@ -2963,19 +2977,24 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
 {
        int rc;
+       int hba_mode_enabled;
        struct bmic_controller_parameters *ctlr_params;
        ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
                GFP_KERNEL);
 
        if (!ctlr_params)
-               return 0;
+               return -ENOMEM;
        rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
                sizeof(struct bmic_controller_parameters));
-       if (rc != 0) {
+       if (rc) {
                kfree(ctlr_params);
-               return 0;
+               return rc;
        }
-       return ctlr_params->nvram_flags & (1 << 3) ? 1 : 0;
+
+       hba_mode_enabled =
+               ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
+       kfree(ctlr_params);
+       return hba_mode_enabled;
 }
 
 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
@@ -3001,7 +3020,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
        int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
        int i, n_ext_target_devs, ndevs_to_allocate;
        int raid_ctlr_position;
-       u8 rescan_hba_mode;
+       int rescan_hba_mode;
        DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
 
        currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
@@ -3016,6 +3035,8 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
        memset(lunzerobits, 0, sizeof(lunzerobits));
 
        rescan_hba_mode = hpsa_hba_mode_enabled(h);
+       if (rescan_hba_mode < 0)
+               goto out;
 
        if (!h->hba_mode_enabled && rescan_hba_mode)
                dev_warn(&h->pdev->dev, "HBA mode enabled\n");
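
Taken together, these hpsa.c hunks change hpsa_hba_mode_enabled() from "return 0 on any failure" to "return a negative errno on failure, 0 or 1 otherwise", and also stop leaking ctlr_params by freeing it before returning. Pieced together from the hunks, the caller-side contract is simply:

    rescan_hba_mode = hpsa_hba_mode_enabled(h);
    if (rescan_hba_mode < 0)        /* -ENOMEM or a BMIC mode-sense error */
            goto out;               /* abort the rescan instead of guessing */
    /* otherwise 0 or 1, derived from nvram_flags & HBA_MODE_ENABLED_FLAG */
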
index 44235a27e1b6d0541a78c1442bfdda4794caabb5..1e3cf33a82cf12750fa9336e087774dcb7c5becd 100644 (file)
@@ -90,6 +90,7 @@ struct bmic_controller_parameters {
        u8   automatic_drive_slamming;
        u8   reserved1;
        u8   nvram_flags;
+#define HBA_MODE_ENABLED_FLAG (1 << 3)
        u8   cache_nvram_flags;
        u8   drive_config_flags;
        u16  reserved2;
index 26dc005bb0f0f347eeecf2d4d0cb335912d55ee5..ecd7bd304efebb51ce99580e7810edb6a3cf84be 100644 (file)
@@ -1442,9 +1442,9 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
                conn->task = NULL;
        }
        /* regular RX path uses back_lock */
-       spin_lock_bh(&conn->session->back_lock);
+       spin_lock(&conn->session->back_lock);
        __iscsi_put_task(task);
-       spin_unlock_bh(&conn->session->back_lock);
+       spin_unlock(&conn->session->back_lock);
        return rc;
 }
 
index 6bb51f8e3c1b096a5a2d048d2ff4e878b310c8ef..393662c24df5086014277cbe411528ca6e6680d2 100644 (file)
@@ -265,6 +265,16 @@ lpfc_sli4_eq_get(struct lpfc_queue *q)
                return NULL;
 
        q->hba_index = idx;
+
+       /*
+        * insert barrier for instruction interlock : data from the hardware
+        * must have the valid bit checked before it can be copied and acted
+        * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
+        * instructions allowing action on content before valid bit checked,
+        * add barrier here as well. May not be needed as "content" is a
+        * single 32-bit entity here (vs multi word structure for cq's).
+        */
+       mb();
        return eqe;
 }
 
@@ -370,6 +380,17 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
 
        cqe = q->qe[q->hba_index].cqe;
        q->hba_index = idx;
+
+       /*
+        * insert barrier for instruction interlock : data from the hardware
+        * must have the valid bit checked before it can be copied and acted
+        * upon. Speculative instructions were allowing a bcopy at the start
+        * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
+        * after our return, to copy data before the valid bit check above
+        * was done. As such, some of the copied data was stale. The barrier
+        * ensures the check is before any data is copied.
+        */
+       mb();
        return cqe;
 }
 
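Both hunks add the same safeguard: the entry payload must not be loaded before the valid bit has actually been observed, and on CPUs that reorder or speculate loads a full memory barrier between the check and the copy enforces that. A generic sketch of the consume-after-valid idiom the comments describe (illustrative structure, not lpfc's real queue-entry layout):

    struct hw_qe { u32 word0; u32 payload[3]; };    /* bit 0 of word0 = valid */

    static bool consume_entry(struct hw_qe *qe, struct hw_qe *out)
    {
            if (!(qe->word0 & 1))           /* 1. check the valid bit first  */
                    return false;
            mb();                           /* 2. keep payload loads after   */
                                            /*    the validity check         */
            memcpy(out, qe, sizeof(*out));  /* 3. now safe to copy and use   */
            return true;
    }
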
index f5cdc68cd5b6f194f9f1f5e573e1c7747a70bb48..6a039eb1cbce3e808533ec4dabf2041696aa5ed5 100644 (file)
  * 1+ (800) 334-5454
  */
 
-/*
- * $Log: mac_NCR5380.c,v $
- */
-
 #include <linux/types.h>
 #include <linux/stddef.h>
 #include <linux/ctype.h>
 
 #include "NCR5380.h"
 
-#if 0
-#define NDEBUG (NDEBUG_INTR | NDEBUG_PSEUDO_DMA | NDEBUG_ARBITRATION | NDEBUG_SELECTION | NDEBUG_RESELECTION)
-#else
-#define NDEBUG (NDEBUG_ABORT)
-#endif
-
 #define RESET_BOOT
 #define DRIVER_SETUP
 
index 7dc62fce1c4c6739ec1f21faa59be2aebf55f8ee..06969b06e54bee4df0d474b69f23a2bd724cedcc 100644 (file)
  * 1+ (800) 334-5454
  */
 
-/*
- * $Log: cumana_NCR5380.h,v $
- */
-
 #ifndef MAC_NCR5380_H
 #define MAC_NCR5380_H
 
@@ -51,8 +47,6 @@
 
 #include <scsi/scsicam.h>
 
-#ifndef HOSTS_C
-
 #define NCR5380_implementation_fields \
     int port, ctrl
 
 #define NCR5380_show_info macscsi_show_info
 #define NCR5380_write_info macscsi_write_info
 
-#define BOARD_NORMAL   0
-#define BOARD_NCR53C400        1
-
-#endif /* ndef HOSTS_C */
 #endif /* ndef ASM */
 #endif /* MAC_NCR5380_H */
 
index d84d02c2aad9f53554362d0c11faa9d5b40af978..112799b131a93253d1b06170e2d7e1d5a1d998c2 100644 (file)
@@ -3061,7 +3061,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
        u32 cur_state;
        u32 abs_state, curr_abs_state;
 
-       fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) & MFI_STATE_MASK;
+       abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
+       fw_state = abs_state & MFI_STATE_MASK;
 
        if (fw_state != MFI_STATE_READY)
                printk(KERN_INFO "megasas: Waiting for FW to come to ready"
@@ -3069,9 +3070,6 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
 
        while (fw_state != MFI_STATE_READY) {
 
-               abs_state =
-               instance->instancet->read_fw_status_reg(instance->reg_set);
-
                switch (fw_state) {
 
                case MFI_STATE_FAULT:
@@ -3223,10 +3221,8 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
                 * The cur_state should not last for more than max_wait secs
                 */
                for (i = 0; i < (max_wait * 1000); i++) {
-                       fw_state = instance->instancet->read_fw_status_reg(instance->reg_set) &
-                                       MFI_STATE_MASK ;
-               curr_abs_state =
-               instance->instancet->read_fw_status_reg(instance->reg_set);
+                       curr_abs_state = instance->instancet->
+                               read_fw_status_reg(instance->reg_set);
 
                        if (abs_state == curr_abs_state) {
                                msleep(1);
@@ -3242,6 +3238,9 @@ megasas_transition_to_ready(struct megasas_instance *instance, int ocr)
                               "in %d secs\n", fw_state, max_wait);
                        return -ENODEV;
                }
+
+               abs_state = curr_abs_state;
+               fw_state = curr_abs_state & MFI_STATE_MASK;
        }
        printk(KERN_INFO "megasas: FW now in Ready state\n");
 
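Condensed, the reworked wait loop now reads the absolute firmware status once per outer iteration and derives both the comparison value and fw_state from that single read, instead of issuing two separate register reads that could observe different values. Roughly (per-state handling elided):

    abs_state = instance->instancet->read_fw_status_reg(instance->reg_set);
    fw_state = abs_state & MFI_STATE_MASK;

    while (fw_state != MFI_STATE_READY) {
            /* per-state handling picks max_wait and kicks the firmware */
            for (i = 0; i < (max_wait * 1000); i++) {
                    curr_abs_state = instance->instancet->
                            read_fw_status_reg(instance->reg_set);
                    if (abs_state == curr_abs_state)
                            msleep(1);              /* no change yet */
                    else
                            break;
            }
            if (curr_abs_state == abs_state)
                    return -ENODEV;                 /* stuck in the same state */

            abs_state = curr_abs_state;
            fw_state = curr_abs_state & MFI_STATE_MASK;
    }
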
index bde63f7452bdd368a2fc06db8c83f72a7a049cbe..8b88118e20e6cb0bcb0e1145761908b49da01be9 100644 (file)
@@ -1739,14 +1739,14 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
                        list_for_each_entry_safe(chain_req, next,
                            &ioc->scsi_lookup[i].chain_list, tracker_list) {
                                list_del_init(&chain_req->tracker_list);
-                               list_add_tail(&chain_req->tracker_list,
+                               list_add(&chain_req->tracker_list,
                                    &ioc->free_chain_list);
                        }
                }
                ioc->scsi_lookup[i].cb_idx = 0xFF;
                ioc->scsi_lookup[i].scmd = NULL;
                ioc->scsi_lookup[i].direct_io = 0;
-               list_add_tail(&ioc->scsi_lookup[i].tracker_list,
+               list_add(&ioc->scsi_lookup[i].tracker_list,
                    &ioc->free_list);
                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
@@ -1764,13 +1764,13 @@ mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
                /* hi-priority */
                i = smid - ioc->hi_priority_smid;
                ioc->hpr_lookup[i].cb_idx = 0xFF;
-               list_add_tail(&ioc->hpr_lookup[i].tracker_list,
+               list_add(&ioc->hpr_lookup[i].tracker_list,
                    &ioc->hpr_free_list);
        } else if (smid <= ioc->hba_queue_depth) {
                /* internal queue */
                i = smid - ioc->internal_smid;
                ioc->internal_lookup[i].cb_idx = 0xFF;
-               list_add_tail(&ioc->internal_lookup[i].tracker_list,
+               list_add(&ioc->internal_lookup[i].tracker_list,
                    &ioc->internal_free_list);
        }
        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
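
Every conversion in this hunk has the same effect: freed trackers go back onto the head of their free list instead of the tail, so the next allocation hands out the most recently freed entry, the one most likely to still be cache-hot. A tiny stand-alone illustration of the ordering difference:

    struct tracker { struct list_head tracker_list; } a, b, c;
    LIST_HEAD(free_list);

    list_add(&a.tracker_list, &free_list);          /* list: a             */
    list_add(&b.tracker_list, &free_list);          /* list: b, a   (LIFO) */
    list_add_tail(&c.tracker_list, &free_list);     /* list: b, a, c       */
    /* Allocations take from the head, so b -- freed most recently -- is
     * reused first. */
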
index 1f2ac3a286217ebd7a9d5b9c8174d1d81a34fb4f..fd3b998c75b1cfa37ede0887f3725d13567ba7c9 100644 (file)
@@ -1065,7 +1065,7 @@ void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
     u32 reply);
 int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
        uint channel, uint id, uint lun, u8 type, u16 smid_task,
-       ulong timeout, unsigned long serial_number, enum mutex_type m_type);
+       ulong timeout, enum mutex_type m_type);
 void mpt2sas_scsih_set_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
 void mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle);
 void mpt2sas_expander_remove(struct MPT2SAS_ADAPTER *ioc, u64 sas_address);
index b7f887c9b0bfdae9460ef64f39c43a04a5e23bca..62df8f9d4271c2ef08954345071be52aaa9acbb7 100644 (file)
@@ -987,7 +987,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command karg,
                        mpt2sas_scsih_issue_tm(ioc,
                            le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
                            0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 10,
-                           0, TM_MUTEX_ON);
+                           TM_MUTEX_ON);
                        ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
                } else
                        mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
index 6fd7d40b2c4dea102e15a2e9c76fef3500c09435..5055f925d2cd772e97502479dea9eb1c67faf46c 100644 (file)
@@ -2368,7 +2368,6 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
  * @smid_task: smid assigned to the task
  * @timeout: timeout in seconds
- * @serial_number: the serial_number from scmd
  * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
  * Context: user
  *
@@ -2381,7 +2380,7 @@ mpt2sas_scsih_clear_tm_flag(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 int
 mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint channel,
     uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
-       unsigned long serial_number, enum mutex_type m_type)
+       enum mutex_type m_type)
 {
        Mpi2SCSITaskManagementRequest_t *mpi_request;
        Mpi2SCSITaskManagementReply_t *mpi_reply;
@@ -2634,8 +2633,7 @@ _scsih_abort(struct scsi_cmnd *scmd)
        handle = sas_device_priv_data->sas_target->handle;
        r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
            scmd->device->id, scmd->device->lun,
-           MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
-           scmd->serial_number, TM_MUTEX_ON);
+           MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
 
  out:
        sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2696,8 +2694,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
 
        r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
            scmd->device->id, scmd->device->lun,
-           MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
-           TM_MUTEX_ON);
+           MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
 
  out:
        sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2757,7 +2754,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
 
        r = mpt2sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
            scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
-           30, 0, TM_MUTEX_ON);
+           30, TM_MUTEX_ON);
 
  out:
        starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -3953,9 +3950,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
  */
 static int
-_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 {
-       struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+       struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
        struct MPT2SAS_DEVICE *sas_device_priv_data;
        struct MPT2SAS_TARGET *sas_target_priv_data;
        struct _raid_device *raid_device;
@@ -3963,7 +3960,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
        u32 mpi_control;
        u16 smid;
 
-       scmd->scsi_done = done;
        sas_device_priv_data = scmd->device->hostdata;
        if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
                scmd->result = DID_NO_CONNECT << 16;
@@ -4039,7 +4035,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
            MPT_TARGET_FLAGS_RAID_COMPONENT)
                mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
        else
-               mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+       mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
        mpi_request->DevHandle =
            cpu_to_le16(sas_device_priv_data->sas_target->handle);
        mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
@@ -4083,8 +4079,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
        return SCSI_MLQUEUE_HOST_BUSY;
 }
 
-static DEF_SCSI_QCMD(_scsih_qcmd)
-
 /**
  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
  * @sense_buffer: sense data returned by target
@@ -5880,7 +5874,7 @@ broadcast_aen_retry:
 
                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
                r = mpt2sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
-                   MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
+                   MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
                    TM_MUTEX_OFF);
                if (r == FAILED) {
                        sdev_printk(KERN_WARNING, sdev,
@@ -5922,7 +5916,7 @@ broadcast_aen_retry:
 
                r = mpt2sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
                    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
-                   scmd->serial_number, TM_MUTEX_OFF);
+                   TM_MUTEX_OFF);
                if (r == FAILED) {
                        sdev_printk(KERN_WARNING, sdev,
                            "mpt2sas_scsih_issue_tm: ABORT_TASK: FAILED : "
index 0ebf5d913c80f916835fa42178af2e41815c7bdd..9b90a6fef706954b2789e97482f23306a6a0ce55 100644 (file)
@@ -993,7 +993,7 @@ void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
 
 int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
        uint channel, uint id, uint lun, u8 type, u16 smid_task,
-       ulong timeout, unsigned long serial_number,  enum mutex_type m_type);
+       ulong timeout, enum mutex_type m_type);
 void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
 void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
 void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
index 9b89de14a0a3dbc8a6758525ed18c25463808540..ba9cbe598a9127ce2cd166c9fa1d9bd1b04275b1 100644 (file)
@@ -980,7 +980,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
                        mpt3sas_scsih_issue_tm(ioc,
                            le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
                            0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30,
-                           0, TM_MUTEX_ON);
+                           TM_MUTEX_ON);
                } else
                        mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
                            FORCE_BIG_HAMMER);
index a961fe11b5275f0b99d89dd69fbd2ef153618ba4..18e713db1d328f10c6bd56848ac565ff36f2a958 100644 (file)
@@ -2029,7 +2029,6 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
  * @smid_task: smid assigned to the task
  * @timeout: timeout in seconds
- * @serial_number: the serial_number from scmd
  * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
  * Context: user
  *
@@ -2042,7 +2041,7 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 int
 mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
        uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
-       unsigned long serial_number, enum mutex_type m_type)
+       enum mutex_type m_type)
 {
        Mpi2SCSITaskManagementRequest_t *mpi_request;
        Mpi2SCSITaskManagementReply_t *mpi_reply;
@@ -2293,8 +2292,7 @@ _scsih_abort(struct scsi_cmnd *scmd)
        handle = sas_device_priv_data->sas_target->handle;
        r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
            scmd->device->id, scmd->device->lun,
-           MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
-           scmd->serial_number, TM_MUTEX_ON);
+           MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON);
 
  out:
        sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
@@ -2353,8 +2351,7 @@ _scsih_dev_reset(struct scsi_cmnd *scmd)
 
        r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
            scmd->device->id, scmd->device->lun,
-           MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
-           TM_MUTEX_ON);
+           MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON);
 
  out:
        sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
@@ -2414,7 +2411,7 @@ _scsih_target_reset(struct scsi_cmnd *scmd)
 
        r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
            scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
-           30, 0, TM_MUTEX_ON);
+           30, TM_MUTEX_ON);
 
  out:
        starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
@@ -3518,7 +3515,7 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
 
 
 /**
- * _scsih_qcmd_lck - main scsi request entry point
+ * _scsih_qcmd - main scsi request entry point
  * @scmd: pointer to scsi command object
  * @done: function pointer to be invoked on completion
  *
@@ -3529,9 +3526,9 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
  */
 static int
-_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+_scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 {
-       struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+       struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
        struct MPT3SAS_DEVICE *sas_device_priv_data;
        struct MPT3SAS_TARGET *sas_target_priv_data;
        Mpi2SCSIIORequest_t *mpi_request;
@@ -3544,7 +3541,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
                scsi_print_command(scmd);
 #endif
 
-       scmd->scsi_done = done;
        sas_device_priv_data = scmd->device->hostdata;
        if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
                scmd->result = DID_NO_CONNECT << 16;
@@ -3659,8 +3655,6 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
  out:
        return SCSI_MLQUEUE_HOST_BUSY;
 }
-static DEF_SCSI_QCMD(_scsih_qcmd)
-
 
 /**
  * _scsih_normalize_sense - normalize descriptor and fixed format sense data
@@ -5425,7 +5419,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 
                spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
                r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
-                   MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
+                   MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30,
                    TM_MUTEX_OFF);
                if (r == FAILED) {
                        sdev_printk(KERN_WARNING, sdev,
@@ -5467,7 +5461,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 
                r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
                    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
-                   scmd->serial_number, TM_MUTEX_OFF);
+                   TM_MUTEX_OFF);
                if (r == FAILED) {
                        sdev_printk(KERN_WARNING, sdev,
                            "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
index 5ff978be249d22b37df66601787cceca92933047..eacee48a955c56903b98b3f1c61b55219df0e2c9 100644 (file)
@@ -728,6 +728,15 @@ static struct pci_device_id mvs_pci_table[] = {
                .class_mask     = 0,
                .driver_data    = chip_9485,
        },
+       {
+               .vendor         = PCI_VENDOR_ID_MARVELL_EXT,
+               .device         = 0x9485,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = 0x9485,
+               .class          = 0,
+               .class_mask     = 0,
+               .driver_data    = chip_9485,
+       },
        { PCI_VDEVICE(OCZ, 0x1021), chip_9485}, /* OCZ RevoDrive3 */
        { PCI_VDEVICE(OCZ, 0x1022), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
        { PCI_VDEVICE(OCZ, 0x1040), chip_9485}, /* OCZ RevoDrive3/zDriveR4 (exact model unknown) */
index 3721342835e9a1b6ebce6e5906747039c129bc48..aa528f53c5339582bcf74fd15aaa4a009cf82e83 100644 (file)
@@ -129,8 +129,6 @@ static int pas16_bus_reset(Scsi_Cmnd *);
 #define CAN_QUEUE 32 
 #endif
 
-#ifndef HOSTS_C
-
 #define NCR5380_implementation_fields \
     volatile unsigned short io_port
 
@@ -171,6 +169,5 @@ static int pas16_bus_reset(Scsi_Cmnd *);
    
 #define PAS16_IRQS 0xd4a8 
 
-#endif /* else def HOSTS_C */
 #endif /* ndef ASM */
 #endif /* PAS16_H */
index 28b4e813915352674a1a5aaf9e722fed2259f993..fe5eee4d0a1133645116fb7af6e799e0154bddf7 100644 (file)
@@ -395,6 +395,8 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
        payload.offset = 0;
        payload.length = 4096;
        payload.func_specific = kzalloc(4096, GFP_KERNEL);
+       if (!payload.func_specific)
+               return -ENOMEM;
        PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
        wait_for_completion(&completion);
        virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
@@ -402,6 +404,7 @@ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev,
                bios_index++)
                str += sprintf(str, "%c",
                        *((u8 *)((u8 *)virt_addr+bios_index)));
+       kfree(payload.func_specific);
        return str - buf;
 }
 static DEVICE_ATTR(bios_version, S_IRUGO, pm8001_ctl_bios_version_show, NULL);
index 07befcf365b888fc961dab15f840471d4472373e..16fe5196e6d9e66ae610074cdc965ff52c28607f 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -664,7 +664,7 @@ do_read:
                }
 
                rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
-                   addr, offset, SFP_BLOCK_SIZE, 0);
+                   addr, offset, SFP_BLOCK_SIZE, BIT_1);
                if (rval != QLA_SUCCESS) {
                        ql_log(ql_log_warn, vha, 0x706d,
                            "Unable to read SFP data (%x/%x/%x).\n", rval,
@@ -1495,7 +1495,7 @@ qla2x00_fw_dump_size_show(struct device *dev, struct device_attribute *attr,
 
        if (!ha->fw_dumped)
                size = 0;
-       else if (IS_QLA82XX(ha))
+       else if (IS_P3P_TYPE(ha))
                size = ha->md_template_size + ha->md_dump_size;
        else
                size = ha->fw_dump_len;
index 71ff340f6de4359754559a61a173196c4da85bff..524f9eb7fcd12d3d6d5b86c7e7ab6de37a8254ad 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2012 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -2054,9 +2054,49 @@ qla26xx_serdes_op(struct fc_bsg_job *bsg_job)
                bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
                break;
        default:
-               ql_log(ql_log_warn, vha, 0x708c,
+               ql_dbg(ql_dbg_user, vha, 0x708c,
                    "Unknown serdes cmd %x.\n", sr.cmd);
-               rval = -EDOM;
+               rval = -EINVAL;
+               break;
+       }
+
+       bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] =
+           rval ? EXT_STATUS_MAILBOX : 0;
+
+       bsg_job->reply_len = sizeof(struct fc_bsg_reply);
+       bsg_job->reply->result = DID_OK << 16;
+       bsg_job->job_done(bsg_job);
+       return 0;
+}
+
+static int
+qla8044_serdes_op(struct fc_bsg_job *bsg_job)
+{
+       struct Scsi_Host *host = bsg_job->shost;
+       scsi_qla_host_t *vha = shost_priv(host);
+       int rval = 0;
+       struct qla_serdes_reg_ex sr;
+
+       memset(&sr, 0, sizeof(sr));
+
+       sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+           bsg_job->request_payload.sg_cnt, &sr, sizeof(sr));
+
+       switch (sr.cmd) {
+       case INT_SC_SERDES_WRITE_REG:
+               rval = qla8044_write_serdes_word(vha, sr.addr, sr.val);
+               bsg_job->reply->reply_payload_rcv_len = 0;
+               break;
+       case INT_SC_SERDES_READ_REG:
+               rval = qla8044_read_serdes_word(vha, sr.addr, &sr.val);
+               sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+                   bsg_job->reply_payload.sg_cnt, &sr, sizeof(sr));
+               bsg_job->reply->reply_payload_rcv_len = sizeof(sr);
+               break;
+       default:
+               ql_dbg(ql_dbg_user, vha, 0x70cf,
+                   "Unknown serdes cmd %x.\n", sr.cmd);
+               rval = -EINVAL;
                break;
        }
 
@@ -2121,6 +2161,9 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
        case QL_VND_SERDES_OP:
                return qla26xx_serdes_op(bsg_job);
 
+       case QL_VND_SERDES_OP_EX:
+               return qla8044_serdes_op(bsg_job);
+
        default:
                return -ENOSYS;
        }
index e5c2126221e9359b406d5488df6d1d31a1739195..d38f9efa56fa57e233804be207c869b87e43fe99 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -24,6 +24,7 @@
 #define QL_VND_READ_I2C                0x11
 #define QL_VND_FX00_MGMT_CMD   0x12
 #define QL_VND_SERDES_OP       0x13
+#define        QL_VND_SERDES_OP_EX     0x14
 
 /* BSG Vendor specific subcode returns */
 #define EXT_STATUS_OK                  0
@@ -225,4 +226,10 @@ struct qla_serdes_reg {
        uint16_t val;
 } __packed;
 
+struct qla_serdes_reg_ex {
+       uint16_t cmd;
+       uint32_t addr;
+       uint32_t val;
+} __packed;
+
 #endif
index 97255f7c39754127cbcd4247f90ccd1c5788b512..c72ee97bf3f713b8ad0998ca73d1a06c1a7da993 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -15,7 +15,7 @@
  * |                              |                    | 0x0144,0x0146 |
  * |                              |                    | 0x015b-0x0160 |
  * |                              |                    | 0x016e-0x0170 |
- * | Mailbox commands             |       0x1187       | 0x1018-0x1019 |
+ * | Mailbox commands             |       0x118d       | 0x1018-0x1019 |
  * |                              |                    | 0x10ca         |
  * |                              |                    | 0x1115-0x1116  |
  * |                              |                    | 0x111a-0x111b |
  * |                              |                    | 0x70ad-0x70ae  |
  * |                              |                    | 0x70d7-0x70db  |
  * |                              |                    | 0x70de-0x70df  |
- * | Task Management              |       0x803d       | 0x8025-0x8026  |
- * |                              |                    | 0x800b,0x8039  |
+ * | Task Management              |       0x803d       | 0x8000,0x800b  |
+ * |                              |                    | 0x8019         |
+ * |                              |                    | 0x8025,0x8026  |
+ * |                              |                    | 0x8031,0x8032  |
+ * |                              |                    | 0x8039,0x803c  |
  * | AER/EEH                      |       0x9011       |               |
  * | Virtual Port                 |       0xa007       |               |
- * | ISP82XX Specific             |       0xb14c       | 0xb002,0xb024  |
+ * | ISP82XX Specific             |       0xb157       | 0xb002,0xb024  |
  * |                              |                    | 0xb09e,0xb0ae  |
+ * |                             |                    | 0xb0c3,0xb0c6  |
  * |                              |                    | 0xb0e0-0xb0ef  |
  * |                              |                    | 0xb085,0xb0dc  |
  * |                              |                    | 0xb107,0xb108  |
  * |                              |                    | 0xb13c-0xb140  |
  * |                              |                    | 0xb149                |
  * | MultiQ                       |       0xc00c       |               |
- * | Misc                         |       0xd2ff       | 0xd017-0xd019 |
+ * | Misc                         |       0xd212       | 0xd017-0xd019 |
  * |                              |                    | 0xd020                |
- * |                              |                    | 0xd02e-0xd0ff |
+ * |                              |                    | 0xd030-0xd0ff |
  * |                              |                    | 0xd101-0xd1fe |
- * |                              |                    | 0xd212-0xd2fe |
- * | Target Mode                 |       0xe070       | 0xe021         |
+ * |                              |                    | 0xd213-0xd2fe |
+ * | Target Mode                 |       0xe078       |                |
  * | Target Mode Management      |       0xf072       | 0xf002-0xf003  |
  * |                              |                    | 0xf046-0xf049  |
  * | Target Mode Task Management  |      0x1000b      |                |
@@ -277,9 +281,15 @@ qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
        if (rval != QLA_SUCCESS)
                return rval;
 
+       set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
+
        /* External Memory. */
-       return qla24xx_dump_ram(ha, 0x100000, *nxt,
+       rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
            ha->fw_memory_size - 0x100000 + 1, nxt);
+       if (rval == QLA_SUCCESS)
+               set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
+
+       return rval;
 }
 
 static uint32_t *
@@ -296,23 +306,15 @@ qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
        return buf;
 }
 
-int
-qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
+void
+qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
 {
-       int rval = QLA_SUCCESS;
-       uint32_t cnt;
-
        WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
-       for (cnt = 30000;
-           ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
-           rval == QLA_SUCCESS; cnt--) {
-               if (cnt)
-                       udelay(100);
-               else
-                       rval = QLA_FUNCTION_TIMEOUT;
-       }
 
-       return rval;
+       /* A 100 usec delay is sufficient for the hardware to pause the RISC */
+       udelay(100);
+       if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
+               set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
 }
 
 int
@@ -320,10 +322,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
 {
        int rval = QLA_SUCCESS;
        uint32_t cnt;
-       uint16_t mb0, wd;
+       uint16_t wd;
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
 
-       /* Reset RISC. */
+       /*
+        * Reset RISC. The delay is dependent on system architecture.
+        * Driver can proceed with the reset sequence after waiting
+        * for a timeout period.
+        */
        WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
        for (cnt = 0; cnt < 30000; cnt++) {
                if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
@@ -331,19 +337,14 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
 
                udelay(10);
        }
+       if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
+               set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
 
        WRT_REG_DWORD(&reg->ctrl_status,
            CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
        pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
 
        udelay(100);
-       /* Wait for firmware to complete NVRAM accesses. */
-       mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
-       for (cnt = 10000 ; cnt && mb0; cnt--) {
-               udelay(5);
-               mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
-               barrier();
-       }
 
        /* Wait for soft-reset to complete. */
        for (cnt = 0; cnt < 30000; cnt++) {
@@ -353,16 +354,21 @@ qla24xx_soft_reset(struct qla_hw_data *ha)
 
                udelay(10);
        }
+       if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
+               set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
+
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
        RD_REG_DWORD(&reg->hccr);             /* PCI Posting. */
 
-       for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+       for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
            rval == QLA_SUCCESS; cnt--) {
                if (cnt)
-                       udelay(100);
+                       udelay(10);
                else
                        rval = QLA_FUNCTION_TIMEOUT;
        }
+       if (rval == QLA_SUCCESS)
+               set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
 
        return rval;
 }
@@ -659,12 +665,13 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
 
        if (rval != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0xd000,
-                   "Failed to dump firmware (%x).\n", rval);
+                   "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
+                   rval, ha->fw_dump_cap_flags);
                ha->fw_dumped = 0;
        } else {
                ql_log(ql_log_info, vha, 0xd001,
-                   "Firmware dump saved to temp buffer (%ld/%p).\n",
-                   vha->host_no, ha->fw_dump);
+                   "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
+                   vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
                ha->fw_dumped = 1;
                qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
        }
@@ -1053,6 +1060,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        risc_address = ext_mem_cnt = 0;
        flags = 0;
+       ha->fw_dump_cap_flags = 0;
 
        if (!hardware_locked)
                spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1075,10 +1083,11 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
 
-       /* Pause RISC. */
-       rval = qla24xx_pause_risc(reg);
-       if (rval != QLA_SUCCESS)
-               goto qla24xx_fw_dump_failed_0;
+       /*
+        * Pause RISC. No need to track the timeout, as resetting the chip
+        * is the right approach in case of a pause timeout
+        */
+       qla24xx_pause_risc(reg, ha);
 
        /* Host interface registers. */
        dmp_reg = &reg->flash_addr;
@@ -1302,6 +1311,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        risc_address = ext_mem_cnt = 0;
        flags = 0;
+       ha->fw_dump_cap_flags = 0;
 
        if (!hardware_locked)
                spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1325,10 +1335,11 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
 
-       /* Pause RISC. */
-       rval = qla24xx_pause_risc(reg);
-       if (rval != QLA_SUCCESS)
-               goto qla25xx_fw_dump_failed_0;
+       /*
+        * Pause RISC. No need to track the timeout, as resetting the chip
+        * is the right approach in case of a pause timeout
+        */
+       qla24xx_pause_risc(reg, ha);
 
        /* Host/Risc registers. */
        iter_reg = fw->host_risc_reg;
@@ -1619,6 +1630,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        risc_address = ext_mem_cnt = 0;
        flags = 0;
+       ha->fw_dump_cap_flags = 0;
 
        if (!hardware_locked)
                spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1641,10 +1653,11 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
 
-       /* Pause RISC. */
-       rval = qla24xx_pause_risc(reg);
-       if (rval != QLA_SUCCESS)
-               goto qla81xx_fw_dump_failed_0;
+       /*
+        * Pause RISC. No need to track the timeout, as resetting the chip
+        * is the right approach in case of a pause timeout
+        */
+       qla24xx_pause_risc(reg, ha);
 
        /* Host/Risc registers. */
        iter_reg = fw->host_risc_reg;
@@ -1938,6 +1951,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        risc_address = ext_mem_cnt = 0;
        flags = 0;
+       ha->fw_dump_cap_flags = 0;
 
        if (!hardware_locked)
                spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1959,10 +1973,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
 
        fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
 
-       /* Pause RISC. */
-       rval = qla24xx_pause_risc(reg);
-       if (rval != QLA_SUCCESS)
-               goto qla83xx_fw_dump_failed_0;
+       /*
+        * Pause RISC. No need to track the timeout, as resetting the chip
+        * is the right approach in case of a pause timeout
+        */
+       qla24xx_pause_risc(reg, ha);
 
        WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
        dmp_reg = &reg->iobase_window;
@@ -2385,9 +2400,11 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
                        nxt += sizeof(fw->code_ram);
                        nxt += (ha->fw_memory_size - 0x100000 + 1);
                        goto copy_queue;
-               } else
+               } else {
+                       set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
                        ql_log(ql_log_warn, vha, 0xd010,
                            "bigger hammer success?\n");
+               }
        }
 
        rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
index cc961040f8b106b46625026c6301a80254dc9b03..e1fc4e66966aeab7b64bfd4ca9c75ca4da1a5be5 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -353,5 +353,6 @@ extern int qla27xx_dump_mpi_ram(struct qla_hw_data *, uint32_t, uint32_t *,
        uint32_t, void **);
 extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *,
        uint32_t, void **);
-extern int qla24xx_pause_risc(struct device_reg_24xx __iomem *);
+extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *,
+       struct qla_hw_data *);
 extern int qla24xx_soft_reset(struct qla_hw_data *);
index 6a106136716c7e91e0ed545e1758ca94a37d945d..1fa01044866637845d06a18fba953580a4696a5d 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -965,6 +965,13 @@ struct mbx_cmd_32 {
  */
 #define MBC_WRITE_MPI_REGISTER         0x01    /* Write MPI Register. */
 
+/*
+ * ISP8044 mailbox commands
+ */
+#define MBC_SET_GET_ETH_SERDES_REG     0x150
+#define HCS_WRITE_SERDES               0x3
+#define HCS_READ_SERDES                        0x4
+
 /* Firmware return data sizes */
 #define FCAL_MAP_SIZE  128
 
@@ -1622,10 +1629,20 @@ typedef struct {
 #define PO_MODE_DIF_PASS       2
 #define PO_MODE_DIF_REPLACE    3
 #define PO_MODE_DIF_TCP_CKSUM  6
-#define PO_ENABLE_DIF_BUNDLING BIT_8
 #define PO_ENABLE_INCR_GUARD_SEED      BIT_3
-#define PO_DISABLE_INCR_REF_TAG        BIT_5
 #define PO_DISABLE_GUARD_CHECK BIT_4
+#define PO_DISABLE_INCR_REF_TAG        BIT_5
+#define PO_DIS_HEADER_MODE     BIT_7
+#define PO_ENABLE_DIF_BUNDLING BIT_8
+#define PO_DIS_FRAME_MODE      BIT_9
+#define PO_DIS_VALD_APP_ESC    BIT_10 /* Dis validation for escape tag/ffffh */
+#define PO_DIS_VALD_APP_REF_ESC BIT_11
+
+#define PO_DIS_APP_TAG_REPL    BIT_12 /* disable APP Tag replacement */
+#define PO_DIS_REF_TAG_REPL    BIT_13
+#define PO_DIS_APP_TAG_VALD    BIT_14 /* disable APP Tag validation */
+#define PO_DIS_REF_TAG_VALD    BIT_15
+
 /*
  * ISP queue - 64-Bit addressing, continuation crc entry structure definition.
  */
@@ -1748,6 +1765,8 @@ typedef struct {
 #define CS_PORT_CONFIG_CHG     0x2A    /* Port Configuration Changed */
 #define CS_PORT_BUSY           0x2B    /* Port Busy */
 #define CS_COMPLETE_CHKCOND    0x30    /* Error? */
+#define CS_IOCB_ERROR          0x31    /* Generic error for IOCB request
+                                          failure */
 #define CS_BAD_PAYLOAD         0x80    /* Driver defined */
 #define CS_UNKNOWN             0x81    /* Driver defined */
 #define CS_RETRY               0x82    /* Driver defined */
@@ -2676,6 +2695,7 @@ struct rsp_que {
        uint32_t __iomem *rsp_q_out;
        uint16_t  ring_index;
        uint16_t  out_ptr;
+       uint16_t  *in_ptr;              /* queue shadow in index */
        uint16_t  length;
        uint16_t  options;
        uint16_t  rid;
@@ -2702,6 +2722,7 @@ struct req_que {
        uint32_t __iomem *req_q_out;
        uint16_t  ring_index;
        uint16_t  in_ptr;
+       uint16_t  *out_ptr;             /* queue shadow out index */
        uint16_t  cnt;
        uint16_t  length;
        uint16_t  options;
@@ -2907,6 +2928,8 @@ struct qla_hw_data {
 #define PCI_DEVICE_ID_QLOGIC_ISP8031   0x8031
 #define PCI_DEVICE_ID_QLOGIC_ISP2031   0x2031
 #define PCI_DEVICE_ID_QLOGIC_ISP2071   0x2071
+#define PCI_DEVICE_ID_QLOGIC_ISP2271   0x2271
+
        uint32_t        device_type;
 #define DT_ISP2100                      BIT_0
 #define DT_ISP2200                      BIT_1
@@ -2928,7 +2951,8 @@ struct qla_hw_data {
 #define DT_ISPFX00                     BIT_17
 #define DT_ISP8044                     BIT_18
 #define DT_ISP2071                     BIT_19
-#define DT_ISP_LAST                    (DT_ISP2071 << 1)
+#define DT_ISP2271                     BIT_20
+#define DT_ISP_LAST                    (DT_ISP2271 << 1)
 
 #define DT_T10_PI                       BIT_25
 #define DT_IIDMA                        BIT_26
@@ -2959,6 +2983,7 @@ struct qla_hw_data {
 #define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
 #define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
 #define IS_QLA2071(ha) (DT_MASK(ha) & DT_ISP2071)
+#define IS_QLA2271(ha) (DT_MASK(ha) & DT_ISP2271)
 
 #define IS_QLA23XX(ha)  (IS_QLA2300(ha) || IS_QLA2312(ha) || IS_QLA2322(ha) || \
                        IS_QLA6312(ha) || IS_QLA6322(ha))
@@ -2967,7 +2992,7 @@ struct qla_hw_data {
 #define IS_QLA25XX(ha)  (IS_QLA2532(ha))
 #define IS_QLA83XX(ha) (IS_QLA2031(ha) || IS_QLA8031(ha))
 #define IS_QLA84XX(ha)  (IS_QLA8432(ha))
-#define IS_QLA27XX(ha)  (IS_QLA2071(ha))
+#define IS_QLA27XX(ha)  (IS_QLA2071(ha) || IS_QLA2271(ha))
 #define IS_QLA24XX_TYPE(ha)     (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
                                IS_QLA84XX(ha))
 #define IS_CNA_CAPABLE(ha)     (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
@@ -3006,6 +3031,7 @@ struct qla_hw_data {
     (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
 #define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
 #define IS_TGT_MODE_CAPABLE(ha)        (ha->tgt.atio_q_length)
+#define IS_SHADOW_REG_CAPABLE(ha)  (IS_QLA27XX(ha))
 
        /* HBA serial number */
        uint8_t         serial0;
@@ -3136,7 +3162,15 @@ struct qla_hw_data {
        struct qla2xxx_fw_dump *fw_dump;
        uint32_t        fw_dump_len;
        int             fw_dumped;
+       unsigned long   fw_dump_cap_flags;
+#define RISC_PAUSE_CMPL                0
+#define DMA_SHUTDOWN_CMPL      1
+#define ISP_RESET_CMPL         2
+#define RISC_RDY_AFT_RESET     3
+#define RISC_SRAM_DUMP_CMPL    4
+#define RISC_EXT_MEM_DUMP_CMPL 5
        int             fw_dump_reading;
+       int             prev_minidump_failed;
        dma_addr_t      eft_dma;
        void            *eft;
 /* Current size of mctp dump is 0x086064 bytes */
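
The new fw_dump_cap_flags word records how far a firmware-dump attempt got; each capture stage sets its bit in qla_dbg.c above, and the value is logged as "dump status flags (0x%lx)" by qla2xxx_dump_post_process(). A purely illustrative decode of that bitmask:

    if (!test_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags))
            pr_info("RISC never reported paused before the dump\n");
    if (!test_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags))
            pr_info("RISC did not come ready after soft reset\n");
    if (!test_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags))
            pr_info("external memory dump did not complete\n");
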
index 32ab80957688b8aec9be4e3d55f2a49656008151..2ca39b8e71667dfbd573b1cc5dc20685c578bc0b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 3a7353eaccbd4ece25c94965bba2cb0a15d3d5b5..eb8f57249f1dd6e1613d874562b9df0695100072 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -371,7 +371,10 @@ struct init_cb_24xx {
         * BIT 14 = Data Rate bit 1
         * BIT 15 = Data Rate bit 2
         * BIT 16 = Enable 75 ohm Termination Select
-        * BIT 17-31 = Reserved
+        * BIT 17-28 = Reserved
+        * BIT 29 = Enable response queue 0 in index shadowing
+        * BIT 30 = Enable request queue 0 out index shadowing
+        * BIT 31 = Reserved
         */
        uint32_t firmware_options_3;
        uint16_t qos;
@@ -1134,13 +1137,6 @@ struct device_reg_24xx {
 #define MIN_MULTI_ID_FABRIC    64      /* Must be power-of-2. */
 #define MAX_MULTI_ID_FABRIC    256     /* ... */
 
-#define for_each_mapped_vp_idx(_ha, _idx)              \
-       for (_idx = find_next_bit((_ha)->vp_idx_map,    \
-               (_ha)->max_npiv_vports + 1, 1);         \
-           _idx <= (_ha)->max_npiv_vports;             \
-           _idx = find_next_bit((_ha)->vp_idx_map,     \
-               (_ha)->max_npiv_vports + 1, _idx + 1))  \
-
 struct mid_conf_entry_24xx {
        uint16_t reserved_1;
 
index e665e8109933de19eef9b92977e171c1bc72af5f..d48dea8fab1ba50a6dbd0c63855ff73cb584ec2b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -220,6 +220,13 @@ extern unsigned long qla2x00_get_async_timeout(struct scsi_qla_host *);
 
 extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
 extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
+extern int qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *, srb_t *,
+       uint32_t *, uint16_t, struct qla_tgt_cmd *);
+extern int qla24xx_walk_and_build_sglist(struct qla_hw_data *, srb_t *,
+       uint32_t *, uint16_t, struct qla_tgt_cmd *);
+extern int qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *, srb_t *,
+       uint32_t *, uint16_t, struct qla_tgt_cmd *);
+
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
@@ -346,6 +353,11 @@ qla2x00_write_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t);
 extern int
 qla2x00_read_serdes_word(scsi_qla_host_t *, uint16_t, uint16_t *);
 
+extern int
+qla8044_write_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t);
+extern int
+qla8044_read_serdes_word(scsi_qla_host_t *, uint32_t, uint32_t *);
+
 extern int
 qla2x00_set_serdes_params(scsi_qla_host_t *, uint16_t, uint16_t, uint16_t);
 
index e377f9d2f92ab33ed0ae8462c0b7d2ea78310295..a0df3b1b3823cdddb8ad0ef6a3e326f0f7977479 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 38aeb54cd9d8076bdcb999811a2a068dd7e0962d..e2184412617d8d0ccce3527dceacfb351fc5b45a 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -1476,6 +1476,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
        }
 
        ha->fw_dumped = 0;
+       ha->fw_dump_cap_flags = 0;
        dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0;
        req_q_size = rsp_q_size = 0;
 
@@ -2061,6 +2062,10 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
        icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
        icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
 
+       if (IS_SHADOW_REG_CAPABLE(ha))
+               icb->firmware_options_2 |=
+                   __constant_cpu_to_le32(BIT_30|BIT_29);
+
        if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
                icb->rid = __constant_cpu_to_le16(rid);
@@ -2138,6 +2143,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
                req = ha->req_q_map[que];
                if (!req)
                        continue;
+               req->out_ptr = (void *)(req->ring + req->length);
+               *req->out_ptr = 0;
                for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
                        req->outstanding_cmds[cnt] = NULL;
 
@@ -2153,6 +2160,8 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
                rsp = ha->rsp_q_map[que];
                if (!rsp)
                        continue;
+               rsp->in_ptr = (void *)(rsp->ring + rsp->length);
+               *rsp->in_ptr = 0;
                /* Initialize response queue entries */
                if (IS_QLAFX00(ha))
                        qlafx00_init_response_q_entries(rsp);
@@ -3406,7 +3415,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
                                            fcport->d_id.b.domain,
                                            fcport->d_id.b.area,
                                            fcport->d_id.b.al_pa);
-                                       fcport->loop_id = FC_NO_LOOP_ID;
+                                       qla2x00_clear_loop_id(fcport);
                                }
                        }
                }
@@ -4727,7 +4736,6 @@ static int
 qla2x00_restart_isp(scsi_qla_host_t *vha)
 {
        int status = 0;
-       uint32_t wait_time;
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
        struct rsp_que *rsp = ha->rsp_q_map[0];
@@ -4744,14 +4752,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
        if (!status && !(status = qla2x00_init_rings(vha))) {
                clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
                ha->flags.chip_reset_done = 1;
+
                /* Initialize the queues in use */
                qla25xx_init_queues(ha);
 
                status = qla2x00_fw_ready(vha);
                if (!status) {
-                       ql_dbg(ql_dbg_taskm, vha, 0x8031,
-                           "Start configure loop status = %d.\n", status);
-
                        /* Issue a marker after FW becomes ready. */
                        qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
 
@@ -4766,24 +4772,12 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
                                qlt_24xx_process_atio_queue(vha);
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-                       /* Wait at most MAX_TARGET RSCNs for a stable link. */
-                       wait_time = 256;
-                       do {
-                               clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-                               qla2x00_configure_loop(vha);
-                               wait_time--;
-                       } while (!atomic_read(&vha->loop_down_timer) &&
-                               !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
-                               && wait_time && (test_bit(LOOP_RESYNC_NEEDED,
-                               &vha->dpc_flags)));
+                       set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                }
 
                /* if no cable then assume it's good */
                if ((vha->device_flags & DFLG_NO_CABLE))
                        status = 0;
-
-               ql_dbg(ql_dbg_taskm, vha, 0x8032,
-                   "Configure loop done, status = 0x%x.\n", status);
        }
        return (status);
 }
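
Both restart paths in this file (the hunk above and the qla82xx one below) drop the inline 256-pass qla2x00_configure_loop() wait and instead set LOOP_RESYNC_NEEDED so the DPC thread performs the resync on its own schedule. A minimal sketch of that defer-to-worker idiom, using the kernel's set_bit()/test_and_clear_bit() but with a hypothetical flag bit and worker name (illustrative, not driver code):

#include <linux/bitops.h>

#define EXAMPLE_RESYNC_NEEDED   7       /* hypothetical flag bit */

static unsigned long example_dpc_flags;

/* Fast path: record that a resync is needed; do not loop here. */
static void example_request_resync(void)
{
        set_bit(EXAMPLE_RESYNC_NEEDED, &example_dpc_flags);
}

/* DPC thread: consume the flag and run the slow resync work later. */
static void example_dpc_iteration(void)
{
        if (test_and_clear_bit(EXAMPLE_RESYNC_NEEDED, &example_dpc_flags))
                ;       /* the loop-resync routine would run here */
}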
@@ -6130,7 +6124,6 @@ int
 qla82xx_restart_isp(scsi_qla_host_t *vha)
 {
        int status, rval;
-       uint32_t wait_time;
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
        struct rsp_que *rsp = ha->rsp_q_map[0];
@@ -6144,31 +6137,15 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
 
                status = qla2x00_fw_ready(vha);
                if (!status) {
-                       ql_log(ql_log_info, vha, 0x803c,
-                           "Start configure loop, status =%d.\n", status);
-
                        /* Issue a marker after FW becomes ready. */
                        qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
-
                        vha->flags.online = 1;
-                       /* Wait at most MAX_TARGET RSCNs for a stable link. */
-                       wait_time = 256;
-                       do {
-                               clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
-                               qla2x00_configure_loop(vha);
-                               wait_time--;
-                       } while (!atomic_read(&vha->loop_down_timer) &&
-                           !(test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) &&
-                           wait_time &&
-                           (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)));
+                       set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
                }
 
                /* if no cable then assume it's good */
                if ((vha->device_flags & DFLG_NO_CABLE))
                        status = 0;
-
-               ql_log(ql_log_info, vha, 0x8000,
-                   "Configure loop done, status = 0x%x.\n", status);
        }
 
        if (!status) {
@@ -6182,8 +6159,6 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
                        vha->marker_needed = 1;
                }
 
-               vha->flags.online = 1;
-
                ha->isp_ops->enable_intrs(ha);
 
                ha->isp_abort_cnt = 0;
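
The req->out_ptr / rsp->in_ptr assignments in the qla2x00_init_rings() hunks above park a shadow copy of each queue index in the word immediately past the last ring entry, so shadow-register capable adapters (see IS_SHADOW_REG_CAPABLE above) can DMA the current index into host memory instead of forcing a register read. A minimal layout sketch, assuming the ring allocation leaves room for one extra 32-bit word past the final entry (illustrative types, not the driver's):

#include <stdint.h>
#include <stddef.h>

struct example_ring {
        void              *entries;     /* DMA ring of 'length' entries   */
        uint16_t           length;      /* number of entries in the ring  */
        size_t             entry_size;  /* size of one IOCB/response slot */
        volatile uint32_t *shadow_idx;  /* index the firmware DMAs back   */
};

/* Mirrors req->out_ptr = (void *)(req->ring + req->length); *out_ptr = 0; */
static void example_ring_init_shadow(struct example_ring *r)
{
        r->shadow_idx = (volatile uint32_t *)
                ((char *)r->entries + (size_t)r->length * r->entry_size);
        *r->shadow_idx = 0;
}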
index ce8b5fb0f347c46127d4b0a6077255db202c561f..b3b1d6fc2d6cb0e2bbe5d2272e5508d09b303212 100644 (file)
@@ -1,10 +1,11 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 
+#include "qla_target.h"
 /**
  * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
  * Continuation Type 1 IOCBs to allocate.
@@ -128,12 +129,20 @@ qla2x00_clear_loop_id(fc_port_t *fcport) {
 }
 
 static inline void
-qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
+qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp,
+       struct qla_tgt_cmd *tc)
 {
        struct dsd_dma *dsd_ptr, *tdsd_ptr;
        struct crc_context *ctx;
 
-       ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
+       if (sp)
+               ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);
+       else if (tc)
+               ctx = (struct crc_context *)tc->ctx;
+       else {
+               BUG();
+               return;
+       }
 
        /* clean up allocated prev pool */
        list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
index e607568bce49e2a8608491ba84f1ac65d73af51e..76093152959203227e93e24d619c687f31f71158 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -936,9 +936,9 @@ qla24xx_get_one_block_sg(uint32_t blk_sz, struct qla2_sgx *sgx,
        return 1;
 }
 
-static int
+int
 qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
-       uint32_t *dsd, uint16_t tot_dsds)
+       uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
 {
        void *next_dsd;
        uint8_t avail_dsds = 0;
@@ -948,21 +948,35 @@ qla24xx_walk_and_build_sglist_no_difb(struct qla_hw_data *ha, srb_t *sp,
        uint32_t *cur_dsd = dsd;
        uint16_t        used_dsds = tot_dsds;
 
-       uint32_t        prot_int;
+       uint32_t        prot_int; /* protection interval */
        uint32_t        partial;
        struct qla2_sgx sgx;
        dma_addr_t      sle_dma;
        uint32_t        sle_dma_len, tot_prot_dma_len = 0;
-       struct scsi_cmnd *cmd = GET_CMD_SP(sp);
-
-       prot_int = cmd->device->sector_size;
+       struct scsi_cmnd *cmd;
+       struct scsi_qla_host *vha;
 
        memset(&sgx, 0, sizeof(struct qla2_sgx));
-       sgx.tot_bytes = scsi_bufflen(cmd);
-       sgx.cur_sg = scsi_sglist(cmd);
-       sgx.sp = sp;
-
-       sg_prot = scsi_prot_sglist(cmd);
+       if (sp) {
+               vha = sp->fcport->vha;
+               cmd = GET_CMD_SP(sp);
+               prot_int = cmd->device->sector_size;
+
+               sgx.tot_bytes = scsi_bufflen(cmd);
+               sgx.cur_sg = scsi_sglist(cmd);
+               sgx.sp = sp;
+
+               sg_prot = scsi_prot_sglist(cmd);
+       } else if (tc) {
+               vha = tc->vha;
+               prot_int      = tc->blk_sz;
+               sgx.tot_bytes = tc->bufflen;
+               sgx.cur_sg    = tc->sg;
+               sg_prot       = tc->prot_sg;
+       } else {
+               BUG();
+               return 1;
+       }
 
        while (qla24xx_get_one_block_sg(prot_int, &sgx, &partial)) {
 
@@ -995,10 +1009,18 @@ alloc_and_fill:
                                return 1;
                        }
 
-                       list_add_tail(&dsd_ptr->list,
-                           &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
+                       if (sp) {
+                               list_add_tail(&dsd_ptr->list,
+                                   &((struct crc_context *)
+                                           sp->u.scmd.ctx)->dsd_list);
+
+                               sp->flags |= SRB_CRC_CTX_DSD_VALID;
+                       } else {
+                               list_add_tail(&dsd_ptr->list,
+                                   &(tc->ctx->dsd_list));
+                               tc->ctx_dsd_alloced = 1;
+                       }
 
-                       sp->flags |= SRB_CRC_CTX_DSD_VALID;
 
                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1033,21 +1055,35 @@ alloc_and_fill:
        return 0;
 }
 
-static int
+int
 qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
-       uint16_t tot_dsds)
+       uint16_t tot_dsds, struct qla_tgt_cmd *tc)
 {
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
-       struct scatterlist *sg;
+       struct scatterlist *sg, *sgl;
        uint32_t *cur_dsd = dsd;
        int     i;
        uint16_t        used_dsds = tot_dsds;
-       struct scsi_cmnd *cmd = GET_CMD_SP(sp);
+       struct scsi_cmnd *cmd;
+       struct scsi_qla_host *vha;
+
+       if (sp) {
+               cmd = GET_CMD_SP(sp);
+               sgl = scsi_sglist(cmd);
+               vha = sp->fcport->vha;
+       } else if (tc) {
+               sgl = tc->sg;
+               vha = tc->vha;
+       } else {
+               BUG();
+               return 1;
+       }
 
-       scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+
+       for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
 
                /* Allocate additional continuation packets? */
@@ -1076,10 +1112,17 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
                                return 1;
                        }
 
-                       list_add_tail(&dsd_ptr->list,
-                           &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
+                       if (sp) {
+                               list_add_tail(&dsd_ptr->list,
+                                   &((struct crc_context *)
+                                           sp->u.scmd.ctx)->dsd_list);
 
-                       sp->flags |= SRB_CRC_CTX_DSD_VALID;
+                               sp->flags |= SRB_CRC_CTX_DSD_VALID;
+                       } else {
+                               list_add_tail(&dsd_ptr->list,
+                                   &(tc->ctx->dsd_list));
+                               tc->ctx_dsd_alloced = 1;
+                       }
 
                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1102,23 +1145,37 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
        return 0;
 }
 
-static int
+int
 qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
-                                                       uint32_t *dsd,
-       uint16_t tot_dsds)
+       uint32_t *dsd, uint16_t tot_dsds, struct qla_tgt_cmd *tc)
 {
        void *next_dsd;
        uint8_t avail_dsds = 0;
        uint32_t dsd_list_len;
        struct dsd_dma *dsd_ptr;
-       struct scatterlist *sg;
+       struct scatterlist *sg, *sgl;
        int     i;
        struct scsi_cmnd *cmd;
        uint32_t *cur_dsd = dsd;
-       uint16_t        used_dsds = tot_dsds;
+       uint16_t used_dsds = tot_dsds;
+       struct scsi_qla_host *vha;
+
+       if (sp) {
+               cmd = GET_CMD_SP(sp);
+               sgl = scsi_prot_sglist(cmd);
+               vha = sp->fcport->vha;
+       } else if (tc) {
+               vha = tc->vha;
+               sgl = tc->prot_sg;
+       } else {
+               BUG();
+               return 1;
+       }
 
-       cmd = GET_CMD_SP(sp);
-       scsi_for_each_prot_sg(cmd, sg, tot_dsds, i) {
+       ql_dbg(ql_dbg_tgt, vha, 0xe021,
+               "%s: enter\n", __func__);
+
+       for_each_sg(sgl, sg, tot_dsds, i) {
                dma_addr_t      sle_dma;
 
                /* Allocate additional continuation packets? */
@@ -1147,10 +1204,17 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
                                return 1;
                        }
 
-                       list_add_tail(&dsd_ptr->list,
-                           &((struct crc_context *)sp->u.scmd.ctx)->dsd_list);
+                       if (sp) {
+                               list_add_tail(&dsd_ptr->list,
+                                   &((struct crc_context *)
+                                           sp->u.scmd.ctx)->dsd_list);
 
-                       sp->flags |= SRB_CRC_CTX_DSD_VALID;
+                               sp->flags |= SRB_CRC_CTX_DSD_VALID;
+                       } else {
+                               list_add_tail(&dsd_ptr->list,
+                                   &(tc->ctx->dsd_list));
+                               tc->ctx_dsd_alloced = 1;
+                       }
 
                        /* add new list to cmd iocb or last list */
                        *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
@@ -1386,10 +1450,10 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
 
        if (!bundling && tot_prot_dsds) {
                if (qla24xx_walk_and_build_sglist_no_difb(ha, sp,
-                   cur_dsd, tot_dsds))
+                       cur_dsd, tot_dsds, NULL))
                        goto crc_queuing_error;
        } else if (qla24xx_walk_and_build_sglist(ha, sp, cur_dsd,
-           (tot_dsds - tot_prot_dsds)))
+                       (tot_dsds - tot_prot_dsds), NULL))
                goto crc_queuing_error;
 
        if (bundling && tot_prot_dsds) {
@@ -1398,7 +1462,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
                        __constant_cpu_to_le16(CF_DIF_SEG_DESCR_ENABLE);
                cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
                if (qla24xx_walk_and_build_prot_sglist(ha, sp, cur_dsd,
-                   tot_prot_dsds))
+                               tot_prot_dsds, NULL))
                        goto crc_queuing_error;
        }
        return QLA_SUCCESS;
@@ -1478,8 +1542,8 @@ qla24xx_start_scsi(srb_t *sp)
        tot_dsds = nseg;
        req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
        if (req->cnt < (req_cnt + 2)) {
-               cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+                   RD_REG_DWORD_RELAXED(req->req_q_out);
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
@@ -1697,8 +1761,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
        tot_prot_dsds = nseg;
        tot_dsds += nseg;
        if (req->cnt < (req_cnt + 2)) {
-               cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+                   RD_REG_DWORD_RELAXED(req->req_q_out);
                if (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
@@ -2825,8 +2889,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
 
        /* Check for room on request queue. */
        if (req->cnt < req_cnt + 2) {
-               cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
-
+               cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+                   RD_REG_DWORD_RELAXED(req->req_q_out);
                if  (req->ring_index < cnt)
                        req->cnt = cnt - req->ring_index;
                else
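
Each of the three request-queue space checks above now takes the consumer index from the shadow word when IS_SHADOW_REG_CAPABLE() is true, falling back to RD_REG_DWORD_RELAXED() otherwise; the free-slot arithmetic itself is unchanged. A compact sketch of that circular-ring computation, with names invented here for illustration:

#include <stdint.h>

/*
 * Free entries in a circular request ring: 'in' is where the driver will
 * write next, 'out' is the consumer index the firmware last reported,
 * 'length' is the total number of ring entries.
 */
static uint16_t example_ring_free(uint16_t in, uint16_t out, uint16_t length)
{
        if (in < out)
                return out - in;
        return length - (in - out);
}

/*
 * The hunks above pick 'out' like this before doing the equivalent of
 * example_ring_free() and comparing against req_cnt + 2:
 *
 *      out = shadow_capable ? *req_out_shadow : read_req_q_out_register();
 */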
index 95314ef2e5050b402071f22b25c6323fdf7f9502..a56825c73c31418f31e570e8c0cde7dcbec6f09e 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -2009,11 +2009,13 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                ql_dbg(ql_dbg_io, vha, 0x3017,
                    "Invalid status handle (0x%x).\n", sts->handle);
 
-               if (IS_P3P_TYPE(ha))
-                       set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
-               else
-                       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-               qla2xxx_wake_dpc(vha);
+               if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
+                       if (IS_P3P_TYPE(ha))
+                               set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+                       else
+                               set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+                       qla2xxx_wake_dpc(vha);
+               }
                return;
        }
 
@@ -2472,12 +2474,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
                if (pkt->entry_status != 0) {
                        qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
 
-                       (void)qlt_24xx_process_response_error(vha, pkt);
+                       if (qlt_24xx_process_response_error(vha, pkt))
+                               goto process_err;
 
                        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
                        wmb();
                        continue;
                }
+process_err:
 
                switch (pkt->entry_type) {
                case STATUS_TYPE:
@@ -2494,10 +2498,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
                        qla24xx_logio_entry(vha, rsp->req,
                            (struct logio_entry_24xx *)pkt);
                        break;
-                case CT_IOCB_TYPE:
+               case CT_IOCB_TYPE:
                        qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
                        break;
-                case ELS_IOCB_TYPE:
+               case ELS_IOCB_TYPE:
                        qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
                        break;
                case ABTS_RECV_24XX:
@@ -2506,6 +2510,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
                case ABTS_RESP_24XX:
                case CTIO_TYPE7:
                case NOTIFY_ACK_TYPE:
+               case CTIO_CRC2:
                        qlt_response_pkt_all_vps(vha, (response_t *)pkt);
                        break;
                case MARKER_TYPE:
index 2528709c4add35699fc08b81f8a8ca2a0b3c1505..1c33a77db5c235827a2c284f1aa14fc6407dcaef 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -1319,7 +1319,7 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
 
                left = 0;
 
-               list = kzalloc(dma_size, GFP_KERNEL);
+               list = kmemdup(pmap, dma_size, GFP_KERNEL);
                if (!list) {
                        ql_log(ql_log_warn, vha, 0x1140,
                            "%s(%ld): failed to allocate node names list "
@@ -1328,7 +1328,6 @@ qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
                        goto out_free;
                }
 
-               memcpy(list, pmap, dma_size);
 restart:
                dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
        }
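
The qla2x00_get_node_name_list() hunk above folds a kzalloc() + memcpy() pair into a single kmemdup() call; allocation failure is handled exactly as before, and the zero-fill was redundant because the copy overwrote the whole buffer anyway. A minimal equivalence sketch (local helper names, not driver code):

#include <linux/slab.h>
#include <linux/string.h>

/* Before: allocate, then copy. */
static void *example_dup_open_coded(const void *src, size_t len)
{
        void *dst = kzalloc(len, GFP_KERNEL);

        if (!dst)
                return NULL;
        memcpy(dst, src, len);
        return dst;
}

/* After: kmemdup() allocates and copies in one step. */
static void *example_dup_kmemdup(const void *src, size_t len)
{
        return kmemdup(src, len, GFP_KERNEL);
}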
@@ -2644,7 +2643,10 @@ qla24xx_abort_command(srb_t *sp)
                ql_dbg(ql_dbg_mbx, vha, 0x1090,
                    "Failed to complete IOCB -- completion status (%x).\n",
                    le16_to_cpu(abt->nport_handle));
-               rval = QLA_FUNCTION_FAILED;
+               if (abt->nport_handle == CS_IOCB_ERROR)
+                       rval = QLA_FUNCTION_PARAMETER_ERROR;
+               else
+                       rval = QLA_FUNCTION_FAILED;
        } else {
                ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
                    "Done %s.\n", __func__);
@@ -2879,6 +2881,78 @@ qla2x00_read_serdes_word(scsi_qla_host_t *vha, uint16_t addr, uint16_t *data)
        return rval;
 }
 
+int
+qla8044_write_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       if (!IS_QLA8044(vha->hw))
+               return QLA_FUNCTION_FAILED;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1186,
+           "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
+       mcp->mb[1] = HCS_WRITE_SERDES;
+       mcp->mb[3] = LSW(addr);
+       mcp->mb[4] = MSW(addr);
+       mcp->mb[5] = LSW(data);
+       mcp->mb[6] = MSW(data);
+       mcp->out_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_1|MBX_0;
+       mcp->in_mb = MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x1187,
+                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1188,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
+int
+qla8044_read_serdes_word(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       if (!IS_QLA8044(vha->hw))
+               return QLA_FUNCTION_FAILED;
+
+       ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1189,
+           "Entered %s.\n", __func__);
+
+       mcp->mb[0] = MBC_SET_GET_ETH_SERDES_REG;
+       mcp->mb[1] = HCS_READ_SERDES;
+       mcp->mb[3] = LSW(addr);
+       mcp->mb[4] = MSW(addr);
+       mcp->out_mb = MBX_4|MBX_3|MBX_1|MBX_0;
+       mcp->in_mb = MBX_2|MBX_1|MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       *data = mcp->mb[2] << 16 | mcp->mb[1];
+
+       if (rval != QLA_SUCCESS) {
+               ql_dbg(ql_dbg_mbx, vha, 0x118a,
+                   "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+       } else {
+               ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118b,
+                   "Done %s.\n", __func__);
+       }
+
+       return rval;
+}
+
 /**
  * qla2x00_set_serdes_params() -
  * @ha: HA context
@@ -3660,6 +3734,9 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
            "Entered %s.\n", __func__);
 
+       if (IS_SHADOW_REG_CAPABLE(ha))
+               req->options |= BIT_13;
+
        mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
        mcp->mb[1] = req->options;
        mcp->mb[2] = MSW(LSD(req->dma));
@@ -3679,7 +3756,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
        /* que in ptr index */
        mcp->mb[8] = 0;
        /* que out ptr index */
-       mcp->mb[9] = 0;
+       mcp->mb[9] = *req->out_ptr = 0;
        mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7|
                        MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
        mcp->in_mb = MBX_0;
@@ -3688,7 +3765,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
 
        if (IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha))
                mcp->in_mb |= MBX_1;
-       if (IS_QLA83XX(ha) || !IS_QLA27XX(ha)) {
+       if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
                mcp->out_mb |= MBX_15;
                /* debug q create issue in SR-IOV */
                mcp->in_mb |= MBX_9 | MBX_8 | MBX_7;
@@ -3697,7 +3774,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (!(req->options & BIT_0)) {
                WRT_REG_DWORD(req->req_q_in, 0);
-               if (!IS_QLA83XX(ha) || !IS_QLA27XX(ha))
+               if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
                        WRT_REG_DWORD(req->req_q_out, 0);
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3726,6 +3803,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
            "Entered %s.\n", __func__);
 
+       if (IS_SHADOW_REG_CAPABLE(ha))
+               rsp->options |= BIT_13;
+
        mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
        mcp->mb[1] = rsp->options;
        mcp->mb[2] = MSW(LSD(rsp->dma));
@@ -3740,7 +3820,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
 
        mcp->mb[4] = rsp->id;
        /* que in ptr index */
-       mcp->mb[8] = 0;
+       mcp->mb[8] = *rsp->in_ptr = 0;
        /* que out ptr index */
        mcp->mb[9] = 0;
        mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
index f0a852257f9940483b04a71f1a4c91449039c6c3..89998244f48d30782dc51db46ce49dee06dd28c9 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index 0aaf6a9c87d3a834f726658d33ecbc5f9e6fb011..abeb3901498b2c1714e99284d3c4a1e7db4ef145 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -527,21 +527,63 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
        struct qla_hw_data *ha = vha->hw;
        int i, core;
        uint32_t cnt;
+       uint32_t reg_val;
+
+       spin_lock_irqsave(&ha->hardware_lock, flags);
+
+       QLAFX00_SET_HBA_SOC_REG(ha, 0x80004, 0);
+       QLAFX00_SET_HBA_SOC_REG(ha, 0x82004, 0);
+
+       /* stop the XOR DMA engines */
+       QLAFX00_SET_HBA_SOC_REG(ha, 0x60920, 0x02);
+       QLAFX00_SET_HBA_SOC_REG(ha, 0x60924, 0x02);
+       QLAFX00_SET_HBA_SOC_REG(ha, 0xf0920, 0x02);
+       QLAFX00_SET_HBA_SOC_REG(ha, 0xf0924, 0x02);
+
+       /* stop the IDMA engines */
+       reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60840);
+       reg_val &= ~(1<<12);
+       QLAFX00_SET_HBA_SOC_REG(ha, 0x60840, reg_val);
+
+       reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60844);
+       reg_val &= ~(1<<12);
+       QLAFX00_SET_HBA_SOC_REG(ha, 0x60844, reg_val);
+
+       reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x60848);
+       reg_val &= ~(1<<12);
+       QLAFX00_SET_HBA_SOC_REG(ha, 0x60848, reg_val);
+
+       reg_val = QLAFX00_GET_HBA_SOC_REG(ha, 0x6084C);
+       reg_val &= ~(1<<12);
+       QLAFX00_SET_HBA_SOC_REG(ha, 0x6084C, reg_val);
+
+       for (i = 0; i < 100000; i++) {
+               if ((QLAFX00_GET_HBA_SOC_REG(ha, 0xd0000) & 0x10000000) == 0 &&
+                   (QLAFX00_GET_HBA_SOC_REG(ha, 0x10600) & 0x1) == 0)
+                       break;
+               udelay(100);
+       }
 
        /* Set all 4 cores in reset */
        for (i = 0; i < 4; i++) {
                QLAFX00_SET_HBA_SOC_REG(ha,
                    (SOC_SW_RST_CONTROL_REG_CORE0 + 8*i), (0xF01));
-       }
-
-       /* Set all 4 core Clock gating control */
-       for (i = 0; i < 4; i++) {
                QLAFX00_SET_HBA_SOC_REG(ha,
                    (SOC_SW_RST_CONTROL_REG_CORE0 + 4 + 8*i), (0x01010101));
        }
 
        /* Reset all units in Fabric */
-       QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x11F0101));
+       QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x011f0101));
+
+       /* */
+       QLAFX00_SET_HBA_SOC_REG(ha, 0x10610, 1);
+       QLAFX00_SET_HBA_SOC_REG(ha, 0x10600, 0);
+
+       /* Set all 4 core Memory Power Down Registers */
+       for (i = 0; i < 5; i++) {
+               QLAFX00_SET_HBA_SOC_REG(ha,
+                   (SOC_PWR_MANAGEMENT_PWR_DOWN_REG + 4*i), (0x0));
+       }
 
        /* Reset all interrupt control registers */
        for (i = 0; i < 115; i++) {
@@ -564,20 +606,19 @@ qlafx00_soc_cpu_reset(scsi_qla_host_t *vha)
        QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONTROL_REG, (0x2));
        QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_CONFIG_REG, (0x3));
 
-       spin_lock_irqsave(&ha->hardware_lock, flags);
-
        /* Kick in Fabric units */
        QLAFX00_SET_HBA_SOC_REG(ha, SOC_FABRIC_RST_CONTROL_REG, (0x0));
 
        /* Kick in Core0 to start boot process */
        QLAFX00_SET_HBA_SOC_REG(ha, SOC_SW_RST_CONTROL_REG_CORE0, (0xF00));
 
+       spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        /* Wait 10secs for soft-reset to complete. */
        for (cnt = 10; cnt; cnt--) {
                msleep(1000);
                barrier();
        }
-       spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
 /**
@@ -597,7 +638,6 @@ qlafx00_soft_reset(scsi_qla_host_t *vha)
 
        ha->isp_ops->disable_intrs(ha);
        qlafx00_soc_cpu_reset(vha);
-       ha->isp_ops->enable_intrs(ha);
 }
 
 /**
@@ -2675,7 +2715,7 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
        uint16_t lreq_q_out = 0;
 
        lreq_q_in = RD_REG_DWORD(rsp->rsp_q_in);
-       lreq_q_out = RD_REG_DWORD(rsp->rsp_q_out);
+       lreq_q_out = rsp->ring_index;
 
        while (lreq_q_in != lreq_q_out) {
                lptr = rsp->ring_ptr;
@@ -3426,7 +3466,7 @@ qlafx00_fxdisc_iocb(srb_t *sp, struct fxdisc_entry_fx00 *pfxiocb)
            sp->fcport->vha, 0x3047,
            (uint8_t *)&fx_iocb, sizeof(struct fxdisc_entry_fx00));
 
-       memcpy((void *)pfxiocb, &fx_iocb,
+       memcpy_toio((void __iomem *)pfxiocb, &fx_iocb,
            sizeof(struct fxdisc_entry_fx00));
        wmb();
 }
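
The qlafx00_fxdisc_iocb() change just above swaps memcpy() for memcpy_toio() because the destination IOCB slot is device (__iomem) memory, not ordinary RAM; the existing wmb() keeps the copy ordered ahead of whatever rings the hardware. A minimal sketch of that post-then-doorbell pattern, with hypothetical slot and doorbell pointers (not the driver's own names):

#include <linux/io.h>

/* Post one fixed-size command into a memory-mapped queue slot. */
static void example_post_cmd(void __iomem *slot, void __iomem *doorbell,
                             const void *cmd, size_t len, u32 next_index)
{
        memcpy_toio(slot, cmd, len);    /* never plain memcpy() for __iomem */
        wmb();                          /* copy visible before the doorbell */
        writel(next_index, doorbell);
}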
index e529dfaeb8542399d5edc840df5d7fb809b2fa26..aeaa1b40b1fc39187d0c3afd3ea768d9d9c042ac 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -351,6 +351,7 @@ struct config_info_data {
 #define SOC_FABRIC_RST_CONTROL_REG       0x0020840
 #define SOC_FABRIC_CONTROL_REG           0x0020200
 #define SOC_FABRIC_CONFIG_REG            0x0020204
+#define SOC_PWR_MANAGEMENT_PWR_DOWN_REG  0x001820C
 
 #define SOC_INTERRUPT_SOURCE_I_CONTROL_REG     0x0020B00
 #define SOC_CORE_TIMER_REG                     0x0021850
index 5511e24b1f11ca3e5cc519eec8e84f08febbaa6a..58f3c912d96ea6ac14ac743281c4c584fd5a84ba 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -848,6 +848,7 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
 {
        int done = 0, timeout = 0;
        uint32_t lock_owner = 0;
+       scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
        while (!done) {
                /* acquire semaphore2 from PCI HW block */
@@ -856,17 +857,21 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
                        break;
                if (timeout >= qla82xx_rom_lock_timeout) {
                        lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+                       ql_log(ql_log_warn, vha, 0xb157,
+                           "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
+                           __func__, ha->portnum, lock_owner);
                        return -1;
                }
                timeout++;
        }
-       qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
+       qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ha->portnum);
        return 0;
 }
 
 static void
 qla82xx_rom_unlock(struct qla_hw_data *ha)
 {
+       qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, 0xffffffff);
        qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
 }
 
@@ -950,6 +955,7 @@ static int
 qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
 {
        int ret, loops = 0;
+       uint32_t lock_owner = 0;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
        while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
@@ -958,8 +964,10 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
                loops++;
        }
        if (loops >= 50000) {
+               lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
                ql_log(ql_log_fatal, vha, 0x00b9,
-                   "Failed to acquire SEM2 lock.\n");
+                   "Failed to acquire SEM2 lock, Lock Owner %u.\n",
+                   lock_owner);
                return -1;
        }
        ret = qla82xx_do_rom_fast_read(ha, addr, valp);
@@ -1057,6 +1065,7 @@ static int
 ql82xx_rom_lock_d(struct qla_hw_data *ha)
 {
        int loops = 0;
+       uint32_t lock_owner = 0;
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
 
        while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
@@ -1065,8 +1074,9 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha)
                loops++;
        }
        if (loops >= 50000) {
+               lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
                ql_log(ql_log_warn, vha, 0xb010,
-                   "ROM lock failed.\n");
+                   "ROM lock failed, Lock Owner %u.\n", lock_owner);
                return -1;
        }
        return 0;
@@ -2811,12 +2821,14 @@ static void
 qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
 {
        scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+       uint32_t lock_owner = 0;
 
-       if (qla82xx_rom_lock(ha))
+       if (qla82xx_rom_lock(ha)) {
+               lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
                /* Someone else is holding the lock. */
                ql_log(ql_log_info, vha, 0xb022,
-                   "Resetting rom_lock.\n");
-
+                   "Resetting rom_lock, Lock Owner %u.\n", lock_owner);
+       }
        /*
         * Either we got the lock, or someone
         * else died while holding it.
@@ -2840,47 +2852,30 @@ static int
 qla82xx_device_bootstrap(scsi_qla_host_t *vha)
 {
        int rval = QLA_SUCCESS;
-       int i, timeout;
+       int i;
        uint32_t old_count, count;
        struct qla_hw_data *ha = vha->hw;
-       int need_reset = 0, peg_stuck = 1;
+       int need_reset = 0;
 
        need_reset = qla82xx_need_reset(ha);
 
-       old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
-
-       for (i = 0; i < 10; i++) {
-               timeout = msleep_interruptible(200);
-               if (timeout) {
-                       qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
-                               QLA8XXX_DEV_FAILED);
-                       return QLA_FUNCTION_FAILED;
-               }
-
-               count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
-               if (count != old_count)
-                       peg_stuck = 0;
-       }
-
        if (need_reset) {
                /* We are trying to perform a recovery here. */
-               if (peg_stuck)
+               if (ha->flags.isp82xx_fw_hung)
                        qla82xx_rom_lock_recovery(ha);
-               goto dev_initialize;
        } else  {
-               /* Start of day for this ha context. */
-               if (peg_stuck) {
-                       /* Either we are the first or recovery in progress. */
-                       qla82xx_rom_lock_recovery(ha);
-                       goto dev_initialize;
-               } else
-                       /* Firmware already running. */
-                       goto dev_ready;
+               old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+               for (i = 0; i < 10; i++) {
+                       msleep(200);
+                       count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
+                       if (count != old_count) {
+                               rval = QLA_SUCCESS;
+                               goto dev_ready;
+                       }
+               }
+               qla82xx_rom_lock_recovery(ha);
        }
 
-       return rval;
-
-dev_initialize:
        /* set to DEV_INITIALIZING */
        ql_log(ql_log_info, vha, 0x009e,
            "HW State: INITIALIZING.\n");
@@ -3142,18 +3137,18 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
 
        if (ql2xmdenable) {
                if (!ha->fw_dumped) {
-                       if (fw_major_version != ha->fw_major_version ||
+                       if ((fw_major_version != ha->fw_major_version ||
                            fw_minor_version != ha->fw_minor_version ||
-                           fw_subminor_version != ha->fw_subminor_version) {
+                           fw_subminor_version != ha->fw_subminor_version) ||
+                           (ha->prev_minidump_failed)) {
                                ql_dbg(ql_dbg_p3p, vha, 0xb02d,
-                                   "Firmware version differs "
-                                   "Previous version: %d:%d:%d - "
-                                   "New version: %d:%d:%d\n",
+                                   "Firmware version differs Previous version: %d:%d:%d - New version: %d:%d:%d, prev_minidump_failed: %d.\n",
                                    fw_major_version, fw_minor_version,
                                    fw_subminor_version,
                                    ha->fw_major_version,
                                    ha->fw_minor_version,
-                                   ha->fw_subminor_version);
+                                   ha->fw_subminor_version,
+                                   ha->prev_minidump_failed);
                                /* Release MiniDump resources */
                                qla82xx_md_free(vha);
                                /* ALlocate MiniDump resources */
@@ -3682,8 +3677,10 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
                        for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
                                sp = req->outstanding_cmds[cnt];
                                if (sp) {
-                                       if (!sp->u.scmd.ctx ||
-                                           (sp->flags & SRB_FCP_CMND_DMA_VALID)) {
+                                       if ((!sp->u.scmd.ctx ||
+                                           (sp->flags &
+                                               SRB_FCP_CMND_DMA_VALID)) &&
+                                               !ha->flags.isp82xx_fw_hung) {
                                                spin_unlock_irqrestore(
                                                    &ha->hardware_lock, flags);
                                                if (ha->isp_ops->abort_command(sp)) {
index 1bb93dbbccbb8c37cdf6b02fec994d72fc4642d1..59c477883a7325ba1810dd833e9c03661156ff58 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #define QLA82XX_ROMUSB_ROM_INSTR_OPCODE                (ROMUSB_ROM + 0x0004)
 #define QLA82XX_ROMUSB_GLB_CAS_RST             (ROMUSB_GLB + 0x0038)
 
-/* Lock IDs for ROM lock */
-#define ROM_LOCK_DRIVER       0x0d417340
-
 #define QLA82XX_PCI_CRB_WINDOWSIZE 0x00100000   /* all are 1MB windows */
 #define QLA82XX_PCI_CRB_WINDOW(A) \
        (QLA82XX_PCI_CRBSPACE + (A)*QLA82XX_PCI_CRB_WINDOWSIZE)
@@ -1186,6 +1183,7 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
 #define CRB_NIU_XG_PAUSE_CTL_P1        0x8
 
 #define qla82xx_get_temp_val(x)          ((x) >> 16)
+#define qla82xx_get_temp_val1(x)          ((x) & 0x0000FFFF)
 #define qla82xx_get_temp_state(x)        ((x) & 0xffff)
 #define qla82xx_encode_temp(val, state)  (((val) << 16) | (state))
 
index 86cf10815db0c838ea92cfe9ff6727b2e07c94d2..da9e3902f2190b6a61d7f06200dbe5806f15dced 100644 (file)
@@ -1,17 +1,20 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 
 #include <linux/vmalloc.h>
+#include <linux/delay.h>
 
 #include "qla_def.h"
 #include "qla_gbl.h"
 
 #include <linux/delay.h>
 
+#define TIMEOUT_100_MS 100
+
 /* 8044 Flash Read/Write functions */
 uint32_t
 qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
@@ -117,6 +120,95 @@ qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
        qla8044_wr_reg_indirect(vha, waddr, value);
 }
 
+static int
+qla8044_poll_wait_for_ready(struct scsi_qla_host *vha, uint32_t addr1,
+       uint32_t mask)
+{
+       unsigned long timeout;
+       uint32_t temp;
+
+       /* jiffies after 100ms */
+       timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+       do {
+               qla8044_rd_reg_indirect(vha, addr1, &temp);
+               if ((temp & mask) != 0)
+                       break;
+               if (time_after_eq(jiffies, timeout)) {
+                       ql_log(ql_log_warn, vha, 0xb151,
+                               "Error in processing rdmdio entry\n");
+                       return -1;
+               }
+       } while (1);
+
+       return 0;
+}
+
+static uint32_t
+qla8044_ipmdio_rd_reg(struct scsi_qla_host *vha,
+       uint32_t addr1, uint32_t addr3, uint32_t mask, uint32_t addr)
+{
+       uint32_t temp;
+       int ret = 0;
+
+       ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+       if (ret == -1)
+               return -1;
+
+       temp = (0x40000000 | addr);
+       qla8044_wr_reg_indirect(vha, addr1, temp);
+
+       ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+       if (ret == -1)
+               return 0;
+
+       qla8044_rd_reg_indirect(vha, addr3, &ret);
+
+       return ret;
+}
+
+
+static int
+qla8044_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *vha,
+       uint32_t addr1, uint32_t addr2, uint32_t addr3, uint32_t mask)
+{
+       unsigned long timeout;
+       uint32_t temp;
+
+       /* jiffies after 100 msecs */
+       timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+       do {
+               temp = qla8044_ipmdio_rd_reg(vha, addr1, addr3, mask, addr2);
+               if ((temp & 0x1) != 1)
+                       break;
+               if (time_after_eq(jiffies, timeout)) {
+                       ql_log(ql_log_warn, vha, 0xb152,
+                           "Error in processing mdiobus idle\n");
+                       return -1;
+               }
+       } while (1);
+
+       return 0;
+}
+
+static int
+qla8044_ipmdio_wr_reg(struct scsi_qla_host *vha, uint32_t addr1,
+       uint32_t addr3, uint32_t mask, uint32_t addr, uint32_t value)
+{
+       int ret = 0;
+
+       ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+       if (ret == -1)
+               return -1;
+
+       qla8044_wr_reg_indirect(vha, addr3, value);
+       qla8044_wr_reg_indirect(vha, addr1, addr);
+
+       ret = qla8044_poll_wait_for_ready(vha, addr1, mask);
+       if (ret == -1)
+               return -1;
+
+       return 0;
+}
 /*
  * qla8044_rmw_crb_reg - Read value from raddr, AND with test_mask,
  * Shift Left,Right/OR/XOR with values RMW header and write value to waddr.
@@ -356,8 +448,8 @@ qla8044_flash_lock(scsi_qla_host_t *vha)
                        lock_owner = qla8044_rd_reg(ha,
                            QLA8044_FLASH_LOCK_ID);
                        ql_log(ql_log_warn, vha, 0xb113,
-                           "%s: flash lock by %d failed, held by %d\n",
-                               __func__, ha->portnum, lock_owner);
+                           "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
+                           __func__, ha->portnum, lock_owner);
                        ret_val = QLA_FUNCTION_FAILED;
                        break;
                }
@@ -1541,7 +1633,7 @@ static void
 qla8044_need_reset_handler(struct scsi_qla_host *vha)
 {
        uint32_t dev_state = 0, drv_state, drv_active;
-       unsigned long reset_timeout, dev_init_timeout;
+       unsigned long reset_timeout;
        struct qla_hw_data *ha = vha->hw;
 
        ql_log(ql_log_fatal, vha, 0xb0c2,
@@ -1555,84 +1647,78 @@ qla8044_need_reset_handler(struct scsi_qla_host *vha)
                qla8044_idc_lock(ha);
        }
 
+       dev_state = qla8044_rd_direct(vha,
+           QLA8044_CRB_DEV_STATE_INDEX);
        drv_state = qla8044_rd_direct(vha,
            QLA8044_CRB_DRV_STATE_INDEX);
        drv_active = qla8044_rd_direct(vha,
            QLA8044_CRB_DRV_ACTIVE_INDEX);
 
        ql_log(ql_log_info, vha, 0xb0c5,
-           "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
-           __func__, vha->host_no, drv_state, drv_active);
+           "%s(%ld): drv_state = 0x%x, drv_active = 0x%x dev_state = 0x%x\n",
+           __func__, vha->host_no, drv_state, drv_active, dev_state);
 
-       if (!ha->flags.nic_core_reset_owner) {
-               ql_dbg(ql_dbg_p3p, vha, 0xb0c3,
-                   "%s(%ld): reset acknowledged\n",
-                   __func__, vha->host_no);
-               qla8044_set_rst_ready(vha);
+       qla8044_set_rst_ready(vha);
 
-               /* Non-reset owners ACK Reset and wait for device INIT state
-                * as part of Reset Recovery by Reset Owner
-                */
-               dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+       /* wait for 10 seconds for reset ack from all functions */
+       reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
 
-               do {
-                       if (time_after_eq(jiffies, dev_init_timeout)) {
-                               ql_log(ql_log_info, vha, 0xb0c4,
-                                   "%s: Non Reset owner: Reset Ack Timeout!\n",
-                                   __func__);
-                               break;
-                       }
+       do {
+               if (time_after_eq(jiffies, reset_timeout)) {
+                       ql_log(ql_log_info, vha, 0xb0c4,
+                           "%s: Function %d: Reset Ack Timeout!, drv_state: 0x%08x, drv_active: 0x%08x\n",
+                           __func__, ha->portnum, drv_state, drv_active);
+                       break;
+               }
 
-                       qla8044_idc_unlock(ha);
-                       msleep(1000);
-                       qla8044_idc_lock(ha);
+               qla8044_idc_unlock(ha);
+               msleep(1000);
+               qla8044_idc_lock(ha);
 
-                       dev_state = qla8044_rd_direct(vha,
-                                       QLA8044_CRB_DEV_STATE_INDEX);
-               } while (((drv_state & drv_active) != drv_active) &&
-                   (dev_state == QLA8XXX_DEV_NEED_RESET));
+               dev_state = qla8044_rd_direct(vha,
+                   QLA8044_CRB_DEV_STATE_INDEX);
+               drv_state = qla8044_rd_direct(vha,
+                   QLA8044_CRB_DRV_STATE_INDEX);
+               drv_active = qla8044_rd_direct(vha,
+                   QLA8044_CRB_DRV_ACTIVE_INDEX);
+       } while (((drv_state & drv_active) != drv_active) &&
+           (dev_state == QLA8XXX_DEV_NEED_RESET));
+
+       /* Remove IDC participation of functions not acknowledging */
+       if (drv_state != drv_active) {
+               ql_log(ql_log_info, vha, 0xb0c7,
+                   "%s(%ld): Function %d turning off drv_active of non-acking function 0x%x\n",
+                   __func__, vha->host_no, ha->portnum,
+                   (drv_active ^ drv_state));
+               drv_active = drv_active & drv_state;
+               qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
+                   drv_active);
        } else {
-               qla8044_set_rst_ready(vha);
-
-               /* wait for 10 seconds for reset ack from all functions */
-               reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
-
-               while ((drv_state & drv_active) != drv_active) {
-                       if (time_after_eq(jiffies, reset_timeout)) {
-                               ql_log(ql_log_info, vha, 0xb0c6,
-                                   "%s: RESET TIMEOUT!"
-                                   "drv_state: 0x%08x, drv_active: 0x%08x\n",
-                                   QLA2XXX_DRIVER_NAME, drv_state, drv_active);
-                               break;
-                       }
-
-                       qla8044_idc_unlock(ha);
-                       msleep(1000);
-                       qla8044_idc_lock(ha);
-
-                       drv_state = qla8044_rd_direct(vha,
-                           QLA8044_CRB_DRV_STATE_INDEX);
-                       drv_active = qla8044_rd_direct(vha,
-                           QLA8044_CRB_DRV_ACTIVE_INDEX);
-               }
-
-               if (drv_state != drv_active) {
-                       ql_log(ql_log_info, vha, 0xb0c7,
-                           "%s(%ld): Reset_owner turning off drv_active "
-                           "of non-acking function 0x%x\n", __func__,
-                           vha->host_no, (drv_active ^ drv_state));
-                       drv_active = drv_active & drv_state;
-                       qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
-                           drv_active);
+               /*
+                * Reset owner should execute reset recovery,
+                * if all functions acknowledged
+                */
+               if ((ha->flags.nic_core_reset_owner) &&
+                   (dev_state == QLA8XXX_DEV_NEED_RESET)) {
+                       ha->flags.nic_core_reset_owner = 0;
+                       qla8044_device_bootstrap(vha);
+                       return;
                }
+       }
 
-               /*
-               * Clear RESET OWNER, will be set at next reset
-               * by next RST_OWNER
-               */
+       /* Exit if non active function */
+       if (!(drv_active & (1 << ha->portnum))) {
                ha->flags.nic_core_reset_owner = 0;
+               return;
+       }
 
-               /* Start Reset Recovery */
+       /*
+        * Execute Reset Recovery if Reset Owner or Function 7
+        * is the only active function
+        */
+       if (ha->flags.nic_core_reset_owner ||
+           ((drv_state & drv_active) == QLA8044_FUN7_ACTIVE_INDEX)) {
+               ha->flags.nic_core_reset_owner = 0;
                qla8044_device_bootstrap(vha);
        }
 }
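
The reworked qla8044_need_reset_handler() above has every function ack the reset up front and then polls, for up to fcoe_reset_timeout seconds, until drv_state covers drv_active or the device leaves NEED_RESET, clearing the drv_active bits of functions that never ack. The earlier mdio helpers in this file use the same jiffies-deadline idiom with a 100 ms budget. A minimal sketch of that bounded poll, assuming the usual <linux/jiffies.h> and <linux/delay.h> semantics (illustrative helper, not driver code):

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/types.h>

/*
 * Poll 'done()' about once a second until it returns true or
 * 'timeout_secs' elapse; returns false on timeout.
 */
static bool example_poll_until(bool (*done)(void *arg), void *arg,
                               unsigned int timeout_secs)
{
        unsigned long deadline = jiffies + timeout_secs * HZ;

        while (!done(arg)) {
                if (time_after_eq(jiffies, deadline))
                        return false;
                msleep(1000);
        }
        return true;
}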
@@ -1655,6 +1741,19 @@ qla8044_set_drv_active(struct scsi_qla_host *vha)
        qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
 }
 
+static int
+qla8044_check_drv_active(struct scsi_qla_host *vha)
+{
+       uint32_t drv_active;
+       struct qla_hw_data *ha = vha->hw;
+
+       drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+       if (drv_active & (1 << ha->portnum))
+               return QLA_SUCCESS;
+       else
+               return QLA_TEST_FAILED;
+}
+
 static void
 qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
 {
@@ -1837,14 +1936,16 @@ qla8044_device_state_handler(struct scsi_qla_host *vha)
 
        while (1) {
                if (time_after_eq(jiffies, dev_init_timeout)) {
-                       ql_log(ql_log_warn, vha, 0xb0cf,
-                           "%s: Device Init Failed 0x%x = %s\n",
-                           QLA2XXX_DRIVER_NAME, dev_state,
-                           dev_state < MAX_STATES ?
-                           qdev_state(dev_state) : "Unknown");
-
-                       qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
-                           QLA8XXX_DEV_FAILED);
+                       if (qla8044_check_drv_active(vha) == QLA_SUCCESS) {
+                               ql_log(ql_log_warn, vha, 0xb0cf,
+                                   "%s: Device Init Failed 0x%x = %s\n",
+                                   QLA2XXX_DRIVER_NAME, dev_state,
+                                   dev_state < MAX_STATES ?
+                                   qdev_state(dev_state) : "Unknown");
+                               qla8044_wr_direct(vha,
+                                   QLA8044_CRB_DEV_STATE_INDEX,
+                                   QLA8XXX_DEV_FAILED);
+                       }
                }
 
                dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
@@ -2017,6 +2118,13 @@ qla8044_watchdog(struct scsi_qla_host *vha)
            test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
                dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
 
+               if (qla8044_check_fw_alive(vha)) {
+                       ha->flags.isp82xx_fw_hung = 1;
+                       ql_log(ql_log_warn, vha, 0xb10a,
+                           "Firmware hung.\n");
+                       qla82xx_clear_pending_mbx(vha);
+               }
+
                if (qla8044_check_temp(vha)) {
                        set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
                        ha->flags.isp82xx_fw_hung = 1;
@@ -2037,7 +2145,7 @@ qla8044_watchdog(struct scsi_qla_host *vha)
                        qla2xxx_wake_dpc(vha);
                } else  {
                        /* Check firmware health */
-                       if (qla8044_check_fw_alive(vha)) {
+                       if (ha->flags.isp82xx_fw_hung) {
                                halt_status = qla8044_rd_direct(vha,
                                        QLA8044_PEG_HALT_STATUS1_INDEX);
                                if (halt_status &
@@ -2073,12 +2181,8 @@ qla8044_watchdog(struct scsi_qla_host *vha)
                                                    __func__);
                                                set_bit(ISP_ABORT_NEEDED,
                                                    &vha->dpc_flags);
-                                               qla82xx_clear_pending_mbx(vha);
                                        }
                                }
-                               ha->flags.isp82xx_fw_hung = 1;
-                               ql_log(ql_log_warn, vha, 0xb10a,
-                                   "Firmware hung.\n");
                                qla2xxx_wake_dpc(vha);
                        }
                }
@@ -2286,8 +2390,6 @@ qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
                }
 
                if (j >= MAX_CTL_CHECK) {
-                       printk_ratelimited(KERN_ERR
-                           "%s: failed to read through agent\n", __func__);
                        write_unlock_irqrestore(&ha->hw_lock, flags);
                        return QLA_SUCCESS;
                }
@@ -2882,6 +2984,231 @@ error_exit:
        return rval;
 }
 
+static uint32_t
+qla8044_minidump_process_rddfe(struct scsi_qla_host *vha,
+       struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+       int loop_cnt;
+       uint32_t addr1, addr2, value, data, temp, wrVal;
+       uint8_t stride, stride2;
+       uint16_t count;
+       uint32_t poll, mask, data_size, modify_mask;
+       uint32_t wait_count = 0;
+
+       uint32_t *data_ptr = *d_ptr;
+
+       struct qla8044_minidump_entry_rddfe *rddfe;
+       rddfe = (struct qla8044_minidump_entry_rddfe *) entry_hdr;
+
+       addr1 = rddfe->addr_1;
+       value = rddfe->value;
+       stride = rddfe->stride;
+       stride2 = rddfe->stride2;
+       count = rddfe->count;
+
+       poll = rddfe->poll;
+       mask = rddfe->mask;
+       modify_mask = rddfe->modify_mask;
+       data_size = rddfe->data_size;
+
+       addr2 = addr1 + stride;
+
+       for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
+               qla8044_wr_reg_indirect(vha, addr1, (0x40000000 | value));
+
+               wait_count = 0;
+               while (wait_count < poll) {
+                       qla8044_rd_reg_indirect(vha, addr1, &temp);
+                       if ((temp & mask) != 0)
+                               break;
+                       wait_count++;
+               }
+
+               if (wait_count == poll) {
+                       ql_log(ql_log_warn, vha, 0xb153,
+                           "%s: TIMEOUT\n", __func__);
+                       goto error;
+               } else {
+                       qla8044_rd_reg_indirect(vha, addr2, &temp);
+                       temp = temp & modify_mask;
+                       temp = (temp | ((loop_cnt << 16) | loop_cnt));
+                       wrVal = ((temp << 16) | temp);
+
+                       qla8044_wr_reg_indirect(vha, addr2, wrVal);
+                       qla8044_wr_reg_indirect(vha, addr1, value);
+
+                       wait_count = 0;
+                       while (wait_count < poll) {
+                               qla8044_rd_reg_indirect(vha, addr1, &temp);
+                               if ((temp & mask) != 0)
+                                       break;
+                               wait_count++;
+                       }
+                       if (wait_count == poll) {
+                               ql_log(ql_log_warn, vha, 0xb154,
+                                   "%s: TIMEOUT\n", __func__);
+                               goto error;
+                       }
+
+                       qla8044_wr_reg_indirect(vha, addr1,
+                           ((0x40000000 | value) + stride2));
+                       wait_count = 0;
+                       while (wait_count < poll) {
+                               qla8044_rd_reg_indirect(vha, addr1, &temp);
+                               if ((temp & mask) != 0)
+                                       break;
+                               wait_count++;
+                       }
+
+                       if (wait_count == poll) {
+                               ql_log(ql_log_warn, vha, 0xb155,
+                                   "%s: TIMEOUT\n", __func__);
+                               goto error;
+                       }
+
+                       qla8044_rd_reg_indirect(vha, addr2, &data);
+
+                       *data_ptr++ = wrVal;
+                       *data_ptr++ = data;
+               }
+
+       }
+
+       *d_ptr = data_ptr;
+       return QLA_SUCCESS;
+
+error:
+       return -1;
+
+}
+
+static uint32_t
+qla8044_minidump_process_rdmdio(struct scsi_qla_host *vha,
+       struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+       int ret = 0;
+       uint32_t addr1, addr2, value1, value2, data, selVal;
+       uint8_t stride1, stride2;
+       uint32_t addr3, addr4, addr5, addr6, addr7;
+       uint16_t count, loop_cnt;
+       uint32_t poll, mask;
+       uint32_t *data_ptr = *d_ptr;
+
+       struct qla8044_minidump_entry_rdmdio *rdmdio;
+
+       rdmdio = (struct qla8044_minidump_entry_rdmdio *) entry_hdr;
+
+       addr1 = rdmdio->addr_1;
+       addr2 = rdmdio->addr_2;
+       value1 = rdmdio->value_1;
+       stride1 = rdmdio->stride_1;
+       stride2 = rdmdio->stride_2;
+       count = rdmdio->count;
+
+       poll = rdmdio->poll;
+       mask = rdmdio->mask;
+       value2 = rdmdio->value_2;
+
+       addr3 = addr1 + stride1;
+
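+       /*
+        * For every MDIO read: wait for the bus to go idle, program the
+        * select registers derived from addr2, wait for idle again, read
+        * the data register at addr7 and store the computed select value
+        * together with the data in the dump buffer.
+        */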
+       for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
+               ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
+                   addr3, mask);
+               if (ret == -1)
+                       goto error;
+
+               addr4 = addr2 - stride1;
+               ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr4,
+                   value2);
+               if (ret == -1)
+                       goto error;
+
+               addr5 = addr2 - (2 * stride1);
+               ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask, addr5,
+                   value1);
+               if (ret == -1)
+                       goto error;
+
+               addr6 = addr2 - (3 * stride1);
+               ret = qla8044_ipmdio_wr_reg(vha, addr1, addr3, mask,
+                   addr6, 0x2);
+               if (ret == -1)
+                       goto error;
+
+               ret = qla8044_poll_wait_ipmdio_bus_idle(vha, addr1, addr2,
+                   addr3, mask);
+               if (ret == -1)
+                       goto error;
+
+               addr7 = addr2 - (4 * stride1);
+               data = qla8044_ipmdio_rd_reg(vha, addr1, addr3,
+                   mask, addr7);
+               if (data == -1)
+                       goto error;
+
+               selVal = (value2 << 18) | (value1 << 2) | 2;
+
+               stride2 = rdmdio->stride_2;
+               *data_ptr++ = selVal;
+               *data_ptr++ = data;
+
+               value1 = value1 + stride2;
+               *d_ptr = data_ptr;
+       }
+
+       return 0;
+
+error:
+       return -1;
+}
+
+static uint32_t qla8044_minidump_process_pollwr(struct scsi_qla_host *vha,
+               struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+       uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
+       uint32_t wait_count = 0;
+       struct qla8044_minidump_entry_pollwr *pollwr_hdr;
+
+       pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
+       addr1 = pollwr_hdr->addr_1;
+       addr2 = pollwr_hdr->addr_2;
+       value1 = pollwr_hdr->value_1;
+       value2 = pollwr_hdr->value_2;
+
+       poll = pollwr_hdr->poll;
+       mask = pollwr_hdr->mask;
+
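+       /*
+        * Wait for addr1 to report the poll bit(s), perform the addr2 and
+        * addr1 writes, then wait for the poll bit(s) again before moving
+        * on to the next template entry.
+        */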
+       while (wait_count < poll) {
+               qla8044_rd_reg_indirect(vha, addr1, &r_value);
+
+               if ((r_value & poll) != 0)
+                       break;
+               wait_count++;
+       }
+
+       if (wait_count == poll) {
+               ql_log(ql_log_warn, vha, 0xb156, "%s: TIMEOUT\n", __func__);
+               goto error;
+       }
+
+       qla8044_wr_reg_indirect(vha, addr2, value2);
+       qla8044_wr_reg_indirect(vha, addr1, value1);
+
+       wait_count = 0;
+       while (wait_count < poll) {
+               qla8044_rd_reg_indirect(vha, addr1, &r_value);
+
+               if ((r_value & poll) != 0)
+                       break;
+               wait_count++;
+       }
+
+       return QLA_SUCCESS;
+
+error:
+       return -1;
+}
+
 /*
  *
  * qla8044_collect_md_data - Retrieve firmware minidump data.
@@ -3089,6 +3416,24 @@ qla8044_collect_md_data(struct scsi_qla_host *vha)
                        if (rval != QLA_SUCCESS)
                                qla8044_mark_entry_skipped(vha, entry_hdr, i);
                        break;
+               case QLA8044_RDDFE:
+                       rval = qla8044_minidump_process_rddfe(vha, entry_hdr,
+                           &data_ptr);
+                       if (rval != QLA_SUCCESS)
+                               qla8044_mark_entry_skipped(vha, entry_hdr, i);
+                       break;
+               case QLA8044_RDMDIO:
+                       rval = qla8044_minidump_process_rdmdio(vha, entry_hdr,
+                           &data_ptr);
+                       if (rval != QLA_SUCCESS)
+                               qla8044_mark_entry_skipped(vha, entry_hdr, i);
+                       break;
+               case QLA8044_POLLWR:
+                       rval = qla8044_minidump_process_pollwr(vha, entry_hdr,
+                           &data_ptr);
+                       if (rval != QLA_SUCCESS)
+                               qla8044_mark_entry_skipped(vha, entry_hdr, i);
+                       break;
                case QLA82XX_RDNOP:
                default:
                        qla8044_mark_entry_skipped(vha, entry_hdr, i);
@@ -3110,6 +3455,7 @@ skip_nxt_entry:
                    "Dump data mismatch: Data collected: "
                    "[0x%x], total_data_size:[0x%x]\n",
                    data_collected, ha->md_dump_size);
+               rval = QLA_FUNCTION_FAILED;
                goto md_failed;
        }
 
@@ -3134,10 +3480,12 @@ qla8044_get_minidump(struct scsi_qla_host *vha)
 
        if (!qla8044_collect_md_data(vha)) {
                ha->fw_dumped = 1;
+               ha->prev_minidump_failed = 0;
        } else {
                ql_log(ql_log_fatal, vha, 0xb0db,
                    "%s: Unable to collect minidump\n",
                    __func__);
+               ha->prev_minidump_failed = 1;
        }
 }
 
index 2ab2eabab90874918b70aebabcae53d8c7f4a0ea..ada36057d7cde4727653cf783b165d707d752e86 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 #define QLA8044_LINK_SPEED(f)          (0x36E0+(((f) >> 2) * 4))
 #define QLA8044_MAX_LINK_SPEED(f)       (0x36F0+(((f) / 4) * 4))
 #define QLA8044_LINK_SPEED_FACTOR      10
+#define QLA8044_FUN7_ACTIVE_INDEX      0x80
 
 /* FLASH API Defines */
 #define QLA8044_FLASH_MAX_WAIT_USEC    100
@@ -431,6 +432,50 @@ struct qla8044_minidump_entry_pollrd {
        uint32_t rsvd_1;
 } __packed;
 
+struct qla8044_minidump_entry_rddfe {
+       struct qla8044_minidump_entry_hdr h;
+       uint32_t addr_1;
+       uint32_t value;
+       uint8_t stride;
+       uint8_t stride2;
+       uint16_t count;
+       uint32_t poll;
+       uint32_t mask;
+       uint32_t modify_mask;
+       uint32_t data_size;
+       uint32_t rsvd;
+
+} __packed;
+
+struct qla8044_minidump_entry_rdmdio {
+       struct qla8044_minidump_entry_hdr h;
+
+       uint32_t addr_1;
+       uint32_t addr_2;
+       uint32_t value_1;
+       uint8_t stride_1;
+       uint8_t stride_2;
+       uint16_t count;
+       uint32_t poll;
+       uint32_t mask;
+       uint32_t value_2;
+       uint32_t data_size;
+
+} __packed;
+
+struct qla8044_minidump_entry_pollwr {
+       struct qla8044_minidump_entry_hdr h;
+       uint32_t addr_1;
+       uint32_t addr_2;
+       uint32_t value_1;
+       uint32_t value_2;
+       uint32_t poll;
+       uint32_t mask;
+       uint32_t data_size;
+       uint32_t rsvd;
+
+}  __packed;
+
 /* RDMUX2 Entry */
 struct qla8044_minidump_entry_rdmux2 {
        struct qla8044_minidump_entry_hdr h;
@@ -516,6 +561,9 @@ static const uint32_t qla8044_reg_tbl[] = {
 #define QLA8044_DBG_RSVD_ARRAY_LEN              8
 #define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN        16
 #define QLA8044_SS_PCI_INDEX                    0
+#define QLA8044_RDDFE          38
+#define QLA8044_RDMDIO         39
+#define QLA8044_POLLWR         40
 
 struct qla8044_minidump_template_hdr {
        uint32_t entry_type;
index afc84814e9bb3b5db7682c4e5b7751edd8b051e4..d96bfb55e57b25359b1180140e2973ae1af2a045 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -616,7 +616,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
 
        if (sp->flags & SRB_CRC_CTX_DSD_VALID) {
                /* List assured to be having elements */
-               qla2x00_clean_dsd_pool(ha, sp);
+               qla2x00_clean_dsd_pool(ha, sp, NULL);
                sp->flags &= ~SRB_CRC_CTX_DSD_VALID;
        }
 
@@ -781,7 +781,7 @@ static int
 qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd)
 {
 #define ABORT_POLLING_PERIOD   1000
-#define ABORT_WAIT_ITER                ((10 * 1000) / (ABORT_POLLING_PERIOD))
+#define ABORT_WAIT_ITER                ((2 * 1000) / (ABORT_POLLING_PERIOD))
        unsigned long wait_iter = ABORT_WAIT_ITER;
        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
        struct qla_hw_data *ha = vha->hw;
@@ -844,11 +844,8 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
 }
 
 /*
- * qla2x00_wait_for_reset_ready
- *    Wait till the HBA is online after going through
- *    <= MAX_RETRIES_OF_ISP_ABORT  or
- *    finally HBA is disabled ie marked offline or flash
- *    operations are in progress.
+ * qla2x00_wait_for_hba_ready
+ * Wait until the HBA is ready before doing the driver unload.
  *
  * Input:
  *     ha - pointer to host adapter structure
@@ -857,35 +854,15 @@ qla2x00_wait_for_hba_online(scsi_qla_host_t *vha)
  *    Does context switching-Release SPIN_LOCK
  *    (if any) before calling this routine.
  *
- * Return:
- *    Success (Adapter is online/no flash ops) : 0
- *    Failed  (Adapter is offline/disabled/flash ops in progress) : 1
  */
-static int
-qla2x00_wait_for_reset_ready(scsi_qla_host_t *vha)
+static void
+qla2x00_wait_for_hba_ready(scsi_qla_host_t *vha)
 {
-       int             return_status;
-       unsigned long   wait_online;
        struct qla_hw_data *ha = vha->hw;
-       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
-       wait_online = jiffies + (MAX_LOOP_TIMEOUT * HZ);
-       while (((test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags)) ||
-           test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
-           test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
-           ha->optrom_state != QLA_SWAITING ||
-           ha->dpc_active) && time_before(jiffies, wait_online))
+       while ((!(vha->flags.online) || ha->dpc_active ||
+           ha->flags.mbox_busy))
                msleep(1000);
-
-       if (base_vha->flags.online &&  ha->optrom_state == QLA_SWAITING)
-               return_status = QLA_SUCCESS;
-       else
-               return_status = QLA_FUNCTION_FAILED;
-
-       ql_dbg(ql_dbg_taskm, vha, 0x8019,
-           "%s return status=%d.\n", __func__, return_status);
-
-       return return_status;
 }
 
 int
@@ -945,7 +922,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        int ret;
        unsigned int id, lun;
        unsigned long flags;
-       int wait = 0;
+       int rval, wait = 0;
        struct qla_hw_data *ha = vha->hw;
 
        if (!CMD_SP(cmd))
@@ -974,10 +951,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        sp_get(sp);
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       if (ha->isp_ops->abort_command(sp)) {
-               ret = FAILED;
+       rval = ha->isp_ops->abort_command(sp);
+       if (rval) {
+               if (rval == QLA_FUNCTION_PARAMETER_ERROR) {
+                       /*
+                        * Decrement the ref_count since we can't find the
+                        * command
+                        */
+                       atomic_dec(&sp->ref_count);
+                       ret = SUCCESS;
+               } else
+                       ret = FAILED;
+
                ql_dbg(ql_dbg_taskm, vha, 0x8003,
-                   "Abort command mbx failed cmd=%p.\n", cmd);
+                   "Abort command mbx failed cmd=%p, rval=%x.\n", cmd, rval);
        } else {
                ql_dbg(ql_dbg_taskm, vha, 0x8004,
                    "Abort command mbx success cmd=%p.\n", cmd);
@@ -985,6 +972,12 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        }
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
+       /*
+        * Clear the slot in the outstanding_cmds array if we can't find the
+        * command to reclaim the resources.
+        */
+       if (rval == QLA_FUNCTION_PARAMETER_ERROR)
+               vha->req->outstanding_cmds[sp->handle] = NULL;
        sp->done(ha, sp, 0);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -1236,7 +1229,11 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
        ql_log(ql_log_info, vha, 0x8018,
            "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
 
-       if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
+       /*
+        * No point in issuing another reset if one is active.  Also do not
+        * attempt a reset if we are updating flash.
+        */
+       if (qla2x00_reset_active(vha) || ha->optrom_state != QLA_SWAITING)
                goto eh_host_reset_lock;
 
        if (vha != base_vha) {
@@ -2270,6 +2267,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
                ha->device_type |= DT_IIDMA;
                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
                break;
+       case PCI_DEVICE_ID_QLOGIC_ISP2271:
+               ha->device_type |= DT_ISP2271;
+               ha->device_type |= DT_ZIO_SUPPORTED;
+               ha->device_type |= DT_FWI2;
+               ha->device_type |= DT_IIDMA;
+               ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+               break;
        }
 
        if (IS_QLA82XX(ha))
@@ -2346,7 +2350,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
            pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
            pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044 ||
-           pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071) {
+           pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2071 ||
+           pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2271) {
                bars = pci_select_bars(pdev, IORESOURCE_MEM);
                mem_only = 1;
                ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2877,6 +2882,7 @@ skip_dpc:
 
        base_vha->flags.init_done = 1;
        base_vha->flags.online = 1;
+       ha->prev_minidump_failed = 0;
 
        ql_dbg(ql_dbg_init, base_vha, 0x00f2,
            "Init done and hba is online.\n");
@@ -3136,6 +3142,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
        base_vha = pci_get_drvdata(pdev);
        ha = base_vha->hw;
 
+       qla2x00_wait_for_hba_ready(base_vha);
+
        set_bit(UNLOADING, &base_vha->dpc_flags);
 
        if (IS_QLAFX00(ha))
@@ -3645,6 +3653,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha)
        ha->eft = NULL;
        ha->eft_dma = 0;
        ha->fw_dumped = 0;
+       ha->fw_dump_cap_flags = 0;
        ha->fw_dump_reading = 0;
        ha->fw_dump = NULL;
        ha->fw_dump_len = 0;
@@ -4913,12 +4922,13 @@ qla2x00_do_dpc(void *data)
                                if (qlafx00_reset_initialize(base_vha)) {
                                        /* Failed. Abort isp later. */
                                        if (!test_bit(UNLOADING,
-                                           &base_vha->dpc_flags))
+                                           &base_vha->dpc_flags)) {
                                                set_bit(ISP_UNRECOVERABLE,
                                                    &base_vha->dpc_flags);
                                                ql_dbg(ql_dbg_dpc, base_vha,
                                                    0x4021,
                                                    "Reset Recovery Failed\n");
+                                       }
                                }
                        }
 
@@ -5077,8 +5087,10 @@ intr_on_check:
                        ha->isp_ops->enable_intrs(ha);
 
                if (test_and_clear_bit(BEACON_BLINK_NEEDED,
-                                       &base_vha->dpc_flags))
-                       ha->isp_ops->beacon_blink(base_vha);
+                                       &base_vha->dpc_flags)) {
+                       if (ha->beacon_blink_led == 1)
+                               ha->isp_ops->beacon_blink(base_vha);
+               }
 
                if (!IS_QLAFX00(ha))
                        qla2x00_do_dpc_all_vps(base_vha);
@@ -5325,7 +5337,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
 #define FW_ISP82XX     7
 #define FW_ISP2031     8
 #define FW_ISP8031     9
-#define FW_ISP2071     10
+#define FW_ISP27XX     10
 
 #define FW_FILE_ISP21XX        "ql2100_fw.bin"
 #define FW_FILE_ISP22XX        "ql2200_fw.bin"
@@ -5337,7 +5349,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
 #define FW_FILE_ISP82XX        "ql8200_fw.bin"
 #define FW_FILE_ISP2031        "ql2600_fw.bin"
 #define FW_FILE_ISP8031        "ql8300_fw.bin"
-#define FW_FILE_ISP2071        "ql2700_fw.bin"
+#define FW_FILE_ISP27XX        "ql2700_fw.bin"
 
 
 static DEFINE_MUTEX(qla_fw_lock);
@@ -5353,7 +5365,7 @@ static struct fw_blob qla_fw_blobs[FW_BLOBS] = {
        { .name = FW_FILE_ISP82XX, },
        { .name = FW_FILE_ISP2031, },
        { .name = FW_FILE_ISP8031, },
-       { .name = FW_FILE_ISP2071, },
+       { .name = FW_FILE_ISP27XX, },
 };
 
 struct fw_blob *
@@ -5382,8 +5394,8 @@ qla2x00_request_firmware(scsi_qla_host_t *vha)
                blob = &qla_fw_blobs[FW_ISP2031];
        } else if (IS_QLA8031(ha)) {
                blob = &qla_fw_blobs[FW_ISP8031];
-       } else if (IS_QLA2071(ha)) {
-               blob = &qla_fw_blobs[FW_ISP2071];
+       } else if (IS_QLA27XX(ha)) {
+               blob = &qla_fw_blobs[FW_ISP27XX];
        } else {
                return NULL;
        }
@@ -5714,6 +5726,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
        { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2071) },
+       { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP2271) },
        { 0 },
 };
 MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
index 46ef0ac48f44f38d62d7df594ead31d12b1fdd42..2fb7ebfbbc381b66f6b493f4baedffadae2ed106 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
index f28123e8ed6511b52bc565826ac9edb0428988af..bca173e56f1610e3f854515ab24493a814ebfd03 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -1727,11 +1727,8 @@ qla83xx_beacon_blink(struct scsi_qla_host *vha)
        if (IS_QLA2031(ha)) {
                led_select_value = qla83xx_select_led_port(ha);
 
-               qla83xx_wr_reg(vha, led_select_value, 0x40002000);
-               qla83xx_wr_reg(vha, led_select_value + 4, 0x40002000);
-               msleep(1000);
-               qla83xx_wr_reg(vha, led_select_value, 0x40004000);
-               qla83xx_wr_reg(vha, led_select_value + 4, 0x40004000);
+               qla83xx_wr_reg(vha, led_select_value, 0x40000230);
+               qla83xx_wr_reg(vha, led_select_value + 4, 0x40000230);
        } else if (IS_QLA8031(ha)) {
                led_select_value = qla83xx_select_led_port(ha);
 
index 0cb73074c1997409c4e63f6d39ac51e641eff088..b1d10f9935c7caac0f85cc7cdb17ccee12c9e8df 100644 (file)
@@ -182,6 +182,11 @@ struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
 void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
        struct atio_from_isp *atio)
 {
+       ql_dbg(ql_dbg_tgt, vha, 0xe072,
+               "%s: qla_target(%d): type %x ox_id %04x\n",
+               __func__, vha->vp_idx, atio->u.raw.entry_type,
+               be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id));
+
        switch (atio->u.raw.entry_type) {
        case ATIO_TYPE7:
        {
@@ -236,6 +241,10 @@ void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
 void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
 {
        switch (pkt->entry_type) {
+       case CTIO_CRC2:
+               ql_dbg(ql_dbg_tgt, vha, 0xe073,
+                       "qla_target(%d):%s: CRC2 Response pkt\n",
+                       vha->vp_idx, __func__);
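+               /* fall through - CRC2 uses the CTIO7 handling below */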
        case CTIO_TYPE7:
        {
                struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
@@ -1350,13 +1359,42 @@ static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
 
        prm->cmd->sg_mapped = 1;
 
-       /*
-        * If greater than four sg entries then we need to allocate
-        * the continuation entries
-        */
-       if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
-               prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
-                   prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);
+       if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL) {
+               /*
+                * If greater than four sg entries then we need to allocate
+                * the continuation entries
+                */
+               if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
+                       prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
+                       prm->tgt->datasegs_per_cmd,
+                       prm->tgt->datasegs_per_cont);
+       } else {
+               /* DIF */
+               if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
+                   (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
+                       prm->seg_cnt = DIV_ROUND_UP(cmd->bufflen, cmd->blk_sz);
+                       prm->tot_dsds = prm->seg_cnt;
+               } else
+                       prm->tot_dsds = prm->seg_cnt;
+
+               if (cmd->prot_sg_cnt) {
+                       prm->prot_sg      = cmd->prot_sg;
+                       prm->prot_seg_cnt = pci_map_sg(prm->tgt->ha->pdev,
+                               cmd->prot_sg, cmd->prot_sg_cnt,
+                               cmd->dma_data_direction);
+                       if (unlikely(prm->prot_seg_cnt == 0))
+                               goto out_err;
+
+                       if ((cmd->se_cmd.prot_op == TARGET_PROT_DIN_INSERT) ||
+                           (cmd->se_cmd.prot_op == TARGET_PROT_DOUT_STRIP)) {
+                               /* DIF bundling is not supported here */
+                               prm->prot_seg_cnt = DIV_ROUND_UP(cmd->bufflen,
+                                                               cmd->blk_sz);
+                               prm->tot_dsds += prm->prot_seg_cnt;
+                       } else
+                               prm->tot_dsds += prm->prot_seg_cnt;
+               }
+       }
 
        ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
            prm->seg_cnt, prm->req_cnt);
@@ -1377,6 +1415,16 @@ static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
        BUG_ON(!cmd->sg_mapped);
        pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
        cmd->sg_mapped = 0;
+
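+       /* For T10-DIF commands, also release the protection SG mapping, any
+        * allocated DSD list entries and the CRC context.
+        */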
+       if (cmd->prot_sg_cnt)
+               pci_unmap_sg(ha->pdev, cmd->prot_sg, cmd->prot_sg_cnt,
+                       cmd->dma_data_direction);
+
+       if (cmd->ctx_dsd_alloced)
+               qla2x00_clean_dsd_pool(ha, NULL, cmd);
+
+       if (cmd->ctx)
+               dma_pool_free(ha->dl_dma_pool, cmd->ctx, cmd->ctx->crc_ctx_dma);
 }
 
 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
@@ -1665,8 +1713,9 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
                return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
        }
 
-       ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
-           vha->vp_idx, cmd->tag);
+       ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
+               vha->vp_idx, cmd->tag,
+               be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
 
        prm->cmd = cmd;
        prm->tgt = tgt;
@@ -1902,6 +1951,323 @@ skip_explict_conf:
        /* Sense with len > 24, is it possible ??? */
 }
 
+
+
+/* DIF support */
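+/*
+ * Decide whether the HBA should check protection information for this
+ * command, based on the protection operation and the ql2xenablehba_err_chk
+ * module parameter.
+ */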
+static inline int
+qlt_hba_err_chk_enabled(struct se_cmd *se_cmd)
+{
+       /*
+        * Uncomment when corresponding SCSI changes are done.
+        *
+        if (!sp->cmd->prot_chk)
+        return 0;
+        *
+        */
+       switch (se_cmd->prot_op) {
+       case TARGET_PROT_DOUT_INSERT:
+       case TARGET_PROT_DIN_STRIP:
+               if (ql2xenablehba_err_chk >= 1)
+                       return 1;
+               break;
+       case TARGET_PROT_DOUT_PASS:
+       case TARGET_PROT_DIN_PASS:
+               if (ql2xenablehba_err_chk >= 2)
+                       return 1;
+               break;
+       case TARGET_PROT_DIN_INSERT:
+       case TARGET_PROT_DOUT_STRIP:
+               return 1;
+       default:
+               break;
+       }
+       return 0;
+}
+
+/*
+ * qlt_set_t10dif_tags - Extract Ref and App tags from the SCSI command
+ */
+static inline void
+qlt_set_t10dif_tags(struct se_cmd *se_cmd, struct crc_context *ctx)
+{
+       uint32_t lba = 0xffffffff & se_cmd->t_task_lba;
+
+       /* Wait until the Mode Sense/Select commands (mode page Ah, subpage 2)
+        * have been implemented by TCM before the AppTag is available.
+        * Look for modesense_handlers[].
+        */
+       ctx->app_tag = __constant_cpu_to_le16(0);
+       ctx->app_tag_mask[0] = 0x0;
+       ctx->app_tag_mask[1] = 0x0;
+
+       switch (se_cmd->prot_type) {
+       case TARGET_DIF_TYPE0_PROT:
+               /*
+                * No check for ql2xenablehba_err_chk, as it would be an
+                * I/O error if hba tag generation is not done.
+                */
+               ctx->ref_tag = cpu_to_le32(lba);
+
+               if (!qlt_hba_err_chk_enabled(se_cmd))
+                       break;
+
+               /* enable ALL bytes of the ref tag */
+               ctx->ref_tag_mask[0] = 0xff;
+               ctx->ref_tag_mask[1] = 0xff;
+               ctx->ref_tag_mask[2] = 0xff;
+               ctx->ref_tag_mask[3] = 0xff;
+               break;
+       /*
+        * For Type 1 protection: 16 bit GUARD tag, 32 bit REF tag, and
+        * 16 bit app tag.
+        */
+       case TARGET_DIF_TYPE1_PROT:
+               ctx->ref_tag = cpu_to_le32(lba);
+
+               if (!qlt_hba_err_chk_enabled(se_cmd))
+                       break;
+
+               /* enable ALL bytes of the ref tag */
+               ctx->ref_tag_mask[0] = 0xff;
+               ctx->ref_tag_mask[1] = 0xff;
+               ctx->ref_tag_mask[2] = 0xff;
+               ctx->ref_tag_mask[3] = 0xff;
+               break;
+       /*
+        * For Type 2 protection: 16 bit GUARD tag plus a 32 bit REF tag
+        * that has to match the LBA in the CDB + N
+        */
+       case TARGET_DIF_TYPE2_PROT:
+               ctx->ref_tag = cpu_to_le32(lba);
+
+               if (!qlt_hba_err_chk_enabled(se_cmd))
+                       break;
+
+               /* enable ALL bytes of the ref tag */
+               ctx->ref_tag_mask[0] = 0xff;
+               ctx->ref_tag_mask[1] = 0xff;
+               ctx->ref_tag_mask[2] = 0xff;
+               ctx->ref_tag_mask[3] = 0xff;
+               break;
+
+       /* For Type 3 protection: 16 bit GUARD only */
+       case TARGET_DIF_TYPE3_PROT:
+               ctx->ref_tag_mask[0] = ctx->ref_tag_mask[1] =
+                       ctx->ref_tag_mask[2] = ctx->ref_tag_mask[3] = 0x00;
+               break;
+       }
+}
+
+
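+/*
+ * Build a CTIO Type CRC_2 IOCB for a command carrying T10-DIF protection
+ * information: fill in the CRC context, select the firmware protection
+ * options and walk the data (and, when bundling, protection) scatterlists
+ * into DSD lists.
+ */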
+static inline int
+qlt_build_ctio_crc2_pkt(struct qla_tgt_prm *prm, scsi_qla_host_t *vha)
+{
+       uint32_t                *cur_dsd;
+       int                     sgc;
+       uint32_t                transfer_length = 0;
+       uint32_t                data_bytes;
+       uint32_t                dif_bytes;
+       uint8_t                 bundling = 1;
+       uint8_t                 *clr_ptr;
+       struct crc_context      *crc_ctx_pkt = NULL;
+       struct qla_hw_data      *ha;
+       struct ctio_crc2_to_fw  *pkt;
+       dma_addr_t              crc_ctx_dma;
+       uint16_t                fw_prot_opts = 0;
+       struct qla_tgt_cmd      *cmd = prm->cmd;
+       struct se_cmd           *se_cmd = &cmd->se_cmd;
+       uint32_t h;
+       struct atio_from_isp *atio = &prm->cmd->atio;
+
+       sgc = 0;
+       ha = vha->hw;
+
+       pkt = (struct ctio_crc2_to_fw *)vha->req->ring_ptr;
+       prm->pkt = pkt;
+       memset(pkt, 0, sizeof(*pkt));
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe071,
+               "qla_target(%d):%s: se_cmd[%p] CRC2 prot_op[0x%x] cmd prot sg:cnt[%p:%x] lba[%llu]\n",
+               vha->vp_idx, __func__, se_cmd, se_cmd->prot_op,
+               prm->prot_sg, prm->prot_seg_cnt, se_cmd->t_task_lba);
+
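+       /* Bundling (separate data and protection DSD lists) is not used for
+        * the insert/strip cases; see the !bundling handling further down.
+        */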
+       if ((se_cmd->prot_op == TARGET_PROT_DIN_INSERT) ||
+           (se_cmd->prot_op == TARGET_PROT_DOUT_STRIP))
+               bundling = 0;
+
+       /* Compute the DIF length and adjust the data length to include protection */
+       data_bytes = cmd->bufflen;
+       dif_bytes  = (data_bytes / cmd->blk_sz) * 8;
+
+       switch (se_cmd->prot_op) {
+       case TARGET_PROT_DIN_INSERT:
+       case TARGET_PROT_DOUT_STRIP:
+               transfer_length = data_bytes;
+               data_bytes += dif_bytes;
+               break;
+
+       case TARGET_PROT_DIN_STRIP:
+       case TARGET_PROT_DOUT_INSERT:
+       case TARGET_PROT_DIN_PASS:
+       case TARGET_PROT_DOUT_PASS:
+               transfer_length = data_bytes + dif_bytes;
+               break;
+
+       default:
+               BUG();
+               break;
+       }
+
+       if (!qlt_hba_err_chk_enabled(se_cmd))
+               fw_prot_opts |= 0x10; /* Disable Guard tag checking */
+       /* HBA error checking enabled */
+       else if (IS_PI_UNINIT_CAPABLE(ha)) {
+               if ((se_cmd->prot_type == TARGET_DIF_TYPE1_PROT) ||
+                   (se_cmd->prot_type == TARGET_DIF_TYPE2_PROT))
+                       fw_prot_opts |= PO_DIS_VALD_APP_ESC;
+               else if (se_cmd->prot_type == TARGET_DIF_TYPE3_PROT)
+                       fw_prot_opts |= PO_DIS_VALD_APP_REF_ESC;
+       }
+
+       switch (se_cmd->prot_op) {
+       case TARGET_PROT_DIN_INSERT:
+       case TARGET_PROT_DOUT_INSERT:
+               fw_prot_opts |= PO_MODE_DIF_INSERT;
+               break;
+       case TARGET_PROT_DIN_STRIP:
+       case TARGET_PROT_DOUT_STRIP:
+               fw_prot_opts |= PO_MODE_DIF_REMOVE;
+               break;
+       case TARGET_PROT_DIN_PASS:
+       case TARGET_PROT_DOUT_PASS:
+               fw_prot_opts |= PO_MODE_DIF_PASS;
+               /* FUTURE: does tcm require T10CRC<->IPCKSUM conversion? */
+               break;
+       default:/* Normal Request */
+               fw_prot_opts |= PO_MODE_DIF_PASS;
+               break;
+       }
+
+
+       /* ---- PKT ---- */
+       /* Update entry type to indicate Command Type CRC_2 IOCB */
+       pkt->entry_type  = CTIO_CRC2;
+       pkt->entry_count = 1;
+       pkt->vp_index = vha->vp_idx;
+
+       h = qlt_make_handle(vha);
+       if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
+               /*
+                * CTIO type 7 from the firmware doesn't provide a way to
+                * know the initiator's LOOP ID, hence we can't find
+                * the session and, so, the command.
+                */
+               return -EAGAIN;
+       } else
+               ha->tgt.cmds[h-1] = prm->cmd;
+
+
+       pkt->handle  = h | CTIO_COMPLETION_HANDLE_MARK;
+       pkt->nport_handle = prm->cmd->loop_id;
+       pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
+       pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
+       pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
+       pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
+       pkt->exchange_addr   = atio->u.isp24.exchange_addr;
+       pkt->ox_id  = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       pkt->flags |= (atio->u.isp24.attr << 9);
+       pkt->relative_offset = cpu_to_le32(prm->cmd->offset);
+
+       /* Set transfer direction */
+       if (cmd->dma_data_direction == DMA_TO_DEVICE)
+               pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN);
+       else if (cmd->dma_data_direction == DMA_FROM_DEVICE)
+               pkt->flags = __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT);
+
+
+       pkt->dseg_count = prm->tot_dsds;
+       /* Fibre channel byte count */
+       pkt->transfer_length = cpu_to_le32(transfer_length);
+
+
+       /* ----- CRC context -------- */
+
+       /* Allocate CRC context from global pool */
+       crc_ctx_pkt = cmd->ctx =
+           dma_pool_alloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
+
+       if (!crc_ctx_pkt)
+               goto crc_queuing_error;
+
+       /* Zero out CTX area. */
+       clr_ptr = (uint8_t *)crc_ctx_pkt;
+       memset(clr_ptr, 0, sizeof(*crc_ctx_pkt));
+
+       crc_ctx_pkt->crc_ctx_dma = crc_ctx_dma;
+       INIT_LIST_HEAD(&crc_ctx_pkt->dsd_list);
+
+       /* Set handle */
+       crc_ctx_pkt->handle = pkt->handle;
+
+       qlt_set_t10dif_tags(se_cmd, crc_ctx_pkt);
+
+       pkt->crc_context_address[0] = cpu_to_le32(LSD(crc_ctx_dma));
+       pkt->crc_context_address[1] = cpu_to_le32(MSD(crc_ctx_dma));
+       pkt->crc_context_len = CRC_CONTEXT_LEN_FW;
+
+
+       if (!bundling) {
+               cur_dsd = (uint32_t *) &crc_ctx_pkt->u.nobundling.data_address;
+       } else {
+               /*
+                * Configure bundling if the protection data needs to be
+                * fetched with interleaving PCI accesses.
+                */
+               fw_prot_opts |= PO_ENABLE_DIF_BUNDLING;
+               crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
+               crc_ctx_pkt->u.bundling.dseg_count =
+                       cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt);
+               cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.data_address;
+       }
+
+       /* Finish the common fields of CRC pkt */
+       crc_ctx_pkt->blk_size   = cpu_to_le16(cmd->blk_sz);
+       crc_ctx_pkt->prot_opts  = cpu_to_le16(fw_prot_opts);
+       crc_ctx_pkt->byte_count = cpu_to_le32(data_bytes);
+       crc_ctx_pkt->guard_seed = __constant_cpu_to_le16(0);
+
+
+       /* Walks data segments */
+       pkt->flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DSD_PTR);
+
+       if (!bundling && prm->prot_seg_cnt) {
+               if (qla24xx_walk_and_build_sglist_no_difb(ha, NULL, cur_dsd,
+                       prm->tot_dsds, cmd))
+                       goto crc_queuing_error;
+       } else if (qla24xx_walk_and_build_sglist(ha, NULL, cur_dsd,
+               (prm->tot_dsds - prm->prot_seg_cnt), cmd))
+               goto crc_queuing_error;
+
+       if (bundling && prm->prot_seg_cnt) {
+               /* Walks dif segments */
+               pkt->add_flags |=
+                       __constant_cpu_to_le16(CTIO_CRC2_AF_DIF_DSD_ENA);
+
+               cur_dsd = (uint32_t *) &crc_ctx_pkt->u.bundling.dif_address;
+               if (qla24xx_walk_and_build_prot_sglist(ha, NULL, cur_dsd,
+                       prm->prot_seg_cnt, cmd))
+                       goto crc_queuing_error;
+       }
+       return QLA_SUCCESS;
+
+crc_queuing_error:
+       /* Cleanup will be performed by the caller */
+
+       return QLA_FUNCTION_FAILED;
+}
+
+
 /*
  * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
  * QLA_TGT_XMIT_STATUS for >= 24xx silicon
@@ -1921,9 +2287,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
        qlt_check_srr_debug(cmd, &xmit_type);
 
        ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
-           "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
-           "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
-           1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
+           "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, cmd->dma_data_direction=%d se_cmd[%p]\n",
+           (xmit_type & QLA_TGT_XMIT_STATUS) ?
+           1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction,
+           &cmd->se_cmd);
 
        res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
            &full_req_cnt);
@@ -1941,7 +2308,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
        if (unlikely(res))
                goto out_unmap_unlock;
 
-       res = qlt_24xx_build_ctio_pkt(&prm, vha);
+       if (cmd->se_cmd.prot_op && (xmit_type & QLA_TGT_XMIT_DATA))
+               res = qlt_build_ctio_crc2_pkt(&prm, vha);
+       else
+               res = qlt_24xx_build_ctio_pkt(&prm, vha);
        if (unlikely(res != 0))
                goto out_unmap_unlock;
 
@@ -1953,7 +2323,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                    __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
                        CTIO7_FLAGS_STATUS_MODE_0);
 
-               qlt_load_data_segments(&prm, vha);
+               if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
+                       qlt_load_data_segments(&prm, vha);
 
                if (prm.add_status_pkt == 0) {
                        if (xmit_type & QLA_TGT_XMIT_STATUS) {
@@ -1983,8 +2354,14 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                        ql_dbg(ql_dbg_tgt, vha, 0xe019,
                            "Building additional status packet\n");
 
+                       /*
+                        * T10-DIF: ctio_crc2_to_fw overlays on top of
+                        * ctio7_to_24xx
+                        */
                        memcpy(ctio, pkt, sizeof(*ctio));
+                       /* reset back to CTIO7 */
                        ctio->entry_count = 1;
+                       ctio->entry_type = CTIO_TYPE7;
                        ctio->dseg_count = 0;
                        ctio->u.status1.flags &= ~__constant_cpu_to_le16(
                            CTIO7_FLAGS_DATA_IN);
@@ -1993,6 +2370,11 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
                        pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
                        pkt->u.status0.flags |= __constant_cpu_to_le16(
                            CTIO7_FLAGS_DONT_RET_CTIO);
+
+                       /* qlt_24xx_init_ctio_to_isp will correct
+                        * all necessary fields that are part of CTIO7.
+                        * There should be no residual CTIO-CRC2 data.
+                        */
                        qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
                            &prm);
                        pr_debug("Status CTIO7: %p\n", ctio);
@@ -2041,8 +2423,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
        if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
                return -EIO;
 
-       ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
-           (int)vha->vp_idx);
+       ql_dbg(ql_dbg_tgt, vha, 0xe01b,
+               "%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n",
+               __func__, (int)vha->vp_idx, &cmd->se_cmd,
+               be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
 
        /* Calculate number of entries and segments required */
        if (qlt_pci_map_calc_cnt(&prm) != 0)
@@ -2054,14 +2438,19 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
        res = qlt_check_reserve_free_req(vha, prm.req_cnt);
        if (res != 0)
                goto out_unlock_free_unmap;
+       if (cmd->se_cmd.prot_op)
+               res = qlt_build_ctio_crc2_pkt(&prm, vha);
+       else
+               res = qlt_24xx_build_ctio_pkt(&prm, vha);
 
-       res = qlt_24xx_build_ctio_pkt(&prm, vha);
        if (unlikely(res != 0))
                goto out_unlock_free_unmap;
        pkt = (struct ctio7_to_24xx *)prm.pkt;
        pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
            CTIO7_FLAGS_STATUS_MODE_0);
-       qlt_load_data_segments(&prm, vha);
+
+       if (cmd->se_cmd.prot_op == TARGET_PROT_NORMAL)
+               qlt_load_data_segments(&prm, vha);
 
        cmd->state = QLA_TGT_STATE_NEED_DATA;
 
@@ -2079,6 +2468,143 @@ out_unlock_free_unmap:
 }
 EXPORT_SYMBOL(qlt_rdy_to_xfer);
 
+
+/*
+ * Checks the guard or meta-data for the type of error
+ * detected by the HBA.
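+ *
+ * Returns 0 when the failing blocks carry the escape tag values and the
+ * command should be completed with SCSI GOOD status; returns 1 for a real
+ * guard, reference tag or application tag error.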
+ */
+static inline int
+qlt_handle_dif_error(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd,
+               struct ctio_crc_from_fw *sts)
+{
+       uint8_t         *ap = &sts->actual_dif[0];
+       uint8_t         *ep = &sts->expected_dif[0];
+       uint32_t        e_ref_tag, a_ref_tag;
+       uint16_t        e_app_tag, a_app_tag;
+       uint16_t        e_guard, a_guard;
+       uint64_t        lba = cmd->se_cmd.t_task_lba;
+
+       a_guard   = be16_to_cpu(*(uint16_t *)(ap + 0));
+       a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2));
+       a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4));
+
+       e_guard   = be16_to_cpu(*(uint16_t *)(ep + 0));
+       e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2));
+       e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4));
+
+       ql_dbg(ql_dbg_tgt, vha, 0xe075,
+           "iocb(s) %p Returned STATUS.\n", sts);
+
+       ql_dbg(ql_dbg_tgt, vha, 0xf075,
+           "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
+           cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+           a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
+
+       /*
+        * Ignore sector if:
+        * For type     3: ref & app tag is all 'f's
+        * For type 0,1,2: app tag is all 'f's
+        */
+       if ((a_app_tag == 0xffff) &&
+           ((cmd->se_cmd.prot_type != TARGET_DIF_TYPE3_PROT) ||
+            (a_ref_tag == 0xffffffff))) {
+               uint32_t blocks_done;
+
+               /* 2TB boundary case covered automatically with this */
+               blocks_done = e_ref_tag - (uint32_t)lba + 1;
+               cmd->se_cmd.bad_sector = e_ref_tag;
+               cmd->se_cmd.pi_err = 0;
+               ql_dbg(ql_dbg_tgt, vha, 0xf074,
+                       "need to return scsi good\n");
+
+               /* Update protection tag */
+               if (cmd->prot_sg_cnt) {
+                       uint32_t i, j = 0, k = 0, num_ent;
+                       struct scatterlist *sg, *sgl;
+
+
+                       sgl = cmd->prot_sg;
+
+                       /* Patch the corresponding protection tags */
+                       for_each_sg(sgl, sg, cmd->prot_sg_cnt, i) {
+                               num_ent = sg_dma_len(sg) / 8;
+                               if (k + num_ent < blocks_done) {
+                                       k += num_ent;
+                                       continue;
+                               }
+                               j = blocks_done - k - 1;
+                               k = blocks_done;
+                               break;
+                       }
+
+                       if (k != blocks_done) {
+                               ql_log(ql_log_warn, vha, 0xf076,
+                                   "unexpected tag values tag:lba=%u:%llu)\n",
+                                   e_ref_tag, (unsigned long long)lba);
+                               goto out;
+                       }
+
+#if 0
+                       struct sd_dif_tuple *spt;
+                       /* TODO:
+                        * This section came from the initiator code. Is it valid here?
+                        * Should the ULP value be overridden with the actual value?
+                        */
+                       spt = page_address(sg_page(sg)) + sg->offset;
+                       spt += j;
+
+                       spt->app_tag = 0xffff;
+                       if (cmd->se_cmd.prot_type == SCSI_PROT_DIF_TYPE3)
+                               spt->ref_tag = 0xffffffff;
+#endif
+               }
+
+               return 0;
+       }
+
+       /* check guard */
+       if (e_guard != a_guard) {
+               cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED;
+               cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
+
+               ql_log(ql_log_warn, vha, 0xe076,
+                   "Guard ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
+                   cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+                   a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
+                   a_guard, e_guard, cmd);
+               goto out;
+       }
+
+       /* check ref tag */
+       if (e_ref_tag != a_ref_tag) {
+               cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED;
+               cmd->se_cmd.bad_sector = e_ref_tag;
+
+               ql_log(ql_log_warn, vha, 0xe077,
+                       "Ref Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
+                       cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+                       a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
+                       a_guard, e_guard, cmd);
+               goto out;
+       }
+
+       /* check appl tag */
+       if (e_app_tag != a_app_tag) {
+               cmd->se_cmd.pi_err = TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED;
+               cmd->se_cmd.bad_sector = cmd->se_cmd.t_task_lba;
+
+               ql_log(ql_log_warn, vha, 0xe078,
+                       "App Tag ERR: cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x] cmd=%p\n",
+                       cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
+                       a_ref_tag, e_ref_tag, a_app_tag, e_app_tag,
+                       a_guard, e_guard, cmd);
+               goto out;
+       }
+out:
+       return 1;
+}
+
+
 /* If hardware_lock held on entry, might drop it, then reaquire */
 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
@@ -2155,18 +2681,36 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
        rc = __qlt_send_term_exchange(vha, cmd, atio);
        spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 done:
-       if (rc == 1) {
+       /*
+        * Terminate exchange tells the fw to release any active CTIO that
+        * is in FW possession and to clean up the exchange.
+        *
+        * "cmd->state == QLA_TGT_STATE_ABORTED" means the CTIO is still
+        * down at the FW; free the cmd later, when the CTIO comes back
+        * with aborted (0x2) status.
+        *
+        * "cmd->state != QLA_TGT_STATE_ABORTED" means the CTIO has already
+        * come back with some error; free the cmd now.
+        */
+       if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) {
                if (!ha_locked && !in_interrupt())
                        msleep(250); /* just in case */
 
+               if (cmd->sg_mapped)
+                       qlt_unmap_sg(vha, cmd);
                vha->hw->tgt.tgt_ops->free_cmd(cmd);
        }
+       return;
 }
 
 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 {
-       BUG_ON(cmd->sg_mapped);
+       ql_dbg(ql_dbg_tgt, cmd->vha, 0xe074,
+           "%s: se_cmd[%p] ox_id %04x\n",
+           __func__, &cmd->se_cmd,
+           be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
 
+       BUG_ON(cmd->sg_mapped);
        if (unlikely(cmd->free_sg))
                kfree(cmd->sg);
        kmem_cache_free(qla_tgt_cmd_cachep, cmd);
@@ -2374,6 +2918,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
                case CTIO_LIP_RESET:
                case CTIO_TARGET_RESET:
                case CTIO_ABORTED:
+                       /* driver requested abort via Terminate exchange */
                case CTIO_TIMEOUT:
                case CTIO_INVALID_RX_ID:
                        /* They are OK */
@@ -2404,18 +2949,58 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
                        else
                                return;
 
+               case CTIO_DIF_ERROR: {
+                       struct ctio_crc_from_fw *crc =
+                               (struct ctio_crc_from_fw *)ctio;
+                       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf073,
+                           "qla_target(%d): CTIO with DIF_ERROR status %x received (state %x, se_cmd %p) actual_dif[0x%llx] expect_dif[0x%llx]\n",
+                           vha->vp_idx, status, cmd->state, se_cmd,
+                           *((u64 *)&crc->actual_dif[0]),
+                           *((u64 *)&crc->expected_dif[0]));
+
+                       if (qlt_handle_dif_error(vha, cmd, ctio)) {
+                               if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+                                       /* SCSI write/xfer rdy complete */
+                                       goto skip_term;
+                               } else {
+                                       /* scsi read/xmit respond complete
+                                       /* SCSI read/xmit response complete:
+                                        * call handle_dif_err to send SCSI
+                                        * status rather than terminating the
+                                        * exchange.
+                                       cmd->state = QLA_TGT_STATE_PROCESSED;
+                                       ha->tgt.tgt_ops->handle_dif_err(cmd);
+                                       return;
+                               }
+                       } else {
+                               /* Need to generate a SCSI good completion,
+                                * because the FW did not send a SCSI status.
+                                */
+                               status = 0;
+                               goto skip_term;
+                       }
+                       break;
+               }
                default:
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
-                           "qla_target(%d): CTIO with error status "
-                           "0x%x received (state %x, se_cmd %p\n",
+                           "qla_target(%d): CTIO with error status 0x%x received (state %x, se_cmd %p\n",
                            vha->vp_idx, status, cmd->state, se_cmd);
                        break;
                }
 
-               if (cmd->state != QLA_TGT_STATE_NEED_DATA)
+
+               /* "cmd->state == QLA_TGT_STATE_ABORTED" means the cmd is
+                * already aborted/terminated, so we don't need to terminate
+                * it again.  The exchange is already cleaned up/freed at the
+                * FW level; just clean up at the driver level.
+                */
+               if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
+                       (cmd->state != QLA_TGT_STATE_ABORTED)) {
                        if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
                                return;
+               }
        }
+skip_term:
 
        if (cmd->state == QLA_TGT_STATE_PROCESSED) {
                ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
@@ -2444,7 +3029,8 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
                    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
        }
 
-       if (unlikely(status != CTIO_SUCCESS)) {
+       if (unlikely(status != CTIO_SUCCESS) &&
+               (cmd->state != QLA_TGT_STATE_ABORTED)) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
                dump_stack();
        }
@@ -2563,8 +3149,9 @@ static void qlt_do_work(struct work_struct *work)
            atio->u.isp24.fcp_cmnd.add_cdb_len]));
 
        ql_dbg(ql_dbg_tgt, vha, 0xe022,
-           "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
-           cmd, cmd->unpacked_lun, cmd->tag);
+               "qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
+               cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
+               cmd->atio.u.isp24.fcp_hdr.ox_id);
 
        ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
            fcp_task_attr, data_dir, bidi);
@@ -3527,11 +4114,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
        switch (atio->u.raw.entry_type) {
        case ATIO_TYPE7:
                ql_dbg(ql_dbg_tgt, vha, 0xe02d,
-                   "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
-                   "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
+                   "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",
                    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
                    atio->u.isp24.fcp_cmnd.rddata,
                    atio->u.isp24.fcp_cmnd.wrdata,
+                   atio->u.isp24.fcp_cmnd.cdb[0],
                    atio->u.isp24.fcp_cmnd.add_cdb_len,
                    be32_to_cpu(get_unaligned((uint32_t *)
                        &atio->u.isp24.fcp_cmnd.add_cdb[
@@ -3629,11 +4216,13 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
        tgt->irq_cmd_count++;
 
        switch (pkt->entry_type) {
+       case CTIO_CRC2:
        case CTIO_TYPE7:
        {
                struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
-               ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
-                   vha->vp_idx);
+               ql_dbg(ql_dbg_tgt, vha, 0xe030,
+                       "CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n",
+                       entry->entry_type, vha->vp_idx);
                qlt_do_ctio_completion(vha, entry->handle,
                    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
                    entry);
@@ -4768,6 +5357,7 @@ qlt_24xx_process_response_error(struct scsi_qla_host *vha,
        case ABTS_RESP_24XX:
        case CTIO_TYPE7:
        case NOTIFY_ACK_TYPE:
+       case CTIO_CRC2:
                return 1;
        default:
                return 0;
index ce33d8c26406da00ccb67943093bae5f3fdec0b9..f873e10451d29758ffd65b87a7b400af301e38c7 100644 (file)
@@ -293,6 +293,7 @@ struct ctio_to_2xxx {
 #define CTIO_ABORTED                   0x02
 #define CTIO_INVALID_RX_ID             0x08
 #define CTIO_TIMEOUT                   0x0B
+#define CTIO_DIF_ERROR                 0x0C     /* DIF error detected  */
 #define CTIO_LIP_RESET                 0x0E
 #define CTIO_TARGET_RESET              0x17
 #define CTIO_PORT_UNAVAILABLE          0x28
@@ -498,11 +499,12 @@ struct ctio7_from_24xx {
 #define CTIO7_FLAGS_DONT_RET_CTIO      BIT_8
 #define CTIO7_FLAGS_STATUS_MODE_0      0
 #define CTIO7_FLAGS_STATUS_MODE_1      BIT_6
+#define CTIO7_FLAGS_STATUS_MODE_2      BIT_7
 #define CTIO7_FLAGS_EXPLICIT_CONFORM   BIT_5
 #define CTIO7_FLAGS_CONFIRM_SATISF     BIT_4
 #define CTIO7_FLAGS_DSD_PTR            BIT_2
-#define CTIO7_FLAGS_DATA_IN            BIT_1
-#define CTIO7_FLAGS_DATA_OUT           BIT_0
+#define CTIO7_FLAGS_DATA_IN            BIT_1 /* data to initiator */
+#define CTIO7_FLAGS_DATA_OUT           BIT_0 /* data from initiator */
 
 #define ELS_PLOGI                      0x3
 #define ELS_FLOGI                      0x4
@@ -513,6 +515,68 @@ struct ctio7_from_24xx {
 #define ELS_PDISC                      0x50
 #define ELS_ADISC                      0x52
 
+/*
+ * CTIO Type CRC_2 IOCB
+ */
+struct ctio_crc2_to_fw {
+       uint8_t entry_type;             /* Entry type. */
+#define CTIO_CRC2 0x7A
+       uint8_t entry_count;            /* Entry count. */
+       uint8_t sys_define;             /* System defined. */
+       uint8_t entry_status;           /* Entry Status. */
+
+       uint32_t handle;                /* System handle. */
+       uint16_t nport_handle;          /* N_PORT handle. */
+       uint16_t timeout;               /* Command timeout. */
+
+       uint16_t dseg_count;            /* Data segment count. */
+       uint8_t  vp_index;
+       uint8_t  add_flags;             /* additional flags */
+#define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3
+
+       uint8_t  initiator_id[3];       /* initiator ID */
+       uint8_t  reserved1;
+       uint32_t exchange_addr;         /* rcv exchange address */
+       uint16_t reserved2;
+       uint16_t flags;                 /* refer to CTIO7 flags values */
+       uint32_t residual;
+       uint16_t ox_id;
+       uint16_t scsi_status;
+       uint32_t relative_offset;
+       uint32_t reserved5;
+       uint32_t transfer_length;               /* total fc transfer length */
+       uint32_t reserved6;
+       uint32_t crc_context_address[2];/* Data segment address. */
+       uint16_t crc_context_len;       /* Data segment length. */
+       uint16_t reserved_1;            /* MUST be set to 0. */
+} __packed;
+
+/* CTIO Type CRC_x Status IOCB */
+struct ctio_crc_from_fw {
+       uint8_t entry_type;             /* Entry type. */
+       uint8_t entry_count;            /* Entry count. */
+       uint8_t sys_define;             /* System defined. */
+       uint8_t entry_status;           /* Entry Status. */
+
+       uint32_t handle;                /* System handle. */
+       uint16_t status;
+       uint16_t timeout;               /* Command timeout. */
+       uint16_t dseg_count;            /* Data segment count. */
+       uint32_t reserved1;
+       uint16_t state_flags;
+#define CTIO_CRC_SF_DIF_CHOPPED BIT_4
+
+       uint32_t exchange_address;      /* rcv exchange address */
+       uint16_t reserved2;
+       uint16_t flags;
+       uint32_t resid_xfer_length;
+       uint16_t ox_id;
+       uint8_t  reserved3[12];
+       uint16_t runt_guard;            /* reported runt blk guard */
+       uint8_t  actual_dif[8];
+       uint8_t  expected_dif[8];
+} __packed;
+
 /*
  * ISP queue - ABTS received/response entries structure definition for 24xx.
  */
@@ -641,6 +705,7 @@ struct qla_tgt_func_tmpl {
        int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
                        unsigned char *, uint32_t, int, int, int);
        void (*handle_data)(struct qla_tgt_cmd *);
+       void (*handle_dif_err)(struct qla_tgt_cmd *);
        int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
                        uint32_t);
        void (*free_cmd)(struct qla_tgt_cmd *);
@@ -829,9 +894,9 @@ struct qla_tgt_sess {
 };
 
 struct qla_tgt_cmd {
+       struct se_cmd se_cmd;
        struct qla_tgt_sess *sess;
        int state;
-       struct se_cmd se_cmd;
        struct work_struct free_work;
        struct work_struct work;
        /* Sense buffer that will be mapped into outgoing status */
@@ -843,6 +908,7 @@ struct qla_tgt_cmd {
        unsigned int free_sg:1;
        unsigned int aborted:1; /* Needed in case of SRR */
        unsigned int write_data_transferred:1;
+       unsigned int ctx_dsd_alloced:1;
 
        struct scatterlist *sg; /* cmd data buffer SG vector */
        int sg_cnt;             /* SG segments count */
@@ -857,6 +923,12 @@ struct qla_tgt_cmd {
        struct scsi_qla_host *vha;
 
        struct atio_from_isp atio;
+       /* t10dif */
+       struct scatterlist *prot_sg;
+       uint32_t prot_sg_cnt;
+       uint32_t blk_sz;
+       struct crc_context *ctx;
+
 };
 
 struct qla_tgt_sess_work_param {
@@ -901,6 +973,10 @@ struct qla_tgt_prm {
        int sense_buffer_len;
        int residual;
        int add_status_pkt;
+       /* dif */
+       struct scatterlist *prot_sg;
+       uint16_t prot_seg_cnt;
+       uint16_t tot_dsds;
 };
 
 struct qla_tgt_srr_imm {
@@ -976,6 +1052,8 @@ extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
 extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
 extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
 extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
+extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *);
+extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t);
 extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
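
The new prot_sg/prot_sg_cnt/blk_sz/ctx fields let qla_tgt_cmd carry a T10-PI protection scatterlist alongside the data scatterlist, and the CRC2 CTIO defined above is how that protection data is handed to the firmware. As a rough orientation only (this sketch is not part of the driver), the size of the protection buffer follows directly from blk_sz, since T10 PI attaches one 8-byte tuple - guard, application and reference tag - per logical block:

    #include <stdint.h>

    #define T10_PI_TUPLE_SIZE 8u   /* 2-byte guard + 2-byte app + 4-byte ref tag */

    /* Bytes of protection information for a transfer of whole blocks
     * (assumes blk_sz is non-zero and divides transfer_length evenly). */
    static inline uint32_t pi_bytes_for_transfer(uint32_t transfer_length,
                                                 uint32_t blk_sz)
    {
            return (transfer_length / blk_sz) * T10_PI_TUPLE_SIZE;
    }

That is, roughly, the relationship behind the protection scatterlist the fabric layer passes down for commands whose backstore has PI enabled.
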
index a804e9b744bbac4db8e5cbc7db82daeacc35a0b0..cb9a0c4bc4190adbf5e0f5b6b43e2efb3cfc6125 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -201,7 +201,6 @@ qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
                ql_dbg(ql_dbg_misc, NULL, 0xd014,
                    "%s: @%x\n", __func__, offset);
        }
-       qla27xx_insert32(offset, buf, len);
        qla27xx_read32(window, buf, len);
 }
 
@@ -220,7 +219,7 @@ qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
 
 static inline void
 qla27xx_read_window(__iomem struct device_reg_24xx *reg,
-       uint32_t base, uint offset, uint count, uint width, void *buf,
+       uint32_t addr, uint offset, uint count, uint width, void *buf,
        ulong *len)
 {
        void *window = (void *)reg + offset;
@@ -229,14 +228,14 @@ qla27xx_read_window(__iomem struct device_reg_24xx *reg,
        if (buf) {
                ql_dbg(ql_dbg_misc, NULL, 0xd016,
                    "%s: base=%x offset=%x count=%x width=%x\n",
-                   __func__, base, offset, count, width);
+                   __func__, addr, offset, count, width);
        }
-       qla27xx_write_reg(reg, IOBASE_ADDR, base, buf);
+       qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
        while (count--) {
-               qla27xx_insert32(base, buf, len);
+               qla27xx_insert32(addr, buf, len);
                readn(window, buf, len);
                window += width;
-               base += width;
+               addr++;
        }
 }
 
@@ -336,7 +335,8 @@ qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
 
        ql_dbg(ql_dbg_misc, vha, 0xd204,
            "%s: rdpci [%lx]\n", __func__, *len);
-       qla27xx_read_reg(reg, ent->t260.pci_addr, buf, len);
+       qla27xx_insert32(ent->t260.pci_offset, buf, len);
+       qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);
 
        return false;
 }
@@ -349,7 +349,7 @@ qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
 
        ql_dbg(ql_dbg_misc, vha, 0xd205,
            "%s: wrpci [%lx]\n", __func__, *len);
-       qla27xx_write_reg(reg, ent->t261.pci_addr, ent->t261.write_data, buf);
+       qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);
 
        return false;
 }
@@ -392,9 +392,9 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
                goto done;
        }
 
-       if (end < start) {
+       if (end < start || end == 0) {
                ql_dbg(ql_dbg_misc, vha, 0xd023,
-                   "%s: bad range (start=%x end=%x)\n", __func__,
+                   "%s: unusable range (start=%x end=%x)\n", __func__,
                    ent->t262.end_addr, ent->t262.start_addr);
                qla27xx_skip_entry(ent, buf);
                goto done;
@@ -452,17 +452,15 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
                ql_dbg(ql_dbg_misc, vha, 0xd025,
                    "%s: unsupported atio queue\n", __func__);
                qla27xx_skip_entry(ent, buf);
-               goto done;
        } else {
                ql_dbg(ql_dbg_misc, vha, 0xd026,
                    "%s: unknown queue %u\n", __func__, ent->t263.queue_type);
                qla27xx_skip_entry(ent, buf);
-               goto done;
        }
 
        if (buf)
                ent->t263.num_queues = count;
-done:
+
        return false;
 }
 
@@ -503,7 +501,7 @@ qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
        ql_dbg(ql_dbg_misc, vha, 0xd209,
            "%s: pause risc [%lx]\n", __func__, *len);
        if (buf)
-               qla24xx_pause_risc(reg);
+               qla24xx_pause_risc(reg, vha->hw);
 
        return false;
 }
@@ -590,7 +588,6 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
        struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
        struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
-       void *window = (void *)reg + 0xc4;
        ulong dwords = ent->t270.count;
        ulong addr = ent->t270.addr;
 
@@ -599,10 +596,9 @@ qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
        qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
        while (dwords--) {
                qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
-               qla27xx_read_reg(reg, 0xc4, buf, len);
                qla27xx_insert32(addr, buf, len);
-               qla27xx_read32(window, buf, len);
-               addr++;
+               qla27xx_read_reg(reg, 0xc4, buf, len);
+               addr += sizeof(uint32_t);
        }
 
        return false;
@@ -614,12 +610,12 @@ qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
 {
        struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
        ulong addr = ent->t271.addr;
+       ulong data = ent->t271.data;
 
        ql_dbg(ql_dbg_misc, vha, 0xd20f,
            "%s: wrremreg [%lx]\n", __func__, *len);
        qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
-       qla27xx_read_reg(reg, 0xc4, buf, len);
-       qla27xx_insert32(addr, buf, len);
+       qla27xx_write_reg(reg, 0xc4, data, buf);
        qla27xx_write_reg(reg, 0xc0, addr, buf);
 
        return false;
@@ -662,9 +658,59 @@ qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
                            "%s: failed pcicfg read at %lx\n", __func__, addr);
                qla27xx_insert32(addr, buf, len);
                qla27xx_insert32(value, buf, len);
-               addr += 4;
+               addr += sizeof(uint32_t);
+       }
+
+       return false;
+}
+
+static int
+qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
+       struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+       uint count = 0;
+       uint i;
+
+       ql_dbg(ql_dbg_misc, vha, 0xd212,
+           "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
+       if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
+               for (i = 0; i < vha->hw->max_req_queues; i++) {
+                       struct req_que *req = vha->hw->req_q_map[i];
+                       if (req || !buf) {
+                               qla27xx_insert16(i, buf, len);
+                               qla27xx_insert16(1, buf, len);
+                               qla27xx_insert32(req && req->out_ptr ?
+                                   *req->out_ptr : 0, buf, len);
+                               count++;
+                       }
+               }
+       } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
+               for (i = 0; i < vha->hw->max_rsp_queues; i++) {
+                       struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+                       if (rsp || !buf) {
+                               qla27xx_insert16(i, buf, len);
+                               qla27xx_insert16(1, buf, len);
+                               qla27xx_insert32(rsp && rsp->in_ptr ?
+                                   *rsp->in_ptr : 0, buf, len);
+                               count++;
+                       }
+               }
+       } else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
+               ql_dbg(ql_dbg_misc, vha, 0xd02e,
+                   "%s: unsupported atio queue\n", __func__);
+               qla27xx_skip_entry(ent, buf);
+       } else {
+               ql_dbg(ql_dbg_misc, vha, 0xd02f,
+                   "%s: unknown queue %u\n", __func__, ent->t274.queue_type);
+               qla27xx_skip_entry(ent, buf);
        }
 
+       if (buf)
+               ent->t274.num_queues = count;
+
+       if (!count)
+               qla27xx_skip_entry(ent, buf);
+
        return false;
 }
 
@@ -709,6 +755,7 @@ static struct qla27xx_fwdt_entry_call ql27xx_fwdt_entry_call_list[] = {
        { ENTRY_TYPE_WRREMREG           , qla27xx_fwdt_entry_t271  } ,
        { ENTRY_TYPE_RDREMRAM           , qla27xx_fwdt_entry_t272  } ,
        { ENTRY_TYPE_PCICFG             , qla27xx_fwdt_entry_t273  } ,
+       { ENTRY_TYPE_GET_SHADOW         , qla27xx_fwdt_entry_t274  } ,
        { -1                            , qla27xx_fwdt_entry_other }
 };
 
index c9d2fff4d96440905d8b018c9b1bbeae0beadc7d..1967424c8e64c832bac74ea98841556f762c5e39 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
@@ -52,6 +52,7 @@ struct __packed qla27xx_fwdt_template {
 #define ENTRY_TYPE_WRREMREG            271
 #define ENTRY_TYPE_RDREMRAM            272
 #define ENTRY_TYPE_PCICFG              273
+#define ENTRY_TYPE_GET_SHADOW          274
 
 #define CAPTURE_FLAG_PHYS_ONLY         BIT_0
 #define CAPTURE_FLAG_PHYS_VIRT         BIT_1
@@ -109,12 +110,12 @@ struct __packed qla27xx_fwdt_entry {
                } t259;
 
                struct __packed {
-                       uint8_t pci_addr;
+                       uint8_t pci_offset;
                        uint8_t reserved[3];
                } t260;
 
                struct __packed {
-                       uint8_t pci_addr;
+                       uint8_t pci_offset;
                        uint8_t reserved[3];
                        uint32_t write_data;
                } t261;
@@ -186,6 +187,12 @@ struct __packed qla27xx_fwdt_entry {
                        uint32_t addr;
                        uint32_t count;
                } t273;
+
+               struct __packed {
+                       uint32_t num_queues;
+                       uint8_t  queue_type;
+                       uint8_t  reserved[3];
+               } t274;
        };
 };
 
@@ -202,4 +209,8 @@ struct __packed qla27xx_fwdt_entry {
 #define T268_BUF_TYPE_EXCH_BUFOFF      2
 #define T268_BUF_TYPE_EXTD_LOGIN       3
 
+#define T274_QUEUE_TYPE_REQ_SHAD       1
+#define T274_QUEUE_TYPE_RSP_SHAD       2
+#define T274_QUEUE_TYPE_ATIO_SHAD      3
+
 #endif
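
For the new ENTRY_TYPE_GET_SHADOW (274) template entry, qla27xx_fwdt_entry_t274() above writes one record per request/response queue: a 16-bit queue index, a 16-bit value count (always 1 here) and the 32-bit shadow out/in pointer snapshot, with t274.num_queues recording how many records were emitted. A minimal user-space sketch of walking that layout back out of a dump - assuming the insert16/insert32 helpers lay the fields out contiguously, in this order and in host byte order - might look like:

    #include <stdint.h>
    #include <stdio.h>

    struct shadow_rec {
            uint16_t queue_id;    /* request or response queue index */
            uint16_t count;       /* number of 32-bit values, 1 per queue here */
            uint32_t shadow_ptr;  /* snapshot of *out_ptr / *in_ptr */
    };

    static void print_shadow_records(const struct shadow_rec *rec, uint32_t num_queues)
    {
            uint32_t i;

            for (i = 0; i < num_queues; i++)
                    printf("queue %u: shadow pointer 0x%08x\n",
                           (unsigned)rec[i].queue_id,
                           (unsigned)rec[i].shadow_ptr);
    }
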
index e36b947125440a3a39b4c093df5ff81a04467c69..4d2c98cbec4fbf21c3b1aeb517e735beac8fbdd1 100644 (file)
@@ -1,13 +1,13 @@
 /*
  * QLogic Fibre Channel HBA Driver
- * Copyright (c)  2003-2013 QLogic Corporation
+ * Copyright (c)  2003-2014 QLogic Corporation
  *
  * See LICENSE.qla2xxx for copyright and licensing details.
  */
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.07.00.02-k"
+#define QLA2XXX_VERSION      "8.07.00.08-k"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   7
index 68fb66fdb757fbc98784fad4ff7a5d03c49ee73d..896cb23adb77f2e0fcb32de955a4c1c1ec5c035a 100644 (file)
@@ -472,6 +472,11 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
        cmd->sg_cnt = se_cmd->t_data_nents;
        cmd->sg = se_cmd->t_data_sg;
 
+       cmd->prot_sg_cnt = se_cmd->t_prot_nents;
+       cmd->prot_sg = se_cmd->t_prot_sg;
+       cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
+       se_cmd->pi_err = 0;
+
        /*
         * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
         * the SGL mappings into PCIe memory for incoming FCP WRITE data.
@@ -567,8 +572,13 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
                        return;
                }
 
-               transport_generic_request_failure(&cmd->se_cmd,
-                                                 TCM_CHECK_CONDITION_ABORT_CMD);
+               if (cmd->se_cmd.pi_err)
+                       transport_generic_request_failure(&cmd->se_cmd,
+                               cmd->se_cmd.pi_err);
+               else
+                       transport_generic_request_failure(&cmd->se_cmd,
+                               TCM_CHECK_CONDITION_ABORT_CMD);
+
                return;
        }
 
@@ -584,6 +594,27 @@ static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
        queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }
 
+static void tcm_qla2xxx_handle_dif_work(struct work_struct *work)
+{
+       struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+
+       /* Take an extra kref to prevent the cmd from being freed too early.
+        * We need to wait for the SCSI status/check condition response
+        * generated by transport_generic_request_failure() to finish.
+        */
+       kref_get(&cmd->se_cmd.cmd_kref);
+       transport_generic_request_failure(&cmd->se_cmd, cmd->se_cmd.pi_err);
+}
+
+/*
+ * Called from qla_target.c:qlt_do_ctio_completion()
+ */
+static void tcm_qla2xxx_handle_dif_err(struct qla_tgt_cmd *cmd)
+{
+       INIT_WORK(&cmd->work, tcm_qla2xxx_handle_dif_work);
+       queue_work(tcm_qla2xxx_free_wq, &cmd->work);
+}
+
 /*
  * Called from qla_target.c:qlt_issue_task_mgmt()
  */
@@ -610,6 +641,11 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
        cmd->sg = se_cmd->t_data_sg;
        cmd->offset = 0;
 
+       cmd->prot_sg_cnt = se_cmd->t_prot_nents;
+       cmd->prot_sg = se_cmd->t_prot_sg;
+       cmd->blk_sz  = se_cmd->se_dev->dev_attrib.block_size;
+       se_cmd->pi_err = 0;
+
        /*
         * Now queue completed DATA_IN to the qla2xxx LLD and response ring
         */
@@ -1600,6 +1636,7 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
 static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
        .handle_cmd             = tcm_qla2xxx_handle_cmd,
        .handle_data            = tcm_qla2xxx_handle_data,
+       .handle_dif_err         = tcm_qla2xxx_handle_dif_err,
        .handle_tmr             = tcm_qla2xxx_handle_tmr,
        .free_cmd               = tcm_qla2xxx_free_cmd,
        .free_mcmd              = tcm_qla2xxx_free_mcmd,
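
tcm_qla2xxx_handle_dif_work() above relies on a simple lifetime rule: take an extra command reference before reporting the protection error, so the qla_tgt_cmd cannot be freed until the CHECK CONDITION produced by transport_generic_request_failure() has gone out; per the comment, the extra reference is dropped later, once that response has completed. A stripped-down, user-space illustration of the pattern (not driver code; the names here are made up) is:

    #include <stdio.h>

    struct fake_cmd {
            int refcount;   /* stands in for se_cmd.cmd_kref */
            int pi_err;     /* non-zero when a DIF/guard-tag error was seen */
    };

    static void send_check_condition(struct fake_cmd *cmd)
    {
            printf("failing command, sense reason %d\n", cmd->pi_err);
    }

    static void handle_dif_err(struct fake_cmd *cmd)
    {
            cmd->refcount++;            /* extra ref: keep cmd alive for the response */
            send_check_condition(cmd);  /* asynchronous failure path starts here */
    }

    static void status_sent(struct fake_cmd *cmd)
    {
            cmd->refcount--;            /* matching put once the status has gone out */
    }
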
index 2eba35365920d5d58fb3c8a2df68a8a6b3e9636d..556c1525f881650261187b01e60165a36ce81c00 100644 (file)
@@ -249,110 +249,6 @@ void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
        qla4_83xx_flash_unlock(ha);
 }
 
-/**
- * qla4_83xx_ms_mem_write_128b - Writes data to MS/off-chip memory
- * @ha: Pointer to adapter structure
- * @addr: Flash address to write to
- * @data: Data to be written
- * @count: word_count to be written
- *
- * Return: On success return QLA_SUCCESS
- *        On error return QLA_ERROR
- **/
-int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
-                               uint32_t *data, uint32_t count)
-{
-       int i, j;
-       uint32_t agt_ctrl;
-       unsigned long flags;
-       int ret_val = QLA_SUCCESS;
-
-       /* Only 128-bit aligned access */
-       if (addr & 0xF) {
-               ret_val = QLA_ERROR;
-               goto exit_ms_mem_write;
-       }
-
-       write_lock_irqsave(&ha->hw_lock, flags);
-
-       /* Write address */
-       ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
-       if (ret_val == QLA_ERROR) {
-               ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
-                          __func__);
-               goto exit_ms_mem_write_unlock;
-       }
-
-       for (i = 0; i < count; i++, addr += 16) {
-               if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
-                                            QLA8XXX_ADDR_QDR_NET_MAX)) ||
-                     (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
-                                            QLA8XXX_ADDR_DDR_NET_MAX)))) {
-                       ret_val = QLA_ERROR;
-                       goto exit_ms_mem_write_unlock;
-               }
-
-               ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_LO,
-                                                   addr);
-               /* Write data */
-               ret_val |= qla4_83xx_wr_reg_indirect(ha,
-                                                    MD_MIU_TEST_AGT_WRDATA_LO,
-                                                    *data++);
-               ret_val |= qla4_83xx_wr_reg_indirect(ha,
-                                                    MD_MIU_TEST_AGT_WRDATA_HI,
-                                                    *data++);
-               ret_val |= qla4_83xx_wr_reg_indirect(ha,
-                                                    MD_MIU_TEST_AGT_WRDATA_ULO,
-                                                    *data++);
-               ret_val |= qla4_83xx_wr_reg_indirect(ha,
-                                                    MD_MIU_TEST_AGT_WRDATA_UHI,
-                                                    *data++);
-               if (ret_val == QLA_ERROR) {
-                       ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
-                                  __func__);
-                       goto exit_ms_mem_write_unlock;
-               }
-
-               /* Check write status */
-               ret_val = qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
-                                                   MIU_TA_CTL_WRITE_ENABLE);
-               ret_val |= qla4_83xx_wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
-                                                    MIU_TA_CTL_WRITE_START);
-               if (ret_val == QLA_ERROR) {
-                       ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
-                                  __func__);
-                       goto exit_ms_mem_write_unlock;
-               }
-
-               for (j = 0; j < MAX_CTL_CHECK; j++) {
-                       ret_val = qla4_83xx_rd_reg_indirect(ha,
-                                                       MD_MIU_TEST_AGT_CTRL,
-                                                       &agt_ctrl);
-                       if (ret_val == QLA_ERROR) {
-                               ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
-                                          __func__);
-                               goto exit_ms_mem_write_unlock;
-                       }
-                       if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
-                               break;
-               }
-
-               /* Status check failed */
-               if (j >= MAX_CTL_CHECK) {
-                       printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
-                                          __func__);
-                       ret_val = QLA_ERROR;
-                       goto exit_ms_mem_write_unlock;
-               }
-       }
-
-exit_ms_mem_write_unlock:
-       write_unlock_irqrestore(&ha->hw_lock, flags);
-
-exit_ms_mem_write:
-       return ret_val;
-}
-
 #define INTENT_TO_RECOVER      0x01
 #define PROCEED_TO_RECOVER     0x02
 
@@ -760,7 +656,7 @@ static int qla4_83xx_copy_bootloader(struct scsi_qla_host *ha)
                          __func__));
 
        /* 128 bit/16 byte write to MS memory */
-       ret_val = qla4_83xx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
+       ret_val = qla4_8xxx_ms_mem_write_128b(ha, dest, (uint32_t *)p_cache,
                                              count);
        if (ret_val == QLA_ERROR) {
                ql4_printk(KERN_ERR, ha, "%s: Error writing firmware to MS\n",
index a0de6e25ea5a03b5721789642a605ecc28ae035a..775fdf9fcc876f5a53a16feaf1981e86c46e0caf 100644 (file)
@@ -254,6 +254,50 @@ struct qla83xx_minidump_entry_pollrd {
        uint32_t rsvd_1;
 };
 
+struct qla8044_minidump_entry_rddfe {
+       struct qla8xxx_minidump_entry_hdr h;
+       uint32_t addr_1;
+       uint32_t value;
+       uint8_t stride;
+       uint8_t stride2;
+       uint16_t count;
+       uint32_t poll;
+       uint32_t mask;
+       uint32_t modify_mask;
+       uint32_t data_size;
+       uint32_t rsvd;
+
+} __packed;
+
+struct qla8044_minidump_entry_rdmdio {
+       struct qla8xxx_minidump_entry_hdr h;
+
+       uint32_t addr_1;
+       uint32_t addr_2;
+       uint32_t value_1;
+       uint8_t stride_1;
+       uint8_t stride_2;
+       uint16_t count;
+       uint32_t poll;
+       uint32_t mask;
+       uint32_t value_2;
+       uint32_t data_size;
+
+} __packed;
+
+struct qla8044_minidump_entry_pollwr {
+       struct qla8xxx_minidump_entry_hdr h;
+       uint32_t addr_1;
+       uint32_t addr_2;
+       uint32_t value_1;
+       uint32_t value_2;
+       uint32_t poll;
+       uint32_t mask;
+       uint32_t data_size;
+       uint32_t rsvd;
+
+} __packed;
+
 /* RDMUX2 Entry */
 struct qla83xx_minidump_entry_rdmux2 {
        struct qla8xxx_minidump_entry_hdr h;
index 73a502288bde6aa003154682a0f3d1da534a210a..8f6d0fb2cd807255a66e962c3cb7c4c8633d4d77 100644 (file)
@@ -601,6 +601,7 @@ struct scsi_qla_host {
 #define DPC_HA_NEED_QUIESCENT          22 /* 0x00400000 ISP-82xx only*/
 #define DPC_POST_IDC_ACK               23 /* 0x00800000 */
 #define DPC_RESTORE_ACB                        24 /* 0x01000000 */
+#define DPC_SYSFS_DDB_EXPORT           25 /* 0x02000000 */
 
        struct Scsi_Host *host; /* pointer to host data */
        uint32_t tot_ddbs;
index 209853ce0bbc63a5ca0a1a2a5601d2bdf68baa87..699575efc9ba89f9f027384b1aeaa124f5aa2136 100644 (file)
@@ -1415,6 +1415,9 @@ struct ql_iscsi_stats {
 #define QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN       16
 #define QLA83XX_SS_OCM_WNDREG_INDEX            3
 #define QLA83XX_SS_PCI_INDEX                   0
+#define QLA8022_TEMPLATE_CAP_OFFSET            172
+#define QLA83XX_TEMPLATE_CAP_OFFSET            268
+#define QLA80XX_TEMPLATE_RESERVED_BITS         16
 
 struct qla4_8xxx_minidump_template_hdr {
        uint32_t entry_type;
@@ -1434,6 +1437,7 @@ struct qla4_8xxx_minidump_template_hdr {
        uint32_t saved_state_array[QLA8XXX_DBG_STATE_ARRAY_LEN];
        uint32_t capture_size_array[QLA8XXX_DBG_CAP_SIZE_ARRAY_LEN];
        uint32_t ocm_window_reg[QLA83XX_DBG_OCM_WNDREG_ARRAY_LEN];
+       uint32_t capabilities[QLA80XX_TEMPLATE_RESERVED_BITS];
 };
 
 #endif /*  _QLA4X_FW_H */
index b1a19cd8d5b216edda62c2e7aef0517f494c1521..5f58b451327e8a79040facd208b041c2a43cdb8f 100644 (file)
@@ -274,13 +274,14 @@ int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
 int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
                    uint32_t acb_type, uint32_t len);
 int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config);
-int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha,
+int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha,
                                uint64_t addr, uint32_t *data, uint32_t count);
 uint8_t qla4xxx_set_ipaddr_state(uint8_t fw_ipaddr_state);
 int qla4_83xx_get_port_config(struct scsi_qla_host *ha, uint32_t *config);
 int qla4_83xx_set_port_config(struct scsi_qla_host *ha, uint32_t *config);
 int qla4_8xxx_check_init_adapter_retry(struct scsi_qla_host *ha);
 int qla4_83xx_is_detached(struct scsi_qla_host *ha);
+int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
index 28fbece7e08fe8158872eb9cd74dd17cbd19b85f..6f12f859b11db787e3b8add134f4d7515cf5ecf4 100644 (file)
@@ -282,6 +282,25 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
        return ipv4_wait|ipv6_wait;
 }
 
+static int qla4_80xx_is_minidump_dma_capable(struct scsi_qla_host *ha,
+               struct qla4_8xxx_minidump_template_hdr *md_hdr)
+{
+       int offset = (is_qla8022(ha)) ? QLA8022_TEMPLATE_CAP_OFFSET :
+                                       QLA83XX_TEMPLATE_CAP_OFFSET;
+       int rval = 1;
+       uint32_t *cap_offset;
+
+       cap_offset = (uint32_t *)((char *)md_hdr + offset);
+
+       if (!(le32_to_cpu(*cap_offset) & BIT_0)) {
+               ql4_printk(KERN_INFO, ha, "PEX DMA Not supported %d\n",
+                          *cap_offset);
+               rval = 0;
+       }
+
+       return rval;
+}
+
 /**
  * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
  * @ha: pointer to host adapter structure.
@@ -294,6 +313,7 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
        void *md_tmp;
        dma_addr_t md_tmp_dma;
        struct qla4_8xxx_minidump_template_hdr *md_hdr;
+       int dma_capable;
 
        if (ha->fw_dump) {
                ql4_printk(KERN_WARNING, ha,
@@ -326,13 +346,19 @@ void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
 
        md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
 
+       dma_capable = qla4_80xx_is_minidump_dma_capable(ha, md_hdr);
+
        capture_debug_level = md_hdr->capture_debug_level;
 
        /* Get capture mask based on module loadtime setting. */
-       if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
+       if ((ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F) ||
+           (ql4xmdcapmask == 0xFF && dma_capable))  {
                ha->fw_dump_capture_mask = ql4xmdcapmask;
-       else
+       } else {
+               if (ql4xmdcapmask == 0xFF)
+                       ql4_printk(KERN_INFO, ha, "Falling back to default capture mask, as PEX DMA is not supported\n");
                ha->fw_dump_capture_mask = capture_debug_level;
+       }
 
        md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
 
@@ -864,6 +890,8 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
        if (status == QLA_SUCCESS) {
                if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags))
                        qla4xxx_get_crash_record(ha);
+
+               qla4xxx_init_rings(ha);
        } else {
                DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n",
                             ha->host_no, __func__));
index b1925d195f41ce29c69e4f3258da2e075ed3fe94..081b6b78d2c6beab69aa44e0c161f9581a17365b 100644 (file)
@@ -1526,7 +1526,7 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
 
 int qla4xxx_request_irqs(struct scsi_qla_host *ha)
 {
-       int ret;
+       int ret = 0;
        int rval = QLA_ERROR;
 
        if (is_qla40XX(ha))
@@ -1580,15 +1580,13 @@ try_msi:
                }
        }
 
-       /*
-        * Prevent interrupts from falling back to INTx mode in cases where
-        * interrupts cannot get acquired through MSI-X or MSI mode.
-        */
+try_intx:
        if (is_qla8022(ha)) {
-               ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret);
+               ql4_printk(KERN_WARNING, ha, "%s: ISP82xx Legacy interrupt not supported\n",
+                          __func__);
                goto irq_not_attached;
        }
-try_intx:
+
        /* Trying INTx */
        ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
            IRQF_SHARED, DRIVER_NAME, ha);
index 0a6b782d6fdbe9d5113dcc0c19ff9b50d80b010b..0a3312c6dd6d5fb5ba03001e7bd8b4a5e34470f0 100644 (file)
@@ -2381,7 +2381,7 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
                        ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
                                   __func__);
                        rval = QLA_ERROR;
-                       goto exit_config_acb;
+                       goto exit_free_acb;
                }
                memcpy(ha->saved_acb, acb, acb_len);
                break;
@@ -2395,8 +2395,6 @@ int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
                }
 
                memcpy(acb, ha->saved_acb, acb_len);
-               kfree(ha->saved_acb);
-               ha->saved_acb = NULL;
 
                rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
                if (rval != QLA_SUCCESS)
@@ -2412,6 +2410,10 @@ exit_free_acb:
        dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,
                          acb_dma);
 exit_config_acb:
+       if ((acb_config == ACB_CONFIG_SET) && ha->saved_acb) {
+               kfree(ha->saved_acb);
+               ha->saved_acb = NULL;
+       }
        DEBUG2(ql4_printk(KERN_INFO, ha,
                          "%s %s\n", __func__,
                          rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
index 63328c812b70019c15bf6c95a19b97d063e054f8..9dbdb4be2d8f7258a75043738d819dffcd410146 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 
+#define TIMEOUT_100_MS 100
 #define MASK(n)                DMA_BIT_MASK(n)
 #define MN_WIN(addr)   (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
 #define OCM_WIN(addr)  (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
@@ -1176,6 +1177,112 @@ qla4_82xx_pinit_from_rom(struct scsi_qla_host *ha, int verbose)
        return 0;
 }
 
+/**
+ * qla4_8xxx_ms_mem_write_128b - Writes data to MS/off-chip memory
+ * @ha: Pointer to adapter structure
+ * @addr: Flash address to write to
+ * @data: Data to be written
+ * @count: word_count to be written
+ *
+ * Return: On success return QLA_SUCCESS
+ *         On error return QLA_ERROR
+ **/
+int qla4_8xxx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
+                               uint32_t *data, uint32_t count)
+{
+       int i, j;
+       uint32_t agt_ctrl;
+       unsigned long flags;
+       int ret_val = QLA_SUCCESS;
+
+       /* Only 128-bit aligned access */
+       if (addr & 0xF) {
+               ret_val = QLA_ERROR;
+               goto exit_ms_mem_write;
+       }
+
+       write_lock_irqsave(&ha->hw_lock, flags);
+
+       /* Write address */
+       ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_ADDR_HI, 0);
+       if (ret_val == QLA_ERROR) {
+               ql4_printk(KERN_ERR, ha, "%s: write to AGT_ADDR_HI failed\n",
+                          __func__);
+               goto exit_ms_mem_write_unlock;
+       }
+
+       for (i = 0; i < count; i++, addr += 16) {
+               if (!((QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_QDR_NET,
+                                            QLA8XXX_ADDR_QDR_NET_MAX)) ||
+                     (QLA8XXX_ADDR_IN_RANGE(addr, QLA8XXX_ADDR_DDR_NET,
+                                            QLA8XXX_ADDR_DDR_NET_MAX)))) {
+                       ret_val = QLA_ERROR;
+                       goto exit_ms_mem_write_unlock;
+               }
+
+               ret_val = ha->isp_ops->wr_reg_indirect(ha,
+                                                      MD_MIU_TEST_AGT_ADDR_LO,
+                                                      addr);
+               /* Write data */
+               ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+                                               MD_MIU_TEST_AGT_WRDATA_LO,
+                                               *data++);
+               ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+                                               MD_MIU_TEST_AGT_WRDATA_HI,
+                                               *data++);
+               ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+                                               MD_MIU_TEST_AGT_WRDATA_ULO,
+                                               *data++);
+               ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+                                               MD_MIU_TEST_AGT_WRDATA_UHI,
+                                               *data++);
+               if (ret_val == QLA_ERROR) {
+                       ql4_printk(KERN_ERR, ha, "%s: write to AGT_WRDATA failed\n",
+                                  __func__);
+                       goto exit_ms_mem_write_unlock;
+               }
+
+               /* Check write status */
+               ret_val = ha->isp_ops->wr_reg_indirect(ha, MD_MIU_TEST_AGT_CTRL,
+                                                      MIU_TA_CTL_WRITE_ENABLE);
+               ret_val |= ha->isp_ops->wr_reg_indirect(ha,
+                                                       MD_MIU_TEST_AGT_CTRL,
+                                                       MIU_TA_CTL_WRITE_START);
+               if (ret_val == QLA_ERROR) {
+                       ql4_printk(KERN_ERR, ha, "%s: write to AGT_CTRL failed\n",
+                                  __func__);
+                       goto exit_ms_mem_write_unlock;
+               }
+
+               for (j = 0; j < MAX_CTL_CHECK; j++) {
+                       ret_val = ha->isp_ops->rd_reg_indirect(ha,
+                                                       MD_MIU_TEST_AGT_CTRL,
+                                                       &agt_ctrl);
+                       if (ret_val == QLA_ERROR) {
+                               ql4_printk(KERN_ERR, ha, "%s: failed to read MD_MIU_TEST_AGT_CTRL\n",
+                                          __func__);
+                               goto exit_ms_mem_write_unlock;
+                       }
+                       if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
+                               break;
+               }
+
+               /* Status check failed */
+               if (j >= MAX_CTL_CHECK) {
+                       printk_ratelimited(KERN_ERR "%s: MS memory write failed!\n",
+                                          __func__);
+                       ret_val = QLA_ERROR;
+                       goto exit_ms_mem_write_unlock;
+               }
+       }
+
+exit_ms_mem_write_unlock:
+       write_unlock_irqrestore(&ha->hw_lock, flags);
+
+exit_ms_mem_write:
+       return ret_val;
+}
+
 static int
 qla4_82xx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
 {
@@ -1714,6 +1821,101 @@ void qla4_82xx_rom_lock_recovery(struct scsi_qla_host *ha)
        qla4_82xx_rom_unlock(ha);
 }
 
+static uint32_t ql4_84xx_poll_wait_for_ready(struct scsi_qla_host *ha,
+                                            uint32_t addr1, uint32_t mask)
+{
+       unsigned long timeout;
+       uint32_t rval = QLA_SUCCESS;
+       uint32_t temp;
+
+       timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+       do {
+               ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+               if ((temp & mask) != 0)
+                       break;
+
+               if (time_after_eq(jiffies, timeout)) {
+                       ql4_printk(KERN_INFO, ha, "Error in processing rdmdio entry\n");
+                       return QLA_ERROR;
+               }
+       } while (1);
+
+       return rval;
+}
+
+uint32_t ql4_84xx_ipmdio_rd_reg(struct scsi_qla_host *ha, uint32_t addr1,
+                               uint32_t addr3, uint32_t mask, uint32_t addr,
+                               uint32_t *data_ptr)
+{
+       int rval = QLA_SUCCESS;
+       uint32_t temp;
+       uint32_t data;
+
+       rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+       if (rval)
+               goto exit_ipmdio_rd_reg;
+
+       temp = (0x40000000 | addr);
+       ha->isp_ops->wr_reg_indirect(ha, addr1, temp);
+
+       rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+       if (rval)
+               goto exit_ipmdio_rd_reg;
+
+       ha->isp_ops->rd_reg_indirect(ha, addr3, &data);
+       *data_ptr = data;
+
+exit_ipmdio_rd_reg:
+       return rval;
+}
+
+
+static uint32_t ql4_84xx_poll_wait_ipmdio_bus_idle(struct scsi_qla_host *ha,
+                                                   uint32_t addr1,
+                                                   uint32_t addr2,
+                                                   uint32_t addr3,
+                                                   uint32_t mask)
+{
+       unsigned long timeout;
+       uint32_t temp;
+       uint32_t rval = QLA_SUCCESS;
+
+       timeout = jiffies + msecs_to_jiffies(TIMEOUT_100_MS);
+       do {
+               ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3, mask, addr2, &temp);
+               if ((temp & 0x1) != 1)
+                       break;
+               if (time_after_eq(jiffies, timeout)) {
+                       ql4_printk(KERN_INFO, ha, "Error in processing mdiobus idle\n");
+                       return QLA_ERROR;
+               }
+       } while (1);
+
+       return rval;
+}
+
+static int ql4_84xx_ipmdio_wr_reg(struct scsi_qla_host *ha,
+                                 uint32_t addr1, uint32_t addr3,
+                                 uint32_t mask, uint32_t addr,
+                                 uint32_t value)
+{
+       int rval = QLA_SUCCESS;
+
+       rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+       if (rval)
+               goto exit_ipmdio_wr_reg;
+
+       ha->isp_ops->wr_reg_indirect(ha, addr3, value);
+       ha->isp_ops->wr_reg_indirect(ha, addr1, addr);
+
+       rval = ql4_84xx_poll_wait_for_ready(ha, addr1, mask);
+       if (rval)
+               goto exit_ipmdio_wr_reg;
+
+exit_ipmdio_wr_reg:
+       return rval;
+}
+
 static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
                                struct qla8xxx_minidump_entry_hdr *entry_hdr,
                                uint32_t **d_ptr)
@@ -1822,7 +2024,7 @@ error_exit:
        return rval;
 }
 
-static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha,
+static int qla4_8xxx_minidump_pex_dma_read(struct scsi_qla_host *ha,
                                struct qla8xxx_minidump_entry_hdr *entry_hdr,
                                uint32_t **d_ptr)
 {
@@ -1899,11 +2101,11 @@ static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha,
                dma_desc.cmd.read_data_size = size;
 
                /* Prepare: Write pex-dma descriptor to MS memory. */
-               rval = qla4_83xx_ms_mem_write_128b(ha,
+               rval = qla4_8xxx_ms_mem_write_128b(ha,
                              (uint64_t)m_hdr->desc_card_addr,
                              (uint32_t *)&dma_desc,
                              (sizeof(struct qla4_83xx_pex_dma_descriptor)/16));
-               if (rval == -1) {
+               if (rval != QLA_SUCCESS) {
                        ql4_printk(KERN_INFO, ha,
                                   "%s: Error writing rdmem-dma-init to MS !!!\n",
                                   __func__);
@@ -2359,17 +2561,10 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
        uint32_t *data_ptr = *d_ptr;
        int rval = QLA_SUCCESS;
 
-       if (is_qla8032(ha) || is_qla8042(ha)) {
-               rval = qla4_83xx_minidump_pex_dma_read(ha, entry_hdr,
-                                                      &data_ptr);
-               if (rval != QLA_SUCCESS) {
-                       rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
-                                                                 &data_ptr);
-               }
-       } else {
+       rval = qla4_8xxx_minidump_pex_dma_read(ha, entry_hdr, &data_ptr);
+       if (rval != QLA_SUCCESS)
                rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
                                                          &data_ptr);
-       }
        *d_ptr = data_ptr;
        return rval;
 }
@@ -2440,6 +2635,227 @@ exit_process_pollrd:
        return rval;
 }
 
+static uint32_t qla4_84xx_minidump_process_rddfe(struct scsi_qla_host *ha,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       int loop_cnt;
+       uint32_t addr1, addr2, value, data, temp, wrval;
+       uint8_t stride, stride2;
+       uint16_t count;
+       uint32_t poll, mask, data_size, modify_mask;
+       uint32_t wait_count = 0;
+       uint32_t *data_ptr = *d_ptr;
+       struct qla8044_minidump_entry_rddfe *rddfe;
+       uint32_t rval = QLA_SUCCESS;
+
+       rddfe = (struct qla8044_minidump_entry_rddfe *)entry_hdr;
+       addr1 = le32_to_cpu(rddfe->addr_1);
+       value = le32_to_cpu(rddfe->value);
+       stride = le32_to_cpu(rddfe->stride);
+       stride2 = le32_to_cpu(rddfe->stride2);
+       count = le32_to_cpu(rddfe->count);
+
+       poll = le32_to_cpu(rddfe->poll);
+       mask = le32_to_cpu(rddfe->mask);
+       modify_mask = le32_to_cpu(rddfe->modify_mask);
+       data_size = le32_to_cpu(rddfe->data_size);
+
+       addr2 = addr1 + stride;
+
+       for (loop_cnt = 0x0; loop_cnt < count; loop_cnt++) {
+               ha->isp_ops->wr_reg_indirect(ha, addr1, (0x40000000 | value));
+
+               wait_count = 0;
+               while (wait_count < poll) {
+                       ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+                       if ((temp & mask) != 0)
+                               break;
+                       wait_count++;
+               }
+
+               if (wait_count == poll) {
+                       ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
+                       rval = QLA_ERROR;
+                       goto exit_process_rddfe;
+               } else {
+                       ha->isp_ops->rd_reg_indirect(ha, addr2, &temp);
+                       temp = temp & modify_mask;
+                       temp = (temp | ((loop_cnt << 16) | loop_cnt));
+                       wrval = ((temp << 16) | temp);
+
+                       ha->isp_ops->wr_reg_indirect(ha, addr2, wrval);
+                       ha->isp_ops->wr_reg_indirect(ha, addr1, value);
+
+                       wait_count = 0;
+                       while (wait_count < poll) {
+                               ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+                               if ((temp & mask) != 0)
+                                       break;
+                               wait_count++;
+                       }
+                       if (wait_count == poll) {
+                               ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+                                          __func__);
+                               rval = QLA_ERROR;
+                               goto exit_process_rddfe;
+                       }
+
+                       ha->isp_ops->wr_reg_indirect(ha, addr1,
+                                                    ((0x40000000 | value) +
+                                                    stride2));
+                       wait_count = 0;
+                       while (wait_count < poll) {
+                               ha->isp_ops->rd_reg_indirect(ha, addr1, &temp);
+                               if ((temp & mask) != 0)
+                                       break;
+                               wait_count++;
+                       }
+
+                       if (wait_count == poll) {
+                               ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n",
+                                          __func__);
+                               rval = QLA_ERROR;
+                               goto exit_process_rddfe;
+                       }
+
+                       ha->isp_ops->rd_reg_indirect(ha, addr2, &data);
+
+                       *data_ptr++ = cpu_to_le32(wrval);
+                       *data_ptr++ = cpu_to_le32(data);
+               }
+       }
+
+       *d_ptr = data_ptr;
+exit_process_rddfe:
+       return rval;
+}
+
+static uint32_t qla4_84xx_minidump_process_rdmdio(struct scsi_qla_host *ha,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       int rval = QLA_SUCCESS;
+       uint32_t addr1, addr2, value1, value2, data, selval;
+       uint8_t stride1, stride2;
+       uint32_t addr3, addr4, addr5, addr6, addr7;
+       uint16_t count, loop_cnt;
+       uint32_t poll, mask;
+       uint32_t *data_ptr = *d_ptr;
+       struct qla8044_minidump_entry_rdmdio *rdmdio;
+
+       rdmdio = (struct qla8044_minidump_entry_rdmdio *)entry_hdr;
+       addr1 = le32_to_cpu(rdmdio->addr_1);
+       addr2 = le32_to_cpu(rdmdio->addr_2);
+       value1 = le32_to_cpu(rdmdio->value_1);
+       stride1 = le32_to_cpu(rdmdio->stride_1);
+       stride2 = le32_to_cpu(rdmdio->stride_2);
+       count = le32_to_cpu(rdmdio->count);
+
+       poll = le32_to_cpu(rdmdio->poll);
+       mask = le32_to_cpu(rdmdio->mask);
+       value2 = le32_to_cpu(rdmdio->value_2);
+
+       addr3 = addr1 + stride1;
+
+       for (loop_cnt = 0; loop_cnt < count; loop_cnt++) {
+               rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
+                                                        addr3, mask);
+               if (rval)
+                       goto exit_process_rdmdio;
+
+               addr4 = addr2 - stride1;
+               rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr4,
+                                            value2);
+               if (rval)
+                       goto exit_process_rdmdio;
+
+               addr5 = addr2 - (2 * stride1);
+               rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask, addr5,
+                                            value1);
+               if (rval)
+                       goto exit_process_rdmdio;
+
+               addr6 = addr2 - (3 * stride1);
+               rval = ql4_84xx_ipmdio_wr_reg(ha, addr1, addr3, mask,
+                                            addr6, 0x2);
+               if (rval)
+                       goto exit_process_rdmdio;
+
+               rval = ql4_84xx_poll_wait_ipmdio_bus_idle(ha, addr1, addr2,
+                                                        addr3, mask);
+               if (rval)
+                       goto exit_process_rdmdio;
+
+               addr7 = addr2 - (4 * stride1);
+               rval = ql4_84xx_ipmdio_rd_reg(ha, addr1, addr3,
+                                                     mask, addr7, &data);
+               if (rval)
+                       goto exit_process_rdmdio;
+
+               selval = (value2 << 18) | (value1 << 2) | 2;
+
+               stride2 = le32_to_cpu(rdmdio->stride_2);
+               *data_ptr++ = cpu_to_le32(selval);
+               *data_ptr++ = cpu_to_le32(data);
+
+               value1 = value1 + stride2;
+               *d_ptr = data_ptr;
+       }
+
+exit_process_rdmdio:
+       return rval;
+}
+
+static uint32_t qla4_84xx_minidump_process_pollwr(struct scsi_qla_host *ha,
+                               struct qla8xxx_minidump_entry_hdr *entry_hdr,
+                               uint32_t **d_ptr)
+{
+       uint32_t addr1, addr2, value1, value2, poll, mask, r_value;
+       struct qla8044_minidump_entry_pollwr *pollwr_hdr;
+       uint32_t wait_count = 0;
+       uint32_t rval = QLA_SUCCESS;
+
+       pollwr_hdr = (struct qla8044_minidump_entry_pollwr *)entry_hdr;
+       addr1 = le32_to_cpu(pollwr_hdr->addr_1);
+       addr2 = le32_to_cpu(pollwr_hdr->addr_2);
+       value1 = le32_to_cpu(pollwr_hdr->value_1);
+       value2 = le32_to_cpu(pollwr_hdr->value_2);
+
+       poll = le32_to_cpu(pollwr_hdr->poll);
+       mask = le32_to_cpu(pollwr_hdr->mask);
+
+       while (wait_count < poll) {
+               ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
+
+               if ((r_value & poll) != 0)
+                       break;
+
+               wait_count++;
+       }
+
+       if (wait_count == poll) {
+               ql4_printk(KERN_ERR, ha, "%s: TIMEOUT\n", __func__);
+               rval = QLA_ERROR;
+               goto exit_process_pollwr;
+       }
+
+       ha->isp_ops->wr_reg_indirect(ha, addr2, value2);
+       ha->isp_ops->wr_reg_indirect(ha, addr1, value1);
+
+       wait_count = 0;
+       while (wait_count < poll) {
+               ha->isp_ops->rd_reg_indirect(ha, addr1, &r_value);
+
+               if ((r_value & poll) != 0)
+                       break;
+               wait_count++;
+       }
+
+exit_process_pollwr:
+       return rval;
+}
+
 static void qla83xx_minidump_process_rdmux2(struct scsi_qla_host *ha,
                                struct qla8xxx_minidump_entry_hdr *entry_hdr,
                                uint32_t **d_ptr)
@@ -2753,6 +3169,24 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
                        if (rval != QLA_SUCCESS)
                                qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
                        break;
+               case QLA8044_RDDFE:
+                       rval = qla4_84xx_minidump_process_rddfe(ha, entry_hdr,
+                                                               &data_ptr);
+                       if (rval != QLA_SUCCESS)
+                               qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                       break;
+               case QLA8044_RDMDIO:
+                       rval = qla4_84xx_minidump_process_rdmdio(ha, entry_hdr,
+                                                                &data_ptr);
+                       if (rval != QLA_SUCCESS)
+                               qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                       break;
+               case QLA8044_POLLWR:
+                       rval = qla4_84xx_minidump_process_pollwr(ha, entry_hdr,
+                                                                &data_ptr);
+                       if (rval != QLA_SUCCESS)
+                               qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
+                       break;
                case QLA8XXX_RDNOP:
                default:
                        qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
index 14500a0f62cc34891bfbe43f7a2fe5df9908073a..337d9fcf64175d155cdede50d58fa45f68c1f31e 100644 (file)
@@ -858,6 +858,9 @@ struct crb_addr_pair {
 #define QLA83XX_POLLRD 35
 #define QLA83XX_RDMUX2 36
 #define QLA83XX_POLLRDMWR  37
+#define QLA8044_RDDFE  38
+#define QLA8044_RDMDIO 39
+#define QLA8044_POLLWR 40
 #define QLA8XXX_RDROM  71
 #define QLA8XXX_RDMEM  72
 #define QLA8XXX_CNTRL  98
index 459b9f7186fd81804745d618dccb603456c633ae..32020637620649d6068876b532267935e3918647 100644 (file)
@@ -83,12 +83,12 @@ MODULE_PARM_DESC(ql4xsess_recovery_tmo,
                " Target Session Recovery Timeout.\n"
                "\t\t  Default: 120 sec.");
 
-int ql4xmdcapmask = 0x1F;
+int ql4xmdcapmask = 0;
 module_param(ql4xmdcapmask, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xmdcapmask,
                 " Set the Minidump driver capture mask level.\n"
-                "\t\t  Default is 0x1F.\n"
-                "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
+                "\t\t  Default is 0 (firmware default capture mask)\n"
+                "\t\t  Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F, 0xFF");
 
 int ql4xenablemd = 1;
 module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
@@ -1742,6 +1742,9 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
        struct sockaddr *dst_addr;
        struct scsi_qla_host *ha;
 
+       if (!qla_ep)
+               return -ENOTCONN;
+
        ha = to_qla_host(qla_ep->host);
        DEBUG2(ql4_printk(KERN_INFO, ha, "%s: host: %ld\n", __func__,
                          ha->host_no));
@@ -1749,9 +1752,6 @@ static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep,
        switch (param) {
        case ISCSI_PARAM_CONN_PORT:
        case ISCSI_PARAM_CONN_ADDRESS:
-               if (!qla_ep)
-                       return -ENOTCONN;
-
                dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
                if (!dst_addr)
                        return -ENOTCONN;
@@ -2879,7 +2879,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
        struct iscsi_conn *conn;
        struct qla_conn *qla_conn;
        struct sockaddr *dst_addr;
-       int len = 0;
 
        conn = cls_conn->dd_data;
        qla_conn = conn->dd_data;
@@ -2893,9 +2892,6 @@ static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn,
        default:
                return iscsi_conn_get_param(cls_conn, param, buf);
        }
-
-       return len;
-
 }
 
 int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index)
@@ -3569,14 +3565,13 @@ static int qla4xxx_copy_from_fwddb_param(struct iscsi_bus_flash_session *sess,
        if (test_bit(OPT_IPV6_DEVICE, &options)) {
                conn->ipv6_traffic_class = fw_ddb_entry->ipv4_tos;
 
-               conn->link_local_ipv6_addr = kzalloc(IPv6_ADDR_LEN, GFP_KERNEL);
+               conn->link_local_ipv6_addr = kmemdup(
+                                       fw_ddb_entry->link_local_ipv6_addr,
+                                       IPv6_ADDR_LEN, GFP_KERNEL);
                if (!conn->link_local_ipv6_addr) {
                        rc = -ENOMEM;
                        goto exit_copy;
                }
-
-               memcpy(conn->link_local_ipv6_addr,
-                      fw_ddb_entry->link_local_ipv6_addr, IPv6_ADDR_LEN);
        } else {
                conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
        }
@@ -4565,6 +4560,7 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
             test_bit(DPC_LINK_CHANGED, &ha->dpc_flags) ||
             test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) ||
             test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
+            test_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags) ||
             test_bit(DPC_AEN, &ha->dpc_flags)) {
                DEBUG2(printk("scsi%ld: %s: scheduling dpc routine"
                              " - dpc flags = 0x%lx\n",
@@ -4862,9 +4858,6 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
                    ha->host_no, __func__));
                status = ha->isp_ops->reset_firmware(ha);
                if (status == QLA_SUCCESS) {
-                       if (!test_bit(AF_FW_RECOVERY, &ha->flags))
-                               qla4xxx_cmd_wait(ha);
-
                        ha->isp_ops->disable_intrs(ha);
                        qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
                        qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@@ -5432,6 +5425,11 @@ dpc_post_reset_ha:
                                qla4xxx_relogin_all_devices(ha);
                }
        }
+       if (test_and_clear_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags)) {
+               if (qla4xxx_sysfs_ddb_export(ha))
+                       ql4_printk(KERN_ERR, ha, "%s: Error exporting ddb to sysfs\n",
+                                  __func__);
+       }
 }
 
 /**
@@ -8409,7 +8407,7 @@ exit_ddb_del:
  *
  * Export the firmware DDB for all send targets and normal targets to sysfs.
  **/
-static int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
+int qla4xxx_sysfs_ddb_export(struct scsi_qla_host *ha)
 {
        struct dev_db_entry *fw_ddb_entry = NULL;
        dma_addr_t fw_ddb_entry_dma;
@@ -8847,11 +8845,8 @@ skip_retry_init:
                ql4_printk(KERN_ERR, ha,
                           "%s: No iSCSI boot target configured\n", __func__);
 
-       if (qla4xxx_sysfs_ddb_export(ha))
-               ql4_printk(KERN_ERR, ha,
-                          "%s: Error exporting ddb to sysfs\n", __func__);
-
-               /* Perform the build ddb list and login to each */
+       set_bit(DPC_SYSFS_DDB_EXPORT, &ha->dpc_flags);
+       /* Perform the build ddb list and login to each */
        qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
        iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
        qla4xxx_wait_login_resp_boot_tgt(ha);
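Editor's note: the probe-path and DPC hunks above move the sysfs DDB export out of adapter init: probe only sets DPC_SYSFS_DDB_EXPORT, the timer now wakes the DPC routine when that bit is set, and the DPC worker performs the export and clears the bit with test_and_clear_bit(). A single-threaded userspace sketch of this request/consume pattern (the _s helpers stand in for the kernel's atomic bitops, and the bit number is made up for the example):

#include <stdio.h>

#define DPC_SYSFS_DDB_EXPORT    1       /* bit number invented for the sketch */

static unsigned long dpc_flags;

/* Single-threaded stand-ins for the kernel's atomic bitops. */
static void set_bit_s(int nr, unsigned long *addr)
{
        *addr |= 1UL << nr;
}

static int test_and_clear_bit_s(int nr, unsigned long *addr)
{
        int old = (*addr >> nr) & 1;

        *addr &= ~(1UL << nr);
        return old;
}

static int sysfs_ddb_export(void)
{
        puts("exporting firmware DDBs to sysfs");
        return 0;
}

/* Probe only records that the export is wanted... */
static void probe(void)
{
        set_bit_s(DPC_SYSFS_DDB_EXPORT, &dpc_flags);
}

/* ...and the deferred worker performs it later, clearing the request. */
static void do_dpc(void)
{
        if (test_and_clear_bit_s(DPC_SYSFS_DDB_EXPORT, &dpc_flags))
                if (sysfs_ddb_export())
                        fprintf(stderr, "error exporting ddb to sysfs\n");
}

int main(void)
{
        probe();
        do_dpc();
        return 0;
}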
index c6ba0a6b8458822868cd728fd971eb59a7cc28f8..f11eaa773339dc4fdcaa1b0590385c1bd8c4476c 100644
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.04.00-k4"
+#define QLA4XXX_DRIVER_VERSION "5.04.00-k6"
index f3e9cc038d1d9126dadddb686b356b43c43ba1c5..1328a2621070948980d6978fd0eb088b6ec11bbe 100644
@@ -130,6 +130,7 @@ static const char * scsi_debug_version_date = "20100324";
 #define SCSI_DEBUG_OPT_DIF_ERR   32
 #define SCSI_DEBUG_OPT_DIX_ERR   64
 #define SCSI_DEBUG_OPT_MAC_TIMEOUT  128
+#define SCSI_DEBUG_OPT_SHORT_TRANSFER  256
 /* When "every_nth" > 0 then modulo "every_nth" commands:
  *   - a no response is simulated if SCSI_DEBUG_OPT_TIMEOUT is set
  *   - a RECOVERED_ERROR is simulated on successful read and write
@@ -3583,6 +3584,7 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
        int inj_transport = 0;
        int inj_dif = 0;
        int inj_dix = 0;
+       int inj_short = 0;
        int delay_override = 0;
        int unmap = 0;
 
@@ -3628,6 +3630,8 @@ int scsi_debug_queuecommand_lck(struct scsi_cmnd *SCpnt, done_funct_t done)
                        inj_dif = 1; /* to reads and writes below */
                else if (SCSI_DEBUG_OPT_DIX_ERR & scsi_debug_opts)
                        inj_dix = 1; /* to reads and writes below */
+               else if (SCSI_DEBUG_OPT_SHORT_TRANSFER & scsi_debug_opts)
+                       inj_short = 1;
        }
 
        if (devip->wlun) {
@@ -3744,6 +3748,10 @@ read:
                if (scsi_debug_fake_rw)
                        break;
                get_data_transfer_info(cmd, &lba, &num, &ei_lba);
+
+               if (inj_short)
+                       num /= 2;
+
                errsts = resp_read(SCpnt, lba, num, devip, ei_lba);
                if (inj_recovered && (0 == errsts)) {
                        mk_sense_buffer(devip, RECOVERED_ERROR,
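Editor's note: the scsi_debug hunks add an option bit, SCSI_DEBUG_OPT_SHORT_TRANSFER (256), that makes the selected "every_nth" commands report only half of the requested blocks, so upper layers can be exercised against short reads. A compact sketch of the injection decision (this shows the idea only, not the driver's exact control flow):

#include <stdio.h>

#define OPT_SHORT_TRANSFER      256     /* mirrors the new option bit value */

/* Every 'every_nth' commands, if the short-transfer option is set,
 * report only half of the requested blocks. */
static unsigned int transfer_len(unsigned long cmd_count, int every_nth,
                                 unsigned int opts, unsigned int num)
{
        if (every_nth > 0 && (cmd_count % every_nth) == 0 &&
            (opts & OPT_SHORT_TRANSFER))
                num /= 2;
        return num;
}

int main(void)
{
        printf("cmd 10 -> %u blocks\n", transfer_len(10, 5, OPT_SHORT_TRANSFER, 8));
        printf("cmd 11 -> %u blocks\n", transfer_len(11, 5, OPT_SHORT_TRANSFER, 8));
        return 0;
}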
index f17aa7aa78796e7f6d358b8cd5f68fd43cfee4d4..47a1ffc4c904f8aa17269adedc401cd4ce6e5990 100644
@@ -1029,6 +1029,7 @@ retry:
                rtn = NEEDS_RETRY;
        } else {
                timeleft = wait_for_completion_timeout(&done, timeout);
+               rtn = SUCCESS;
        }
 
        shost->eh_action = NULL;
@@ -2306,6 +2307,12 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
        }
 
        scmd = scsi_get_command(dev, GFP_KERNEL);
+       if (!scmd) {
+               rtn = FAILED;
+               put_device(&dev->sdev_gendev);
+               goto out_put_autopm_host;
+       }
+
        blk_rq_init(NULL, &req);
        scmd->request = &req;
 
index a0c95cac91f0fe55681830af4477e9513ec19a32..be0d5fad999d149cba321080cc7c5ee30a9da572 100644
@@ -512,68 +512,6 @@ void scsi_run_host_queues(struct Scsi_Host *shost)
                scsi_run_queue(sdev->request_queue);
 }
 
-static void __scsi_release_buffers(struct scsi_cmnd *, int);
-
-/*
- * Function:    scsi_end_request()
- *
- * Purpose:     Post-processing of completed commands (usually invoked at end
- *             of upper level post-processing and scsi_io_completion).
- *
- * Arguments:   cmd     - command that is complete.
- *              error    - 0 if I/O indicates success, < 0 for I/O error.
- *              bytes    - number of bytes of completed I/O
- *             requeue  - indicates whether we should requeue leftovers.
- *
- * Lock status: Assumed that lock is not held upon entry.
- *
- * Returns:     cmd if requeue required, NULL otherwise.
- *
- * Notes:       This is called for block device requests in order to
- *              mark some number of sectors as complete.
- * 
- *             We are guaranteeing that the request queue will be goosed
- *             at some point during this call.
- * Notes:      If cmd was requeued, upon return it will be a stale pointer.
- */
-static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
-                                         int bytes, int requeue)
-{
-       struct request_queue *q = cmd->device->request_queue;
-       struct request *req = cmd->request;
-
-       /*
-        * If there are blocks left over at the end, set up the command
-        * to queue the remainder of them.
-        */
-       if (blk_end_request(req, error, bytes)) {
-               /* kill remainder if no retrys */
-               if (error && scsi_noretry_cmd(cmd))
-                       blk_end_request_all(req, error);
-               else {
-                       if (requeue) {
-                               /*
-                                * Bleah.  Leftovers again.  Stick the
-                                * leftovers in the front of the
-                                * queue, and goose the queue again.
-                                */
-                               scsi_release_buffers(cmd);
-                               scsi_requeue_command(q, cmd);
-                               cmd = NULL;
-                       }
-                       return cmd;
-               }
-       }
-
-       /*
-        * This will goose the queue request function at the end, so we don't
-        * need to worry about launching another command.
-        */
-       __scsi_release_buffers(cmd, 0);
-       scsi_next_command(cmd);
-       return NULL;
-}
-
 static inline unsigned int scsi_sgtable_index(unsigned short nents)
 {
        unsigned int index;
@@ -625,30 +563,10 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
        __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
 }
 
-static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
-{
-
-       if (cmd->sdb.table.nents)
-               scsi_free_sgtable(&cmd->sdb);
-
-       memset(&cmd->sdb, 0, sizeof(cmd->sdb));
-
-       if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
-               struct scsi_data_buffer *bidi_sdb =
-                       cmd->request->next_rq->special;
-               scsi_free_sgtable(bidi_sdb);
-               kmem_cache_free(scsi_sdb_cache, bidi_sdb);
-               cmd->request->next_rq->special = NULL;
-       }
-
-       if (scsi_prot_sg_count(cmd))
-               scsi_free_sgtable(cmd->prot_sdb);
-}
-
 /*
  * Function:    scsi_release_buffers()
  *
- * Purpose:     Completion processing for block device I/O requests.
+ * Purpose:     Free resources allocated for a scsi_command.
  *
  * Arguments:   cmd    - command that we are bailing.
  *
@@ -659,15 +577,29 @@ static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
  * Notes:       In the event that an upper level driver rejects a
  *             command, we must release resources allocated during
  *             the __init_io() function.  Primarily this would involve
- *             the scatter-gather table, and potentially any bounce
- *             buffers.
+ *             the scatter-gather table.
  */
 void scsi_release_buffers(struct scsi_cmnd *cmd)
 {
-       __scsi_release_buffers(cmd, 1);
+       if (cmd->sdb.table.nents)
+               scsi_free_sgtable(&cmd->sdb);
+
+       memset(&cmd->sdb, 0, sizeof(cmd->sdb));
+
+       if (scsi_prot_sg_count(cmd))
+               scsi_free_sgtable(cmd->prot_sdb);
 }
 EXPORT_SYMBOL(scsi_release_buffers);
 
+static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
+{
+       struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;
+
+       scsi_free_sgtable(bidi_sdb);
+       kmem_cache_free(scsi_sdb_cache, bidi_sdb);
+       cmd->request->next_rq->special = NULL;
+}
+
 /**
  * __scsi_error_from_host_byte - translate SCSI error code into errno
  * @cmd:       SCSI command (unused)
@@ -725,16 +657,9 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
  *
  * Returns:     Nothing
  *
- * Notes:       This function is matched in terms of capabilities to
- *              the function that created the scatter-gather list.
- *              In other words, if there are no bounce buffers
- *              (the normal case for most drivers), we don't need
- *              the logic to deal with cleaning up afterwards.
- *
- *             We must call scsi_end_request().  This will finish off
- *             the specified number of sectors.  If we are done, the
- *             command block will be released and the queue function
- *             will be goosed.  If we are not done then we have to
+ * Notes:       We will finish off the specified number of sectors.  If we
+ *             are done, the command block will be released and the queue
+ *             function will be goosed.  If we are not done then we have to
  *             figure out what to do next:
  *
  *             a) We can call scsi_requeue_command().  The request
@@ -743,7 +668,7 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
  *                be used if we made forward progress, or if we want
  *                to switch from READ(10) to READ(6) for example.
  *
- *             b) We can call scsi_queue_insert().  The request will
+ *             b) We can call __scsi_queue_insert().  The request will
  *                be put back on the queue and retried using the same
  *                command as before, possibly after a delay.
  *
@@ -801,6 +726,8 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        req->next_rq->resid_len = scsi_in(cmd)->resid;
 
                        scsi_release_buffers(cmd);
+                       scsi_release_bidi_buffers(cmd);
+
                        blk_end_request_all(req, 0);
 
                        scsi_next_command(cmd);
@@ -840,12 +767,25 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
        }
 
        /*
-        * A number of bytes were successfully read.  If there
-        * are leftovers and there is some kind of error
-        * (result != 0), retry the rest.
+        * If we finished all bytes in the request we are done now.
         */
-       if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
-               return;
+       if (!blk_end_request(req, error, good_bytes))
+               goto next_command;
+
+       /*
+        * Kill remainder if no retries.
+        */
+       if (error && scsi_noretry_cmd(cmd)) {
+               blk_end_request_all(req, error);
+               goto next_command;
+       }
+
+       /*
+        * If there had been no error, but we have leftover bytes in the
+        * request, just queue the command up again.
+        */
+       if (result == 0)
+               goto requeue;
 
        error = __scsi_error_from_host_byte(cmd, result);
 
@@ -973,7 +913,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
        switch (action) {
        case ACTION_FAIL:
                /* Give up and fail the remainder of the request */
-               scsi_release_buffers(cmd);
                if (!(req->cmd_flags & REQ_QUIET)) {
                        if (description)
                                scmd_printk(KERN_INFO, cmd, "%s\n",
@@ -983,12 +922,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                                scsi_print_sense("", cmd);
                        scsi_print_command(cmd);
                }
-               if (blk_end_request_err(req, error))
-                       scsi_requeue_command(q, cmd);
-               else
-                       scsi_next_command(cmd);
-               break;
+               if (!blk_end_request_err(req, error))
+                       goto next_command;
+               /*FALLTHRU*/
        case ACTION_REPREP:
+       requeue:
                /* Unprep the request and put it back at the head of the queue.
                 * A new command will be prepared and issued.
                 */
@@ -1004,6 +942,11 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
                break;
        }
+       return;
+
+next_command:
+       scsi_release_buffers(cmd);
+       scsi_next_command(cmd);
 }
 
 static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
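Editor's note: the completion-path hunks above delete scsi_end_request() and restructure scsi_io_completion() around two outcomes: either the request is fully finished (or must be killed because retries are not allowed), in which case a common next_command label releases the buffers and starts the next command, or bytes are left over and the request is requeued/reprepped. A toy model of that control flow (the names and the bytes_remain helper are illustrative only, not the kernel API):

#include <stdio.h>
#include <stdbool.h>

/* True when bytes remain in the request, loosely playing the role of
 * blk_end_request()'s return value. */
static bool bytes_remain(unsigned int done, unsigned int total)
{
        return done < total;
}

static void complete_sketch(int error, bool no_retry,
                            unsigned int good_bytes, unsigned int total)
{
        if (!bytes_remain(good_bytes, total))
                goto next_command;              /* whole request finished */

        if (error && no_retry) {
                printf("killing remainder\n");  /* blk_end_request_all() role */
                goto next_command;
        }

        if (!error) {
                printf("requeueing leftover bytes\n");  /* ACTION_REPREP path */
                return;
        }

        printf("running the error action\n");   /* ACTION_FAIL/RETRY/... */
        return;

next_command:
        printf("release buffers, start next command\n");
}

int main(void)
{
        complete_sketch(0, false, 4096, 4096);  /* fully transferred */
        complete_sketch(0, false, 2048, 4096);  /* short, no error: requeue */
        return 0;
}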
@@ -1128,15 +1071,7 @@ static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
 
 int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
 {
-       struct scsi_cmnd *cmd;
-       int ret = scsi_prep_state_check(sdev, req);
-
-       if (ret != BLKPREP_OK)
-               return ret;
-
-       cmd = scsi_get_cmd_from_req(sdev, req);
-       if (unlikely(!cmd))
-               return BLKPREP_DEFER;
+       struct scsi_cmnd *cmd = req->special;
 
        /*
         * BLOCK_PC requests may transfer data, in which case they must
@@ -1179,15 +1114,11 @@ EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
  */
 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
 {
-       struct scsi_cmnd *cmd;
-       int ret = scsi_prep_state_check(sdev, req);
-
-       if (ret != BLKPREP_OK)
-               return ret;
+       struct scsi_cmnd *cmd = req->special;
 
        if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
                         && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
-               ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
+               int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
                if (ret != BLKPREP_OK)
                        return ret;
        }
@@ -1197,16 +1128,13 @@ int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
         */
        BUG_ON(!req->nr_phys_segments);
 
-       cmd = scsi_get_cmd_from_req(sdev, req);
-       if (unlikely(!cmd))
-               return BLKPREP_DEFER;
-
        memset(cmd->cmnd, 0, BLK_MAX_CDB);
        return scsi_init_io(cmd, GFP_ATOMIC);
 }
 EXPORT_SYMBOL(scsi_setup_fs_cmnd);
 
-int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
+static int
+scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
 {
        int ret = BLKPREP_OK;
 
@@ -1258,9 +1186,9 @@ int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
        }
        return ret;
 }
-EXPORT_SYMBOL(scsi_prep_state_check);
 
-int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
+static int
+scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 {
        struct scsi_device *sdev = q->queuedata;
 
@@ -1291,18 +1219,44 @@ int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
 
        return ret;
 }
-EXPORT_SYMBOL(scsi_prep_return);
 
-int scsi_prep_fn(struct request_queue *q, struct request *req)
+static int scsi_prep_fn(struct request_queue *q, struct request *req)
 {
        struct scsi_device *sdev = q->queuedata;
-       int ret = BLKPREP_KILL;
+       struct scsi_cmnd *cmd;
+       int ret;
 
-       if (req->cmd_type == REQ_TYPE_BLOCK_PC)
+       ret = scsi_prep_state_check(sdev, req);
+       if (ret != BLKPREP_OK)
+               goto out;
+
+       cmd = scsi_get_cmd_from_req(sdev, req);
+       if (unlikely(!cmd)) {
+               ret = BLKPREP_DEFER;
+               goto out;
+       }
+
+       if (req->cmd_type == REQ_TYPE_FS)
+               ret = scsi_cmd_to_driver(cmd)->init_command(cmd);
+       else if (req->cmd_type == REQ_TYPE_BLOCK_PC)
                ret = scsi_setup_blk_pc_cmnd(sdev, req);
+       else
+               ret = BLKPREP_KILL;
+
+out:
        return scsi_prep_return(q, req, ret);
 }
-EXPORT_SYMBOL(scsi_prep_fn);
+
+static void scsi_unprep_fn(struct request_queue *q, struct request *req)
+{
+       if (req->cmd_type == REQ_TYPE_FS) {
+               struct scsi_cmnd *cmd = req->special;
+               struct scsi_driver *drv = scsi_cmd_to_driver(cmd);
+
+               if (drv->uninit_command)
+                       drv->uninit_command(cmd);
+       }
+}
 
 /*
  * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
@@ -1723,6 +1677,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
                return NULL;
 
        blk_queue_prep_rq(q, scsi_prep_fn);
+       blk_queue_unprep_rq(q, scsi_unprep_fn);
        blk_queue_softirq_done(q, scsi_softirq_done);
        blk_queue_rq_timed_out(q, scsi_times_out);
        blk_queue_lld_busy(q, scsi_lld_busy);
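Editor's note: taken together, the prep-path hunks move request preparation into the common scsi_prep_fn(): it does the state check, allocates the command, and then dispatches REQ_TYPE_FS requests to a per-driver init_command hook (with uninit_command as the teardown), so sd and sr no longer install their own block-layer prep_rq/unprep_rq handlers. A minimal userspace model of dispatching through a struct of function pointers (the _sketch names and uld_ops struct are invented; the real hooks live in struct scsi_driver):

#include <stdio.h>

enum req_type_sketch { REQ_FS_SKETCH, REQ_BLOCK_PC_SKETCH };

struct cmnd_sketch { const char *name; };

struct uld_ops {
        int  (*init_command)(struct cmnd_sketch *cmd);
        void (*uninit_command)(struct cmnd_sketch *cmd);
};

static int sd_init_command_sketch(struct cmnd_sketch *cmd)
{
        printf("sd builds READ/WRITE CDB for %s\n", cmd->name);
        return 0;
}

static void sd_uninit_command_sketch(struct cmnd_sketch *cmd)
{
        printf("sd frees per-command resources for %s\n", cmd->name);
}

static const struct uld_ops sd_ops = {
        .init_command   = sd_init_command_sketch,
        .uninit_command = sd_uninit_command_sketch,
};

/* Common prep: per-ULD setup for FS requests, shared setup otherwise. */
static int prep_fn_sketch(enum req_type_sketch type, const struct uld_ops *drv,
                          struct cmnd_sketch *cmd)
{
        if (type == REQ_FS_SKETCH)
                return drv->init_command(cmd);
        printf("common BLOCK_PC setup for %s\n", cmd->name);
        return 0;
}

int main(void)
{
        struct cmnd_sketch c = { "cmd0" };

        prep_fn_sketch(REQ_FS_SKETCH, &sd_ops, &c);
        sd_ops.uninit_command(&c);      /* unprep path */
        return 0;
}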
index 96af195224f2b9dae866ea109afb1d894c142c80..e9689d57ccb6cb558925b63091ff3a95accdef28 100644
@@ -109,6 +109,8 @@ static int sd_suspend_system(struct device *);
 static int sd_suspend_runtime(struct device *);
 static int sd_resume(struct device *);
 static void sd_rescan(struct device *);
+static int sd_init_command(struct scsi_cmnd *SCpnt);
+static void sd_uninit_command(struct scsi_cmnd *SCpnt);
 static int sd_done(struct scsi_cmnd *);
 static int sd_eh_action(struct scsi_cmnd *, int);
 static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
@@ -503,6 +505,8 @@ static struct scsi_driver sd_template = {
                .pm             = &sd_pm_ops,
        },
        .rescan                 = sd_rescan,
+       .init_command           = sd_init_command,
+       .uninit_command         = sd_uninit_command,
        .done                   = sd_done,
        .eh_action              = sd_eh_action,
 };
@@ -836,9 +840,9 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
        return scsi_setup_blk_pc_cmnd(sdp, rq);
 }
 
-static void sd_unprep_fn(struct request_queue *q, struct request *rq)
+static void sd_uninit_command(struct scsi_cmnd *SCpnt)
 {
-       struct scsi_cmnd *SCpnt = rq->special;
+       struct request *rq = SCpnt->request;
 
        if (rq->cmd_flags & REQ_DISCARD)
                __free_page(rq->completion_data);
@@ -850,18 +854,10 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
        }
 }
 
-/**
- *     sd_prep_fn - build a scsi (read or write) command from
- *     information in the request structure.
- *     @SCpnt: pointer to mid-level's per scsi command structure that
- *     contains request and into which the scsi command is written
- *
- *     Returns 1 if successful and 0 if error (or cannot be done now).
- **/
-static int sd_prep_fn(struct request_queue *q, struct request *rq)
+static int sd_init_command(struct scsi_cmnd *SCpnt)
 {
-       struct scsi_cmnd *SCpnt;
-       struct scsi_device *sdp = q->queuedata;
+       struct request *rq = SCpnt->request;
+       struct scsi_device *sdp = SCpnt->device;
        struct gendisk *disk = rq->rq_disk;
        struct scsi_disk *sdkp;
        sector_t block = blk_rq_pos(rq);
@@ -883,12 +879,6 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
        } else if (rq->cmd_flags & REQ_FLUSH) {
                ret = scsi_setup_flush_cmnd(sdp, rq);
                goto out;
-       } else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
-               ret = scsi_setup_blk_pc_cmnd(sdp, rq);
-               goto out;
-       } else if (rq->cmd_type != REQ_TYPE_FS) {
-               ret = BLKPREP_KILL;
-               goto out;
        }
        ret = scsi_setup_fs_cmnd(sdp, rq);
        if (ret != BLKPREP_OK)
@@ -900,11 +890,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
         * is used for a killable error condition */
        ret = BLKPREP_KILL;
 
-       SCSI_LOG_HLQUEUE(1, scmd_printk(KERN_INFO, SCpnt,
-                                       "sd_prep_fn: block=%llu, "
-                                       "count=%d\n",
-                                       (unsigned long long)block,
-                                       this_count));
+       SCSI_LOG_HLQUEUE(1,
+               scmd_printk(KERN_INFO, SCpnt,
+                       "%s: block=%llu, count=%d\n",
+                       __func__, (unsigned long long)block, this_count));
 
        if (!sdp || !scsi_device_online(sdp) ||
            block + blk_rq_sectors(rq) > get_capacity(disk)) {
@@ -1124,7 +1113,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
         */
        ret = BLKPREP_OK;
  out:
-       return scsi_prep_return(q, rq, ret);
+       return ret;
 }
 
 /**
@@ -1686,12 +1675,12 @@ static int sd_done(struct scsi_cmnd *SCpnt)
                                                   sshdr.ascq));
        }
 #endif
+       sdkp->medium_access_timed_out = 0;
+
        if (driver_byte(result) != DRIVER_SENSE &&
            (!sense_valid || sense_deferred))
                goto out;
 
-       sdkp->medium_access_timed_out = 0;
-
        switch (sshdr.sense_key) {
        case HARDWARE_ERROR:
        case MEDIUM_ERROR:
@@ -2875,9 +2864,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
        sd_revalidate_disk(gd);
 
-       blk_queue_prep_rq(sdp->request_queue, sd_prep_fn);
-       blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn);
-
        gd->driverfs_dev = &sdp->sdev_gendev;
        gd->flags = GENHD_FL_EXT_DEVT;
        if (sdp->removable) {
@@ -3025,8 +3011,6 @@ static int sd_remove(struct device *dev)
 
        async_synchronize_full_domain(&scsi_sd_pm_domain);
        async_synchronize_full_domain(&scsi_sd_probe_domain);
-       blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn);
-       blk_queue_unprep_rq(sdkp->device->request_queue, NULL);
        device_del(&sdkp->dev);
        del_gendisk(sdkp->disk);
        sd_shutdown(dev);
index 40d85929aefeac1e694a1462f8f5b553f5540d1f..93cbd36c990b7beef38f9c1a40b680bae4001789 100644
@@ -79,6 +79,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_WORM);
 static DEFINE_MUTEX(sr_mutex);
 static int sr_probe(struct device *);
 static int sr_remove(struct device *);
+static int sr_init_command(struct scsi_cmnd *SCpnt);
 static int sr_done(struct scsi_cmnd *);
 static int sr_runtime_suspend(struct device *dev);
 
@@ -94,6 +95,7 @@ static struct scsi_driver sr_template = {
                .remove         = sr_remove,
                .pm             = &sr_pm_ops,
        },
+       .init_command           = sr_init_command,
        .done                   = sr_done,
 };
 
@@ -378,21 +380,14 @@ static int sr_done(struct scsi_cmnd *SCpnt)
        return good_bytes;
 }
 
-static int sr_prep_fn(struct request_queue *q, struct request *rq)
+static int sr_init_command(struct scsi_cmnd *SCpnt)
 {
        int block = 0, this_count, s_size;
        struct scsi_cd *cd;
-       struct scsi_cmnd *SCpnt;
-       struct scsi_device *sdp = q->queuedata;
+       struct request *rq = SCpnt->request;
+       struct scsi_device *sdp = SCpnt->device;
        int ret;
 
-       if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
-               ret = scsi_setup_blk_pc_cmnd(sdp, rq);
-               goto out;
-       } else if (rq->cmd_type != REQ_TYPE_FS) {
-               ret = BLKPREP_KILL;
-               goto out;
-       }
        ret = scsi_setup_fs_cmnd(sdp, rq);
        if (ret != BLKPREP_OK)
                goto out;
@@ -517,7 +512,7 @@ static int sr_prep_fn(struct request_queue *q, struct request *rq)
         */
        ret = BLKPREP_OK;
  out:
-       return scsi_prep_return(q, rq, ret);
+       return ret;
 }
 
 static int sr_block_open(struct block_device *bdev, fmode_t mode)
@@ -718,7 +713,6 @@ static int sr_probe(struct device *dev)
 
        /* FIXME: need to handle a get_capabilities failure properly ?? */
        get_capabilities(cd);
-       blk_queue_prep_rq(sdev->request_queue, sr_prep_fn);
        sr_vendor_init(cd);
 
        disk->driverfs_dev = &sdev->sdev_gendev;
@@ -993,7 +987,6 @@ static int sr_remove(struct device *dev)
 
        scsi_autopm_get_device(cd->device);
 
-       blk_queue_prep_rq(cd->device->request_queue, scsi_prep_fn);
        del_gendisk(cd->disk);
 
        mutex_lock(&sr_ref_mutex);
index 636bbe0ea84cad96705dce323e7a03a0c8859f36..88220794cc986c07e8f3289f3222ef1bf1900922 100644
@@ -364,7 +364,7 @@ static int is_lun_busy(struct scsi_cmnd *cmd, int should_be_tagged)
        return( 0 );
     if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
        TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) {
-       TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n",
+       dprintk(NDEBUG_TAGS,  "scsi%d: target %d lun %d: no free tags\n",
                    H_NO(cmd), cmd->device->id, cmd->device->lun );
        return( 1 );
     }
@@ -388,7 +388,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
        !setup_use_tagged_queuing || !cmd->device->tagged_supported) {
        cmd->tag = TAG_NONE;
        hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
-       TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged "
+       dprintk(NDEBUG_TAGS,  "scsi%d: target %d lun %d now allocated by untagged "
                    "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun );
     }
     else {
@@ -397,7 +397,7 @@ static void cmd_get_tag(struct scsi_cmnd *cmd, int should_be_tagged)
        cmd->tag = find_first_zero_bit( &ta->allocated, MAX_TAGS );
        set_bit( cmd->tag, &ta->allocated );
        ta->nr_allocated++;
-       TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d "
+       dprintk(NDEBUG_TAGS,  "scsi%d: using tag %d for target %d lun %d "
                    "(now %d tags in use)\n",
                    H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun,
                    ta->nr_allocated );
@@ -415,7 +415,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd)
 
     if (cmd->tag == TAG_NONE) {
        hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
-       TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n",
+       dprintk(NDEBUG_TAGS,  "scsi%d: target %d lun %d untagged cmd finished\n",
                    H_NO(cmd), cmd->device->id, cmd->device->lun );
     }
     else if (cmd->tag >= MAX_TAGS) {
@@ -426,7 +426,7 @@ static void cmd_free_tag(struct scsi_cmnd *cmd)
        TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
        clear_bit( cmd->tag, &ta->allocated );
        ta->nr_allocated--;
-       TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n",
+       dprintk(NDEBUG_TAGS,  "scsi%d: freed tag %d for target %d lun %d\n",
                    H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun );
     }
 }
@@ -484,7 +484,7 @@ static __inline__ void initialize_SCp(struct scsi_cmnd *cmd)
 
 #include <linux/delay.h>
 
-#if 1
+#if NDEBUG
 static struct {
     unsigned char mask;
     const char * name;} 
@@ -572,12 +572,6 @@ static void NCR5380_print_phase(struct Scsi_Host *instance)
     }
 }
 
-#else /* !NDEBUG */
-
-/* dummies... */
-__inline__ void NCR5380_print(struct Scsi_Host *instance) { };
-__inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { };
-
 #endif
 
 /*
@@ -618,7 +612,7 @@ static inline void NCR5380_all_init (void)
 {
     static int done = 0;
     if (!done) {
-       INI_PRINTK("scsi : NCR5380_all_init()\n");
+       dprintk(NDEBUG_INIT, "scsi : NCR5380_all_init()\n");
        done = 1;
     }
 }
@@ -681,8 +675,8 @@ static void NCR5380_print_status(struct Scsi_Host *instance)
        Scsi_Cmnd *ptr;
        unsigned long flags;
 
-       NCR_PRINT(NDEBUG_ANY);
-       NCR_PRINT_PHASE(NDEBUG_ANY);
+       NCR5380_dprint(NDEBUG_ANY, instance);
+       NCR5380_dprint_phase(NDEBUG_ANY, instance);
 
        hostdata = (struct NCR5380_hostdata *)instance->hostdata;
 
@@ -928,7 +922,7 @@ static int NCR5380_queue_command_lck(struct scsi_cmnd *cmd,
 
     local_irq_restore(flags);
 
-    QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd),
+    dprintk(NDEBUG_QUEUES, "scsi%d: command added to %s of queue\n", H_NO(cmd),
              (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail");
 
     /* If queue_command() is called from an interrupt (real one or bottom
@@ -998,7 +992,7 @@ static void NCR5380_main (struct work_struct *bl)
        done = 1;
        
        if (!hostdata->connected) {
-           MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO );
+           dprintk(NDEBUG_MAIN,  "scsi%d: not connected\n", HOSTNO );
            /*
             * Search through the issue_queue for a command destined
             * for a target that's not busy.
@@ -1012,12 +1006,8 @@ static void NCR5380_main (struct work_struct *bl)
            for (tmp = (struct scsi_cmnd *) hostdata->issue_queue,
                 prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) {
 
-#if (NDEBUG & NDEBUG_LISTS)
                if (prev != tmp)
-                   printk("MAIN tmp=%p   target=%d   busy=%d lun=%d\n",
-                          tmp, tmp->target, hostdata->busy[tmp->target],
-                          tmp->lun);
-#endif
+                       dprintk(NDEBUG_LISTS, "MAIN tmp=%p   target=%d   busy=%d lun=%d\n", tmp, tmp->device->id, hostdata->busy[tmp->device->id], tmp->device->lun);
                /*  When we find one, remove it from the issue queue. */
                /* ++guenther: possible race with Falcon locking */
                if (
@@ -1047,9 +1037,9 @@ static void NCR5380_main (struct work_struct *bl)
                     * On failure, we must add the command back to the
                     *   issue queue so we can keep trying.     
                     */
-                   MAIN_PRINTK("scsi%d: main(): command for target %d "
+                   dprintk(NDEBUG_MAIN, "scsi%d: main(): command for target %d "
                                "lun %d removed from issue_queue\n",
-                               HOSTNO, tmp->target, tmp->lun);
+                               HOSTNO, tmp->device->id, tmp->device->lun);
                    /* 
                     * REQUEST SENSE commands are issued without tagged
                     * queueing, even on SCSI-II devices because the 
@@ -1076,7 +1066,7 @@ static void NCR5380_main (struct work_struct *bl)
                        cmd_free_tag( tmp );
 #endif
                        local_irq_restore(flags);
-                       MAIN_PRINTK("scsi%d: main(): select() failed, "
+                       dprintk(NDEBUG_MAIN, "scsi%d: main(): select() failed, "
                                    "returned to issue_queue\n", HOSTNO);
                        if (hostdata->connected)
                            break;
@@ -1090,10 +1080,10 @@ static void NCR5380_main (struct work_struct *bl)
 #endif
            ) {
            local_irq_restore(flags);
-           MAIN_PRINTK("scsi%d: main: performing information transfer\n",
+           dprintk(NDEBUG_MAIN, "scsi%d: main: performing information transfer\n",
                        HOSTNO);
            NCR5380_information_transfer(instance);
-           MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO);
+           dprintk(NDEBUG_MAIN, "scsi%d: main: done set false\n", HOSTNO);
            done = 0;
        }
     } while (!done);
@@ -1130,7 +1120,7 @@ static void NCR5380_dma_complete( struct Scsi_Host *instance )
        return;
     }
 
-    DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
+    dprintk(NDEBUG_DMA, "scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n",
               HOSTNO, NCR5380_read(BUS_AND_STATUS_REG),
               NCR5380_read(STATUS_REG));
 
@@ -1189,27 +1179,27 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
     int done = 1, handled = 0;
     unsigned char basr;
 
-    INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO);
+    dprintk(NDEBUG_INTR, "scsi%d: NCR5380 irq triggered\n", HOSTNO);
 
     /* Look for pending interrupts */
     basr = NCR5380_read(BUS_AND_STATUS_REG);
-    INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr);
+    dprintk(NDEBUG_INTR, "scsi%d: BASR=%02x\n", HOSTNO, basr);
     /* dispatch to appropriate routine if found and done=0 */
     if (basr & BASR_IRQ) {
-       NCR_PRINT(NDEBUG_INTR);
+       NCR5380_dprint(NDEBUG_INTR, instance);
        if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) {
            done = 0;
 //         ENABLE_IRQ();
-           INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO);
+           dprintk(NDEBUG_INTR, "scsi%d: SEL interrupt\n", HOSTNO);
            NCR5380_reselect(instance);
            (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
        }
        else if (basr & BASR_PARITY_ERROR) {
-           INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO);
+           dprintk(NDEBUG_INTR, "scsi%d: PARITY interrupt\n", HOSTNO);
            (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG);
        }
        else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) {
-           INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO);
+           dprintk(NDEBUG_INTR, "scsi%d: RESET interrupt\n", HOSTNO);
            (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG);
        }
        else {
@@ -1229,7 +1219,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
                ((basr & BASR_END_DMA_TRANSFER) || 
                 !(basr & BASR_PHASE_MATCH))) {
                    
-               INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
+               dprintk(NDEBUG_INTR, "scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO);
                NCR5380_dma_complete( instance );
                done = 0;
 //             ENABLE_IRQ();
@@ -1238,7 +1228,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
            {
 /* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */
                if (basr & BASR_PHASE_MATCH)
-                  INT_PRINTK("scsi%d: unknown interrupt, "
+                  dprintk(NDEBUG_INTR, "scsi%d: unknown interrupt, "
                           "BASR 0x%x, MR 0x%x, SR 0x%x\n",
                           HOSTNO, basr, NCR5380_read(MODE_REG),
                           NCR5380_read(STATUS_REG));
@@ -1262,7 +1252,7 @@ static irqreturn_t NCR5380_intr (int irq, void *dev_id)
     }
     
     if (!done) {
-       INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO);
+       dprintk(NDEBUG_INTR, "scsi%d: in int routine, calling main\n", HOSTNO);
        /* Put a call to NCR5380_main() on the queue... */
        queue_main();
     }
@@ -1338,8 +1328,8 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
     unsigned long flags;
 
     hostdata->restart_select = 0;
-    NCR_PRINT(NDEBUG_ARBITRATION);
-    ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO,
+    NCR5380_dprint(NDEBUG_ARBITRATION, instance);
+    dprintk(NDEBUG_ARBITRATION, "scsi%d: starting arbitration, id = %d\n", HOSTNO,
               instance->this_id);
 
     /* 
@@ -1385,7 +1375,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
         && !hostdata->connected);
 #endif
 
-    ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO);
+    dprintk(NDEBUG_ARBITRATION, "scsi%d: arbitration complete\n", HOSTNO);
 
     if (hostdata->connected) {
        NCR5380_write(MODE_REG, MR_BASE); 
@@ -1406,7 +1396,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
        (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) ||
        hostdata->connected) {
        NCR5380_write(MODE_REG, MR_BASE); 
-       ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
+       dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting MR_ARBITRATE\n",
                   HOSTNO);
        return -1;
     }
@@ -1421,7 +1411,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
        hostdata->connected) {
        NCR5380_write(MODE_REG, MR_BASE);
        NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-       ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
+       dprintk(NDEBUG_ARBITRATION, "scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n",
                   HOSTNO);
        return -1;
     }
@@ -1444,7 +1434,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
        return -1;
     }
 
-    ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO);
+    dprintk(NDEBUG_ARBITRATION, "scsi%d: won arbitration\n", HOSTNO);
 
     /* 
      * Now that we have won arbitration, start Selection process, asserting 
@@ -1504,7 +1494,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
 
     udelay(1);
 
-    SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
+    dprintk(NDEBUG_SELECTION, "scsi%d: selecting target %d\n", HOSTNO, cmd->device->id);
 
     /* 
      * The SCSI specification calls for a 250 ms timeout for the actual 
@@ -1559,7 +1549,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
            printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO);
            if (hostdata->restart_select)
                printk(KERN_NOTICE "\trestart select\n");
-           NCR_PRINT(NDEBUG_ANY);
+           NCR5380_dprint(NDEBUG_ANY, instance);
            NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
            return -1;
        }
@@ -1572,7 +1562,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
 #endif
        cmd->scsi_done(cmd);
        NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
-       SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO);
+       dprintk(NDEBUG_SELECTION, "scsi%d: target did not respond within 250ms\n", HOSTNO);
        NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask);
        return 0;
     } 
@@ -1597,7 +1587,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
     /* Wait for start of REQ/ACK handshake */
     while (!(NCR5380_read(STATUS_REG) & SR_REQ));
 
-    SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
+    dprintk(NDEBUG_SELECTION, "scsi%d: target %d selected, going into MESSAGE OUT phase.\n",
               HOSTNO, cmd->device->id);
     tmp[0] = IDENTIFY(1, cmd->device->lun);
 
@@ -1617,7 +1607,7 @@ static int NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd,
     data = tmp;
     phase = PHASE_MSGOUT;
     NCR5380_transfer_pio(instance, &phase, &len, &data);
-    SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO);
+    dprintk(NDEBUG_SELECTION, "scsi%d: nexus established.\n", HOSTNO);
     /* XXX need to handle errors here */
     hostdata->connected = cmd;
 #ifndef SUPPORT_TAGS
@@ -1680,12 +1670,12 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,
         */
        while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
 
-       HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO);
+       dprintk(NDEBUG_HANDSHAKE, "scsi%d: REQ detected\n", HOSTNO);
 
        /* Check for phase mismatch */  
        if ((tmp & PHASE_MASK) != p) {
-           PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO);
-           NCR_PRINT_PHASE(NDEBUG_PIO);
+           dprintk(NDEBUG_PIO, "scsi%d: phase mismatch\n", HOSTNO);
+           NCR5380_dprint_phase(NDEBUG_PIO, instance);
            break;
        }
 
@@ -1708,24 +1698,24 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,
            if (!((p & SR_MSG) && c > 1)) {
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 
                    ICR_ASSERT_DATA);
-               NCR_PRINT(NDEBUG_PIO);
+               NCR5380_dprint(NDEBUG_PIO, instance);
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 
                        ICR_ASSERT_DATA | ICR_ASSERT_ACK);
            } else {
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE |
                    ICR_ASSERT_DATA | ICR_ASSERT_ATN);
-               NCR_PRINT(NDEBUG_PIO);
+               NCR5380_dprint(NDEBUG_PIO, instance);
                NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | 
                    ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
            }
        } else {
-           NCR_PRINT(NDEBUG_PIO);
+           NCR5380_dprint(NDEBUG_PIO, instance);
            NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK);
        }
 
        while (NCR5380_read(STATUS_REG) & SR_REQ);
 
-       HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO);
+       dprintk(NDEBUG_HANDSHAKE, "scsi%d: req false, handshake complete\n", HOSTNO);
 
 /*
  * We have several special cases to consider during REQ/ACK handshaking : 
@@ -1746,7 +1736,7 @@ static int NCR5380_transfer_pio( struct Scsi_Host *instance,
        } 
     } while (--c);
 
-    PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c);
+    dprintk(NDEBUG_PIO, "scsi%d: residual %d\n", HOSTNO, c);
 
     *count = c;
     *data = d;
@@ -1854,7 +1844,7 @@ static int NCR5380_transfer_dma( struct Scsi_Host *instance,
     }
     hostdata->dma_len = c;
 
-    DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n",
+    dprintk(NDEBUG_DMA, "scsi%d: initializing DMA for %s, %d bytes %s %p\n",
               HOSTNO, (p & SR_IO) ? "reading" : "writing",
               c, (p & SR_IO) ? "to" : "from", *data);
 
@@ -1931,7 +1921,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
            phase = (tmp & PHASE_MASK); 
            if (phase != old_phase) {
                old_phase = phase;
-               NCR_PRINT_PHASE(NDEBUG_INFORMATION);
+               NCR5380_dprint_phase(NDEBUG_INFORMATION, instance);
            }
 
            if(phase == PHASE_CMDOUT) {
@@ -1996,7 +1986,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                    --cmd->SCp.buffers_residual;
                    cmd->SCp.this_residual = cmd->SCp.buffer->length;
                    cmd->SCp.ptr = SGADDR(cmd->SCp.buffer);
-                   INF_PRINTK("scsi%d: %d bytes and %d buffers left\n",
+                   dprintk(NDEBUG_INFORMATION, "scsi%d: %d bytes and %d buffers left\n",
                               HOSTNO, cmd->SCp.this_residual,
                               cmd->SCp.buffers_residual);
                }
@@ -2088,7 +2078,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                    /* Accept message by clearing ACK */
                    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
                    
-                   LNK_PRINTK("scsi%d: target %d lun %d linked command "
+                   dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked command "
                               "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun);
 
                    /* Enable reselect interrupts */
@@ -2113,7 +2103,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                     * and don't free it! */
                    cmd->next_link->tag = cmd->tag;
                    cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); 
-                   LNK_PRINTK("scsi%d: target %d lun %d linked request "
+                   dprintk(NDEBUG_LINKED, "scsi%d: target %d lun %d linked request "
                               "done, calling scsi_done().\n",
                               HOSTNO, cmd->device->id, cmd->device->lun);
 #ifdef NCR5380_STATS
@@ -2128,7 +2118,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                    /* Accept message by clearing ACK */
                    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
                    hostdata->connected = NULL;
-                   QU_PRINTK("scsi%d: command for target %d, lun %d "
+                   dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d, lun %d "
                              "completed\n", HOSTNO, cmd->device->id, cmd->device->lun);
 #ifdef SUPPORT_TAGS
                    cmd_free_tag( cmd );
@@ -2142,7 +2132,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                        /* ++Andreas: the mid level code knows about
                           QUEUE_FULL now. */
                        TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
-                       TAG_PRINTK("scsi%d: target %d lun %d returned "
+                       dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d returned "
                                   "QUEUE_FULL after %d commands\n",
                                   HOSTNO, cmd->device->id, cmd->device->lun,
                                   ta->nr_allocated);
@@ -2186,7 +2176,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                    if ((cmd->cmnd[0] != REQUEST_SENSE) && 
                        (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) {
                        scsi_eh_prep_cmnd(cmd, &hostdata->ses, NULL, 0, ~0);
-                       ASEN_PRINTK("scsi%d: performing request sense\n",
+                       dprintk(NDEBUG_AUTOSENSE, "scsi%d: performing request sense\n",
                                    HOSTNO);
                        /* this is initialized from initialize_SCp 
                        cmd->SCp.buffer = NULL;
@@ -2198,7 +2188,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                        SET_NEXT(cmd, hostdata->issue_queue);
                        hostdata->issue_queue = (struct scsi_cmnd *) cmd;
                        local_irq_restore(flags);
-                       QU_PRINTK("scsi%d: REQUEST SENSE added to head of "
+                       dprintk(NDEBUG_QUEUES, "scsi%d: REQUEST SENSE added to head of "
                                  "issue queue\n", H_NO(cmd));
                   } else
 #endif /* def AUTOSENSE */
@@ -2238,7 +2228,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                        cmd->device->tagged_supported = 0;
                        hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
                        cmd->tag = TAG_NONE;
-                       TAG_PRINTK("scsi%d: target %d lun %d rejected "
+                       dprintk(NDEBUG_TAGS, "scsi%d: target %d lun %d rejected "
                                   "QUEUE_TAG message; tagged queuing "
                                   "disabled\n",
                                   HOSTNO, cmd->device->id, cmd->device->lun);
@@ -2255,7 +2245,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                    hostdata->connected = NULL;
                    hostdata->disconnected_queue = cmd;
                    local_irq_restore(flags);
-                   QU_PRINTK("scsi%d: command for target %d lun %d was "
+                   dprintk(NDEBUG_QUEUES, "scsi%d: command for target %d lun %d was "
                              "moved from connected to the "
                              "disconnected_queue\n", HOSTNO, 
                              cmd->device->id, cmd->device->lun);
@@ -2308,13 +2298,13 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                    /* Accept first byte by clearing ACK */
                    NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
 
-                   EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO);
+                   dprintk(NDEBUG_EXTENDED, "scsi%d: receiving extended message\n", HOSTNO);
 
                    len = 2;
                    data = extended_msg + 1;
                    phase = PHASE_MSGIN;
                    NCR5380_transfer_pio(instance, &phase, &len, &data);
-                   EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO,
+                   dprintk(NDEBUG_EXTENDED, "scsi%d: length=%d, code=0x%02x\n", HOSTNO,
                               (int)extended_msg[1], (int)extended_msg[2]);
 
                    if (!len && extended_msg[1] <= 
@@ -2326,7 +2316,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                        phase = PHASE_MSGIN;
 
                        NCR5380_transfer_pio(instance, &phase, &len, &data);
-                       EXT_PRINTK("scsi%d: message received, residual %d\n",
+                       dprintk(NDEBUG_EXTENDED, "scsi%d: message received, residual %d\n",
                                   HOSTNO, len);
 
                        switch (extended_msg[2]) {
@@ -2416,7 +2406,7 @@ static void NCR5380_information_transfer (struct Scsi_Host *instance)
                break;
            default:
                printk("scsi%d: unknown phase\n", HOSTNO);
-               NCR_PRINT(NDEBUG_ANY);
+               NCR5380_dprint(NDEBUG_ANY, instance);
            } /* switch(phase) */
        } /* if (tmp * SR_REQ) */ 
     } /* while (1) */
@@ -2458,7 +2448,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
 
     target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask);
 
-    RSL_PRINTK("scsi%d: reselect\n", HOSTNO);
+    dprintk(NDEBUG_RESELECTION, "scsi%d: reselect\n", HOSTNO);
 
     /* 
      * At this point, we have detected that our SCSI ID is on the bus,
@@ -2580,14 +2570,14 @@ static void NCR5380_reselect (struct Scsi_Host *instance)
        if (!NCR5380_transfer_pio(instance, &phase, &len, &data) &&
            msg[1] == SIMPLE_QUEUE_TAG)
            tag = msg[2];
-       TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at "
+       dprintk(NDEBUG_TAGS, "scsi%d: target mask %02x, lun %d sent tag %d at "
                   "reselection\n", HOSTNO, target_mask, lun, tag);
     }
 #endif
     
     hostdata->connected = tmp;
-    RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
-              HOSTNO, tmp->target, tmp->lun, tmp->tag);
+    dprintk(NDEBUG_RESELECTION, "scsi%d: nexus established, target = %d, lun = %d, tag = %d\n",
+              HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag);
 }
 
 
@@ -2622,7 +2612,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 
     local_irq_save(flags);
     
-    ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
+    dprintk(NDEBUG_ABORT, "scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO,
                NCR5380_read(BUS_AND_STATUS_REG),
                NCR5380_read(STATUS_REG));
 
@@ -2635,7 +2625,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 
     if (hostdata->connected == cmd) {
 
-       ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO);
+       dprintk(NDEBUG_ABORT, "scsi%d: aborting connected command\n", HOSTNO);
 /*
  * We should perform BSY checking, and make sure we haven't slipped
  * into BUS FREE.
@@ -2664,11 +2654,11 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 #endif
          local_irq_restore(flags);
          cmd->scsi_done(cmd);
-         return SCSI_ABORT_SUCCESS;
+         return SUCCESS;
        } else {
 /*       local_irq_restore(flags); */
          printk("scsi%d: abort of connected command failed!\n", HOSTNO);
-         return SCSI_ABORT_ERROR;
+         return FAILED;
        } 
    }
 #endif
@@ -2686,12 +2676,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
            SET_NEXT(tmp, NULL);
            tmp->result = DID_ABORT << 16;
            local_irq_restore(flags);
-           ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n",
+           dprintk(NDEBUG_ABORT, "scsi%d: abort removed command from issue queue.\n",
                        HOSTNO);
            /* Tagged queuing note: no tag to free here, hasn't been assigned
             * yet... */
            tmp->scsi_done(tmp);
-           return SCSI_ABORT_SUCCESS;
+           return SUCCESS;
        }
 
 /* 
@@ -2707,8 +2697,8 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 
     if (hostdata->connected) {
        local_irq_restore(flags);
-       ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO);
-        return SCSI_ABORT_SNOOZE;
+       dprintk(NDEBUG_ABORT, "scsi%d: abort failed, command connected.\n", HOSTNO);
+        return FAILED;
     }
 
 /*
@@ -2740,12 +2730,12 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
         tmp = NEXT(tmp)) 
         if (cmd == tmp) {
             local_irq_restore(flags);
-           ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO);
+           dprintk(NDEBUG_ABORT, "scsi%d: aborting disconnected command.\n", HOSTNO);
   
             if (NCR5380_select (instance, cmd, (int) cmd->tag)) 
-               return SCSI_ABORT_BUSY;
+               return FAILED;
 
-           ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO);
+           dprintk(NDEBUG_ABORT, "scsi%d: nexus reestablished.\n", HOSTNO);
 
            do_abort (instance);
 
@@ -2769,7 +2759,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 #endif
                    local_irq_restore(flags);
                    tmp->scsi_done(tmp);
-                   return SCSI_ABORT_SUCCESS;
+                   return SUCCESS;
                }
        }
 
@@ -2786,7 +2776,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
     local_irq_restore(flags);
     printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully before abortion\n", HOSTNO); 
 
-    return SCSI_ABORT_NOT_RUNNING;
+    return FAILED;
 }
 
 
@@ -2795,7 +2785,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
  * 
  * Purpose : reset the SCSI bus.
  *
- * Returns : SCSI_RESET_WAKEUP
+ * Returns : SUCCESS or FAILED
  *
  */ 
 
@@ -2804,7 +2794,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
     SETUP_HOSTDATA(cmd->device->host);
     int           i;
     unsigned long flags;
-#if 1
+#if defined(RESET_RUN_DONE)
     struct scsi_cmnd *connected, *disconnected_queue;
 #endif
 
@@ -2826,8 +2816,15 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
      * through anymore ... */
     (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG );
 
-#if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */
-      /* XXX see below                                            XXX */
+       /* MSch 20140115 - looking at the generic NCR5380 driver, all of this
+        * should go.
+        * Catch-22: if we don't clear all queues, the SCSI driver lock will
+        * not be released by atari_scsi_reset()!
+        */
+
+#if defined(RESET_RUN_DONE)
+       /* XXX Should now be done by midlevel code, but it's broken XXX */
+       /* XXX see below                                            XXX */
 
     /* MSch: old-style reset: actually abort all command processing here */
 
@@ -2857,7 +2854,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
      */
 
     if ((cmd = connected)) {
-       ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
+       dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
        cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16);
        cmd->scsi_done( cmd );
     }
@@ -2869,14 +2866,14 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
        cmd->scsi_done( cmd );
     }
     if (i > 0)
-       ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i);
+       dprintk(NDEBUG_ABORT, "scsi: reset aborted %d disconnected command(s)\n", i);
 
 
     /* since all commands have been explicitly terminated, we need to tell
      * the midlevel code that the reset was SUCCESSFUL, and there is no 
      * need to 'wake up' the commands by a request_sense
      */
-    return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET;
+    return SUCCESS;
 #else /* 1 */
 
     /* MSch: new-style reset handling: let the mid-level do what it can */
@@ -2903,11 +2900,11 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
      */
 
     if (hostdata->issue_queue)
-       ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
+       dprintk(NDEBUG_ABORT, "scsi%d: reset aborted issued command(s)\n", H_NO(cmd));
     if (hostdata->connected) 
-       ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd));
+       dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", H_NO(cmd));
     if (hostdata->disconnected_queue)
-       ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
+       dprintk(NDEBUG_ABORT, "scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd));
 
     local_irq_save(flags);
     hostdata->issue_queue = NULL;
@@ -2924,7 +2921,7 @@ static int NCR5380_bus_reset(struct scsi_cmnd *cmd)
     local_irq_restore(flags);
 
     /* we did no complete reset of all commands, so a wakeup is required */
-    return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET;
+    return SUCCESS;
 #endif /* 1 */
 }
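
A note on the ABRT_PRINTK() conversions in the hunks above: the per-category debug macros give way to a single mask-tested dprintk(). As a rough, compilable illustration of that pattern (the real macro lives in NCR5380.h; only the NDEBUG_ABORT value is taken from the defines removed further down, the rest is assumed):

	#include <stdio.h>

	/* Stand-alone sketch of a mask-gated debug print; not the kernel macro. */
	#define NDEBUG_ABORT	0x00100000
	#define NDEBUG		NDEBUG_ABORT	/* compile-time category selection */

	#define dprintk(flg, fmt, ...)					\
		do {							\
			if ((NDEBUG) & (flg))				\
				printf(fmt, ##__VA_ARGS__);		\
		} while (0)

	int main(void)
	{
		dprintk(NDEBUG_ABORT, "scsi%d: reset aborted a connected command\n", 0);
		return 0;
	}

Building with NDEBUG set to 0 compiles the calls away to nothing, which is the point of routing every category through one macro.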
 
diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c
index e2c009b033cec84f6ca3b1ceabb2e9d1798a4acb..9707b7494a89644e99c2629127f7a612ccabf6ec 100644 (file)
@@ -3,6 +3,10 @@
  *
  * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net)
  *
+ * VME support added by Sam Creasey
+ *
+ * TODO: modify this driver to support multiple Sun3 SCSI VME boards
+ *
  * Adapted from mac_scsinew.c:
  */
 /*
  * USLEEP - enable support for devices that don't disconnect.  Untested.
  */
 
-/*
- * $Log: sun3_NCR5380.c,v $
- */
-
 #define AUTOSENSE
 
 #include <linux/types.h>
 #include <asm/idprom.h>
 #include <asm/machines.h>
 
-#define NDEBUG 0
-
-#define NDEBUG_ABORT           0x00100000
-#define NDEBUG_TAGS            0x00200000
-#define NDEBUG_MERGING         0x00400000
-
 /* dma on! */
 #define REAL_DMA
 
 #include "scsi.h"
-#include "initio.h"
 #include <scsi/scsi_host.h>
 #include "sun3_scsi.h"
+#include "NCR5380.h"
 
-static void NCR5380_print(struct Scsi_Host *instance);
-
-/* #define OLDDMA */
+extern int sun3_map_test(unsigned long, char *);
 
 #define USE_WRAPPER
 /*#define RESET_BOOT */
@@ -101,7 +93,11 @@ static void NCR5380_print(struct Scsi_Host *instance);
 
 /* #define SUPPORT_TAGS */
 
+#ifdef SUN3_SCSI_VME
+#define ENABLE_IRQ()
+#else
 #define        ENABLE_IRQ()    enable_irq( IRQ_SUN3_SCSI ); 
+#endif
 
 
 static irqreturn_t scsi_sun3_intr(int irq, void *dummy);
@@ -123,6 +119,8 @@ module_param(setup_hostid, int, 0);
 
 static struct scsi_cmnd *sun3_dma_setup_done = NULL;
 
+#define        RESET_RUN_DONE
+
 #define        AFTER_RESET_DELAY       (HZ/2)
 
 /* ms to wait after hitting dma regs */
@@ -136,10 +134,9 @@ static struct scsi_cmnd *sun3_dma_setup_done = NULL;
 
 static volatile unsigned char *sun3_scsi_regp;
 static volatile struct sun3_dma_regs *dregs;
-#ifdef OLDDMA
-static unsigned char *dmabuf = NULL; /* dma memory buffer */
-#endif
+#ifndef SUN3_SCSI_VME
 static struct sun3_udc_regs *udc_regs = NULL;
+#endif
 static unsigned char *sun3_dma_orig_addr = NULL;
 static unsigned long sun3_dma_orig_count = 0;
 static int sun3_dma_active = 0;
@@ -159,6 +156,7 @@ static inline void sun3scsi_write(int reg, int value)
        sun3_scsi_regp[reg] = value;
 }
 
+#ifndef SUN3_SCSI_VME
 /* dma controller register access functions */
 
 static inline unsigned short sun3_udc_read(unsigned char reg)
@@ -180,6 +178,7 @@ static inline void sun3_udc_write(unsigned short val, unsigned char reg)
        dregs->udc_data = val;
        udelay(SUN3_DMA_DELAY);
 }
+#endif
 
 /*
  * XXX: status debug
@@ -198,17 +197,32 @@ static struct Scsi_Host *default_instance;
  *
  */
  
-int __init sun3scsi_detect(struct scsi_host_template * tpnt)
+static int __init sun3scsi_detect(struct scsi_host_template *tpnt)
 {
-       unsigned long ioaddr;
+       unsigned long ioaddr, irq;
        static int called = 0;
        struct Scsi_Host *instance;
+#ifdef SUN3_SCSI_VME
+       int i;
+       unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI,
+                                  IOBASE_SUN3_VMESCSI + 0x4000,
+                                  0 };
+       unsigned long vecs[3] = { SUN3_VEC_VMESCSI0,
+                                 SUN3_VEC_VMESCSI1,
+                                 0 };
+#endif
 
        /* check that this machine has an onboard 5380 */
        switch(idprom->id_machtype) {
+#ifdef SUN3_SCSI_VME
+       case SM_SUN3|SM_3_160:
+       case SM_SUN3|SM_3_260:
+               break;
+#else
        case SM_SUN3|SM_3_50:
        case SM_SUN3|SM_3_60:
                break;
+#endif
 
        default:
                return 0;
@@ -217,7 +231,11 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
        if(called)
                return 0;
 
+#ifdef SUN3_SCSI_VME
+       tpnt->proc_name = "Sun3 5380 VME SCSI";
+#else
        tpnt->proc_name = "Sun3 5380 SCSI";
+#endif
 
        /* setup variables */
        tpnt->can_queue =
@@ -234,6 +252,38 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
                tpnt->this_id = 7;
        }
 
+#ifdef SUN3_SCSI_VME
+       ioaddr = 0;
+       for (i = 0; addrs[i] != 0; i++) {
+               unsigned char x;
+
+               ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE,
+                                                    SUN3_PAGE_TYPE_VME16);
+               irq = vecs[i];
+               sun3_scsi_regp = (unsigned char *)ioaddr;
+
+               dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8);
+
+               if (sun3_map_test((unsigned long)dregs, &x)) {
+                       unsigned short oldcsr;
+
+                       oldcsr = dregs->csr;
+                       dregs->csr = 0;
+                       udelay(SUN3_DMA_DELAY);
+                       if (dregs->csr == 0x1400)
+                               break;
+
+                       dregs->csr = oldcsr;
+               }
+
+               iounmap((void *)ioaddr);
+               ioaddr = 0;
+       }
+
+       if (!ioaddr)
+               return 0;
+#else
+       irq = IRQ_SUN3_SCSI;
        ioaddr = (unsigned long)ioremap(IOBASE_SUN3_SCSI, PAGE_SIZE);
        sun3_scsi_regp = (unsigned char *)ioaddr;
 
@@ -244,11 +294,6 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
             printk("SUN3 Scsi couldn't allocate DVMA memory!\n");
             return 0;
        }
-#ifdef OLDDMA
-       if((dmabuf = dvma_malloc_align(SUN3_DVMA_BUFSIZE, 0x10000)) == NULL) {
-            printk("SUN3 Scsi couldn't allocate DVMA memory!\n");
-            return 0;
-       }
 #endif
 #ifdef SUPPORT_TAGS
        if (setup_use_tagged_queuing < 0)
@@ -262,7 +307,7 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
        default_instance = instance;
 
         instance->io_port = (unsigned long) ioaddr;
-       instance->irq = IRQ_SUN3_SCSI;
+       instance->irq = irq;
 
        NCR5380_init(instance, 0);
 
@@ -283,7 +328,8 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
 #endif
        }
        
-       printk("scsi%d: Sun3 5380 at port %lX irq", instance->host_no, instance->io_port);
+       pr_info("scsi%d: %s at port %lX irq", instance->host_no,
+               tpnt->proc_name, instance->io_port);
        if (instance->irq == SCSI_IRQ_NONE)
                printk ("s disabled");
        else
@@ -300,6 +346,15 @@ int __init sun3scsi_detect(struct scsi_host_template * tpnt)
        dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR;
        udelay(SUN3_DMA_DELAY);
        dregs->fifo_count = 0;
+#ifdef SUN3_SCSI_VME
+       dregs->fifo_count_hi = 0;
+       dregs->dma_addr_hi = 0;
+       dregs->dma_addr_lo = 0;
+       dregs->dma_count_hi = 0;
+       dregs->dma_count_lo = 0;
+
+       dregs->ivect = VME_DATA24 | (instance->irq & 0xff);
+#endif
 
        called = 1;
 
@@ -367,7 +422,8 @@ static void sun3_scsi_reset_boot(struct Scsi_Host *instance)
 }
 #endif
 
-const char * sun3scsi_info (struct Scsi_Host *spnt) {
+static const char *sun3scsi_info(struct Scsi_Host *spnt)
+{
     return "";
 }
 
@@ -379,6 +435,10 @@ static irqreturn_t scsi_sun3_intr(int irq, void *dummy)
        unsigned short csr = dregs->csr;
        int handled = 0;
 
+#ifdef SUN3_SCSI_VME
+       dregs->csr &= ~CSR_DMA_ENABLE;
+#endif
+
        if(csr & ~CSR_GOOD) {
                if(csr & CSR_DMA_BUSERR) {
                        printk("scsi%d: bus error in dma\n", default_instance->host_no);
@@ -422,31 +482,28 @@ void sun3_sun3_debug (void)
 /* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
 static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag)
 {
-#ifdef OLDDMA
-       if(write_flag) 
-               memcpy(dmabuf, data, count);
-       else {
-               sun3_dma_orig_addr = data;
-               sun3_dma_orig_count = count;
-       }
-#else
        void *addr;
 
        if(sun3_dma_orig_addr != NULL)
                dvma_unmap(sun3_dma_orig_addr);
 
-//     addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf);
+#ifdef SUN3_SCSI_VME
+       addr = (void *)dvma_map_vme((unsigned long) data, count);
+#else
        addr = (void *)dvma_map((unsigned long) data, count);
+#endif
                
        sun3_dma_orig_addr = addr;
        sun3_dma_orig_count = count;
-#endif
+
+#ifndef SUN3_SCSI_VME
        dregs->fifo_count = 0;
        sun3_udc_write(UDC_RESET, UDC_CSR);
        
        /* reset fifo */
        dregs->csr &= ~CSR_FIFO;
        dregs->csr |= CSR_FIFO;
+#endif
        
        /* set direction */
        if(write_flag)
@@ -454,6 +511,17 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri
        else
                dregs->csr &= ~CSR_SEND;
        
+#ifdef SUN3_SCSI_VME
+       dregs->csr |= CSR_PACK_ENABLE;
+
+       dregs->dma_addr_hi = ((unsigned long)addr >> 16);
+       dregs->dma_addr_lo = ((unsigned long)addr & 0xffff);
+
+       dregs->dma_count_hi = 0;
+       dregs->dma_count_lo = 0;
+       dregs->fifo_count_hi = 0;
+       dregs->fifo_count = 0;
+#else
        /* byte count for fifo */
        dregs->fifo_count = count;
 
@@ -467,17 +535,12 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri
                printk("scsi%d: fifo_mismatch %04x not %04x\n",
                       default_instance->host_no, dregs->fifo_count,
                       (unsigned int) count);
-               NCR5380_print(default_instance);
+               NCR5380_dprint(NDEBUG_DMA, default_instance);
        }
 
        /* setup udc */
-#ifdef OLDDMA
-       udc_regs->addr_hi = ((dvma_vtob(dmabuf) & 0xff0000) >> 8);
-       udc_regs->addr_lo = (dvma_vtob(dmabuf) & 0xffff);
-#else
        udc_regs->addr_hi = (((unsigned long)(addr) & 0xff0000) >> 8);
        udc_regs->addr_lo = ((unsigned long)(addr) & 0xffff);
-#endif
        udc_regs->count = count/2; /* count in words */
        udc_regs->mode_hi = UDC_MODE_HIWORD;
        if(write_flag) {
@@ -501,11 +564,13 @@ static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int wri
 
        /* interrupt enable */
        sun3_udc_write(UDC_INT_ENABLE, UDC_CSR);
+#endif
        
                return count;
 
 }
 
+#ifndef SUN3_SCSI_VME
 static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance)
 {
        unsigned short resid;
@@ -518,6 +583,7 @@ static inline unsigned long sun3scsi_dma_count(struct Scsi_Host *instance)
 
        return (unsigned long) resid;
 }
+#endif
 
 static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
 {
@@ -536,8 +602,23 @@ static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
 
 static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
 {
+#ifdef SUN3_SCSI_VME
+       unsigned short csr;
+
+       csr = dregs->csr;
 
+       dregs->dma_count_hi = (sun3_dma_orig_count >> 16);
+       dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff);
+
+       dregs->fifo_count_hi = (sun3_dma_orig_count >> 16);
+       dregs->fifo_count = (sun3_dma_orig_count & 0xffff);
+
+/*     if(!(csr & CSR_DMA_ENABLE))
+ *             dregs->csr |= CSR_DMA_ENABLE;
+ */
+#else
     sun3_udc_write(UDC_CHN_START, UDC_CSR);
+#endif
     
     return 0;
 }
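
For the VME variant the transfer length is programmed as two 16-bit halves (dma_count_hi/dma_count_lo and fifo_count_hi/fifo_count), as shown in the hunk above. A trivial stand-alone sketch just to make that split explicit:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative only: split a 32-bit DMA length into the hi/lo 16-bit
	 * halves the VME DMA registers expect, then reassemble it. */
	int main(void)
	{
		uint32_t count = 0x0001e240;	/* example transfer length (123456) */
		uint16_t hi = count >> 16;	/* -> dma_count_hi / fifo_count_hi */
		uint16_t lo = count & 0xffff;	/* -> dma_count_lo / fifo_count    */

		printf("hi=0x%04x lo=0x%04x back=0x%08lx\n",
		       (unsigned)hi, (unsigned)lo,
		       ((unsigned long)hi << 16) | lo);
		return 0;
	}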
@@ -545,12 +626,46 @@ static inline int sun3scsi_dma_start(unsigned long count, unsigned char *data)
 /* clean up after our dma is done */
 static int sun3scsi_dma_finish(int write_flag)
 {
-       unsigned short count;
+       unsigned short __maybe_unused count;
        unsigned short fifo;
        int ret = 0;
        
        sun3_dma_active = 0;
-#if 1
+
+#ifdef SUN3_SCSI_VME
+       dregs->csr &= ~CSR_DMA_ENABLE;
+
+       fifo = dregs->fifo_count;
+       if (write_flag) {
+               if ((fifo > 0) && (fifo < sun3_dma_orig_count))
+                       fifo++;
+       }
+
+       last_residual = fifo;
+       /* empty bytes from the fifo which didn't make it */
+       if ((!write_flag) && (dregs->csr & CSR_LEFT)) {
+               unsigned char *vaddr;
+
+               vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr);
+
+               vaddr += (sun3_dma_orig_count - fifo);
+               vaddr--;
+
+               switch (dregs->csr & CSR_LEFT) {
+               case CSR_LEFT_3:
+                       *vaddr = (dregs->bpack_lo & 0xff00) >> 8;
+                       vaddr--;
+
+               case CSR_LEFT_2:
+                       *vaddr = (dregs->bpack_hi & 0x00ff);
+                       vaddr--;
+
+               case CSR_LEFT_1:
+                       *vaddr = (dregs->bpack_hi & 0xff00) >> 8;
+                       break;
+               }
+       }
+#else
        // check to empty the fifo on a read
        if(!write_flag) {
                int tmo = 20000; /* .2 sec */
@@ -566,28 +681,8 @@ static int sun3scsi_dma_finish(int write_flag)
                        udelay(10);
                }
        }
-               
-#endif
 
        count = sun3scsi_dma_count(default_instance);
-#ifdef OLDDMA
-
-       /* if we've finished a read, copy out the data we read */
-       if(sun3_dma_orig_addr) {
-               /* check for residual bytes after dma end */
-               if(count && (NCR5380_read(BUS_AND_STATUS_REG) &
-                            (BASR_PHASE_MATCH | BASR_ACK))) {
-                       printk("scsi%d: sun3_scsi_finish: read overrun baby... ", default_instance->host_no);
-                       printk("basr now %02x\n", NCR5380_read(BUS_AND_STATUS_REG));
-                       ret = count;
-               }
-               
-               /* copy in what we dma'd no matter what */
-               memcpy(sun3_dma_orig_addr, dmabuf, sun3_dma_orig_count);
-               sun3_dma_orig_addr = NULL;
-
-       }
-#else
 
        fifo = dregs->fifo_count;
        last_residual = fifo;
@@ -605,10 +700,23 @@ static int sun3scsi_dma_finish(int write_flag)
                vaddr[-2] = (data & 0xff00) >> 8;
                vaddr[-1] = (data & 0xff);
        }
+#endif
 
        dvma_unmap(sun3_dma_orig_addr);
        sun3_dma_orig_addr = NULL;
-#endif
+
+#ifdef SUN3_SCSI_VME
+       dregs->dma_addr_hi = 0;
+       dregs->dma_addr_lo = 0;
+       dregs->dma_count_hi = 0;
+       dregs->dma_count_lo = 0;
+
+       dregs->fifo_count = 0;
+       dregs->fifo_count_hi = 0;
+
+       dregs->csr &= ~CSR_SEND;
+/*     dregs->csr |= CSR_DMA_ENABLE; */
+#else
        sun3_udc_write(UDC_RESET, UDC_CSR);
        dregs->fifo_count = 0;
        dregs->csr &= ~CSR_SEND;
@@ -616,6 +724,7 @@ static int sun3scsi_dma_finish(int write_flag)
        /* reset fifo */
        dregs->csr &= ~CSR_FIFO;
        dregs->csr |= CSR_FIFO;
+#endif
        
        sun3_dma_setup_done = NULL;
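
The CSR_LEFT handling added to sun3scsi_dma_finish() a few hunks above drains one to three bytes that remain packed in bpack_hi/bpack_lo after a VME DMA read, walking backwards through the buffer with deliberate switch fall-through. A stand-alone emulation of that drain logic (the CSR_LEFT_* encodings and register values below are made up for the sketch; only the byte arithmetic mirrors the driver):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	enum { CSR_LEFT_1 = 1, CSR_LEFT_2 = 2, CSR_LEFT_3 = 3, CSR_LEFT = 3 };

	/* buf: destination, total: requested bytes, fifo_resid: residual count,
	 * bpack_hi/lo: fake pack registers holding the undelivered bytes. */
	static void drain(uint8_t *buf, unsigned long total, unsigned long fifo_resid,
			  uint16_t csr, uint16_t bpack_hi, uint16_t bpack_lo)
	{
		uint8_t *vaddr = buf + (total - fifo_resid) - 1;

		switch (csr & CSR_LEFT) {
		case CSR_LEFT_3:
			*vaddr-- = (bpack_lo & 0xff00) >> 8;	/* third leftover byte */
			/* fall through */
		case CSR_LEFT_2:
			*vaddr-- = bpack_hi & 0x00ff;		/* second leftover byte */
			/* fall through */
		case CSR_LEFT_1:
			*vaddr = (bpack_hi & 0xff00) >> 8;	/* first leftover byte */
			break;
		}
	}

	int main(void)
	{
		uint8_t buf[8];

		memset(buf, 0, sizeof(buf));
		/* pretend the pack registers still hold the bytes AA BB CC */
		drain(buf, 8, 5, CSR_LEFT_3, 0xaabb, 0xcc00);
		printf("%02x %02x %02x\n", buf[0], buf[1], buf[2]);
		return 0;
	}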
 
diff --git a/drivers/scsi/sun3_scsi.h b/drivers/scsi/sun3_scsi.h
index a8da9c710fea52bd6caa7c19675d80e693ea7142..e96a37cf06ac117f45246e4f60e6745384a0a119 100644 (file)
  * 1+ (800) 334-5454
  */
 
-/*
- * $Log: cumana_NCR5380.h,v $
- */
-
-#ifndef SUN3_NCR5380_H
-#define SUN3_NCR5380_H
+#ifndef SUN3_SCSI_H
+#define SUN3_SCSI_H
 
 #define SUN3SCSI_PUBLIC_RELEASE 1
 
@@ -82,8 +78,6 @@ static int sun3scsi_release (struct Scsi_Host *);
 #define SUN3_SCSI_NAME "Sun3 NCR5380 SCSI"
 #endif
 
-#ifndef HOSTS_C
-
 #define NCR5380_implementation_fields \
     int port, ctrl
 
@@ -108,9 +102,6 @@ static int sun3scsi_release (struct Scsi_Host *);
 #define NCR5380_dma_read_setup(instance, data, count) sun3scsi_dma_setup(data, count, 0)
 #define NCR5380_dma_residual sun3scsi_dma_residual
 
-#define BOARD_NORMAL   0
-#define BOARD_NCR53C400        1
-
 /* additional registers - mainly DMA control regs */
 /* these start at regbase + 8 -- directly after the NCR regs */
 struct sun3_dma_regs {
@@ -191,189 +182,5 @@ struct sun3_udc_regs {
 
 #define VME_DATA24 0x3d00
 
-// debugging printk's, taken from atari_scsi.h 
-/* Debugging printk definitions:
- *
- *  ARB  -> arbitration
- *  ASEN -> auto-sense
- *  DMA  -> DMA
- *  HSH  -> PIO handshake
- *  INF  -> information transfer
- *  INI  -> initialization
- *  INT  -> interrupt
- *  LNK  -> linked commands
- *  MAIN -> NCR5380_main() control flow
- *  NDAT -> no data-out phase
- *  NWR  -> no write commands
- *  PIO  -> PIO transfers
- *  PDMA -> pseudo DMA (unused on Atari)
- *  QU   -> queues
- *  RSL  -> reselections
- *  SEL  -> selections
- *  USL  -> usleep cpde (unused on Atari)
- *  LBS  -> last byte sent (unused on Atari)
- *  RSS  -> restarting of selections
- *  EXT  -> extended messages
- *  ABRT -> aborting and resetting
- *  TAG  -> queue tag handling
- *  MER  -> merging of consec. buffers
- *
- */
-
-#include "NCR5380.h"
-
-#if NDEBUG & NDEBUG_ARBITRATION
-#define ARB_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define ARB_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_AUTOSENSE
-#define ASEN_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define ASEN_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_DMA
-#define DMA_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define DMA_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_HANDSHAKE
-#define HSH_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define HSH_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_INFORMATION
-#define INF_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define INF_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_INIT
-#define INI_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define INI_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_INTR
-#define INT_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define INT_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_LINKED
-#define LNK_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define LNK_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_MAIN
-#define MAIN_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define MAIN_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_NO_DATAOUT
-#define NDAT_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define NDAT_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_NO_WRITE
-#define NWR_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define NWR_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_PIO
-#define PIO_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define PIO_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_PSEUDO_DMA
-#define PDMA_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define PDMA_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_QUEUES
-#define QU_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define QU_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_RESELECTION
-#define RSL_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define RSL_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_SELECTION
-#define SEL_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define SEL_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_USLEEP
-#define USL_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define USL_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_LAST_BYTE_SENT
-#define LBS_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define LBS_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_RESTART_SELECT
-#define RSS_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define RSS_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_EXTENDED
-#define EXT_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define EXT_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_ABORT
-#define ABRT_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define ABRT_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_TAGS
-#define TAG_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define TAG_PRINTK(format, args...)
-#endif
-#if NDEBUG & NDEBUG_MERGING
-#define MER_PRINTK(format, args...) \
-       printk(KERN_DEBUG format , ## args)
-#else
-#define MER_PRINTK(format, args...)
-#endif
-
-/* conditional macros for NCR5380_print_{,phase,status} */
-
-#define NCR_PRINT(mask)        \
-       ((NDEBUG & (mask)) ? NCR5380_print(instance) : (void)0)
-
-#define NCR_PRINT_PHASE(mask) \
-       ((NDEBUG & (mask)) ? NCR5380_print_phase(instance) : (void)0)
-
-#define NCR_PRINT_STATUS(mask) \
-       ((NDEBUG & (mask)) ? NCR5380_print_status(instance) : (void)0)
-
-
-
-#endif /* ndef HOSTS_C */
-#endif /* SUN3_NCR5380_H */
+#endif /* SUN3_SCSI_H */
 
diff --git a/drivers/scsi/sun3_scsi_vme.c b/drivers/scsi/sun3_scsi_vme.c
index a3dd55d1d2fdb3d3a4294707614ef033f9d9831b..1eeece6e2040977a11c09756cb72801b491348f3 100644 (file)
@@ -1,589 +1,3 @@
- /*
- * Sun3 SCSI stuff by Erik Verbruggen (erik@bigmama.xtdnet.nl)
- *
- * Sun3 DMA routines added by Sam Creasey (sammy@sammy.net)
- *
- * VME support added by Sam Creasey
- *
- * Adapted from sun3_scsi.c -- see there for other headers
- *
- * TODO: modify this driver to support multiple Sun3 SCSI VME boards
- *
- */
-
-#define AUTOSENSE
-
-#include <linux/types.h>
-#include <linux/stddef.h>
-#include <linux/ctype.h>
-#include <linux/delay.h>
-
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/ioport.h>
-#include <linux/init.h>
-#include <linux/blkdev.h>
-
-#include <asm/io.h>
-
-#include <asm/sun3ints.h>
-#include <asm/dvma.h>
-#include <asm/idprom.h>
-#include <asm/machines.h>
-
 #define SUN3_SCSI_VME
 
-#undef SUN3_SCSI_DEBUG
-
-/* dma on! */
-#define REAL_DMA
-
-#define NDEBUG 0
-
-#define NDEBUG_ABORT           0x00100000
-#define NDEBUG_TAGS            0x00200000
-#define NDEBUG_MERGING         0x00400000
-
-#include "scsi.h"
-#include "initio.h"
-#include <scsi/scsi_host.h>
-#include "sun3_scsi.h"
-
-extern int sun3_map_test(unsigned long, char *);
-
-#define USE_WRAPPER
-/*#define RESET_BOOT */
-#define DRIVER_SETUP
-
-/*
- * BUG can be used to trigger a strange code-size related hang on 2.1 kernels
- */
-#ifdef BUG
-#undef RESET_BOOT
-#undef DRIVER_SETUP
-#endif
-
-/* #define SUPPORT_TAGS */
-
-//#define      ENABLE_IRQ()    enable_irq( SUN3_VEC_VMESCSI0 ); 
-#define ENABLE_IRQ()
-
-
-static irqreturn_t scsi_sun3_intr(int irq, void *dummy);
-static inline unsigned char sun3scsi_read(int reg);
-static inline void sun3scsi_write(int reg, int value);
-
-static int setup_can_queue = -1;
-module_param(setup_can_queue, int, 0);
-static int setup_cmd_per_lun = -1;
-module_param(setup_cmd_per_lun, int, 0);
-static int setup_sg_tablesize = -1;
-module_param(setup_sg_tablesize, int, 0);
-#ifdef SUPPORT_TAGS
-static int setup_use_tagged_queuing = -1;
-module_param(setup_use_tagged_queuing, int, 0);
-#endif
-static int setup_hostid = -1;
-module_param(setup_hostid, int, 0);
-
-static struct scsi_cmnd *sun3_dma_setup_done = NULL;
-
-#define        AFTER_RESET_DELAY       (HZ/2)
-
-/* ms to wait after hitting dma regs */
-#define SUN3_DMA_DELAY 10
-
-/* dvma buffer to allocate -- 32k should hopefully be more than sufficient */
-#define SUN3_DVMA_BUFSIZE 0xe000
-
-/* minimum number of bytes to do dma on */
-#define SUN3_DMA_MINSIZE 128
-
-static volatile unsigned char *sun3_scsi_regp;
-static volatile struct sun3_dma_regs *dregs;
-#ifdef OLDDMA
-static unsigned char *dmabuf = NULL; /* dma memory buffer */
-#endif
-static unsigned char *sun3_dma_orig_addr = NULL;
-static unsigned long sun3_dma_orig_count = 0;
-static int sun3_dma_active = 0;
-static unsigned long last_residual = 0;
-
-/*
- * NCR 5380 register access functions
- */
-
-static inline unsigned char sun3scsi_read(int reg)
-{
-       return( sun3_scsi_regp[reg] );
-}
-
-static inline void sun3scsi_write(int reg, int value)
-{
-       sun3_scsi_regp[reg] = value;
-}
-
-/*
- * XXX: status debug
- */
-static struct Scsi_Host *default_instance;
-
-/*
- * Function : int sun3scsi_detect(struct scsi_host_template * tpnt)
- *
- * Purpose : initializes mac NCR5380 driver based on the
- *     command line / compile time port and irq definitions.
- *
- * Inputs : tpnt - template for this SCSI adapter.
- *
- * Returns : 1 if a host adapter was found, 0 if not.
- *
- */
-static int __init sun3scsi_detect(struct scsi_host_template * tpnt)
-{
-       unsigned long ioaddr, irq = 0;
-       static int called = 0;
-       struct Scsi_Host *instance;
-       int i;
-       unsigned long addrs[3] = { IOBASE_SUN3_VMESCSI, 
-                                  IOBASE_SUN3_VMESCSI + 0x4000,
-                                  0 };
-       unsigned long vecs[3] = { SUN3_VEC_VMESCSI0,
-                                 SUN3_VEC_VMESCSI1,
-                                 0 };
-       /* check that this machine has an onboard 5380 */
-       switch(idprom->id_machtype) {
-       case SM_SUN3|SM_3_160:
-       case SM_SUN3|SM_3_260:
-               break;
-
-       default:
-               return 0;
-       }
-
-       if(called)
-               return 0;
-
-       tpnt->proc_name = "Sun3 5380 VME SCSI";
-
-       /* setup variables */
-       tpnt->can_queue =
-               (setup_can_queue > 0) ? setup_can_queue : CAN_QUEUE;
-       tpnt->cmd_per_lun =
-               (setup_cmd_per_lun > 0) ? setup_cmd_per_lun : CMD_PER_LUN;
-       tpnt->sg_tablesize = 
-               (setup_sg_tablesize >= 0) ? setup_sg_tablesize : SG_TABLESIZE;
-       
-       if (setup_hostid >= 0)
-               tpnt->this_id = setup_hostid;
-       else {
-               /* use 7 as default */
-               tpnt->this_id = 7;
-       }
-       
-       ioaddr = 0;
-       for(i = 0; addrs[i] != 0; i++) {
-               unsigned char x;
-               
-               ioaddr = (unsigned long)sun3_ioremap(addrs[i], PAGE_SIZE,
-                                                    SUN3_PAGE_TYPE_VME16);
-               irq = vecs[i];
-               sun3_scsi_regp = (unsigned char *)ioaddr;
-               
-               dregs = (struct sun3_dma_regs *)(((unsigned char *)ioaddr) + 8);
-               
-               if(sun3_map_test((unsigned long)dregs, &x)) {
-                       unsigned short oldcsr;
-
-                       oldcsr = dregs->csr;
-                       dregs->csr = 0;
-                       udelay(SUN3_DMA_DELAY);
-                       if(dregs->csr == 0x1400)
-                               break;
-                       
-                       dregs->csr = oldcsr;
-               }
-
-               iounmap((void *)ioaddr);
-               ioaddr = 0;
-       }
-
-       if(!ioaddr)
-               return 0;
-       
-#ifdef SUPPORT_TAGS
-       if (setup_use_tagged_queuing < 0)
-               setup_use_tagged_queuing = USE_TAGGED_QUEUING;
-#endif
-
-       instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata));
-       if(instance == NULL)
-               return 0;
-               
-       default_instance = instance;
-
-        instance->io_port = (unsigned long) ioaddr;
-       instance->irq = irq;
-
-       NCR5380_init(instance, 0);
-
-       instance->n_io_port = 32;
-
-        ((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
-
-       if (request_irq(instance->irq, scsi_sun3_intr,
-                       0, "Sun3SCSI-5380VME", instance)) {
-#ifndef REAL_DMA
-               printk("scsi%d: IRQ%d not free, interrupts disabled\n",
-                      instance->host_no, instance->irq);
-               instance->irq = SCSI_IRQ_NONE;
-#else
-               printk("scsi%d: IRQ%d not free, bailing out\n",
-                      instance->host_no, instance->irq);
-               return 0;
-#endif
-       }
-
-       printk("scsi%d: Sun3 5380 VME at port %lX irq", instance->host_no, instance->io_port);
-       if (instance->irq == SCSI_IRQ_NONE)
-               printk ("s disabled");
-       else
-               printk (" %d", instance->irq);
-       printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
-              instance->can_queue, instance->cmd_per_lun,
-              SUN3SCSI_PUBLIC_RELEASE);
-       printk("\nscsi%d:", instance->host_no);
-       NCR5380_print_options(instance);
-       printk("\n");
-
-       dregs->csr = 0;
-       udelay(SUN3_DMA_DELAY);
-       dregs->csr = CSR_SCSI | CSR_FIFO | CSR_INTR;
-       udelay(SUN3_DMA_DELAY);
-       dregs->fifo_count = 0;
-       dregs->fifo_count_hi = 0;
-       dregs->dma_addr_hi = 0;
-       dregs->dma_addr_lo = 0;
-       dregs->dma_count_hi = 0;
-       dregs->dma_count_lo = 0;
-
-       dregs->ivect = VME_DATA24 | (instance->irq & 0xff);
-
-       called = 1;
-
-#ifdef RESET_BOOT
-       sun3_scsi_reset_boot(instance);
-#endif
-
-       return 1;
-}
-
-int sun3scsi_release (struct Scsi_Host *shpnt)
-{
-       if (shpnt->irq != SCSI_IRQ_NONE)
-               free_irq(shpnt->irq, shpnt);
-
-       iounmap((void *)sun3_scsi_regp);
-
-       NCR5380_exit(shpnt);
-       return 0;
-}
-
-#ifdef RESET_BOOT
-/*
- * Our 'bus reset on boot' function
- */
-
-static void sun3_scsi_reset_boot(struct Scsi_Host *instance)
-{
-       unsigned long end;
-
-       NCR5380_local_declare();
-       NCR5380_setup(instance);
-       
-       /*
-        * Do a SCSI reset to clean up the bus during initialization. No
-        * messing with the queues, interrupts, or locks necessary here.
-        */
-
-       printk( "Sun3 SCSI: resetting the SCSI bus..." );
-
-       /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */
-//             sun3_disable_irq( IRQ_SUN3_SCSI );
-
-       /* get in phase */
-       NCR5380_write( TARGET_COMMAND_REG,
-                     PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) ));
-
-       /* assert RST */
-       NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST );
-
-       /* The min. reset hold time is 25us, so 40us should be enough */
-       udelay( 50 );
-
-       /* reset RST and interrupt */
-       NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE );
-       NCR5380_read( RESET_PARITY_INTERRUPT_REG );
-
-       for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); )
-               barrier();
-
-       /* switch on SCSI IRQ again */
-//             sun3_enable_irq( IRQ_SUN3_SCSI );
-
-       printk( " done\n" );
-}
-#endif
-
-static const char * sun3scsi_info (struct Scsi_Host *spnt) {
-    return "";
-}
-
-// safe bits for the CSR
-#define CSR_GOOD 0x060f
-
-static irqreturn_t scsi_sun3_intr(int irq, void *dummy)
-{
-       unsigned short csr = dregs->csr;
-       int handled = 0;
-
-       dregs->csr &= ~CSR_DMA_ENABLE;
-
-
-#ifdef SUN3_SCSI_DEBUG
-       printk("scsi_intr csr %x\n", csr);
-#endif
-
-       if(csr & ~CSR_GOOD) {
-               if(csr & CSR_DMA_BUSERR) {
-                       printk("scsi%d: bus error in dma\n", default_instance->host_no);
-#ifdef SUN3_SCSI_DEBUG
-                       printk("scsi: residual %x count %x addr %p dmaaddr %x\n", 
-                              dregs->fifo_count,
-                              dregs->dma_count_lo | (dregs->dma_count_hi << 16),
-                              sun3_dma_orig_addr,
-                              dregs->dma_addr_lo | (dregs->dma_addr_hi << 16));
-#endif
-               }
-
-               if(csr & CSR_DMA_CONFLICT) {
-                       printk("scsi%d: dma conflict\n", default_instance->host_no);
-               }
-               handled = 1;
-       }
-
-       if(csr & (CSR_SDB_INT | CSR_DMA_INT)) {
-               NCR5380_intr(irq, dummy);
-               handled = 1;
-       }
-
-       return IRQ_RETVAL(handled);
-}
-
-/*
- * Debug stuff - to be called on NMI, or sysrq key. Use at your own risk; 
- * reentering NCR5380_print_status seems to have ugly side effects
- */
-
-/* this doesn't seem to get used at all -- sam */
-#if 0
-void sun3_sun3_debug (void)
-{
-       unsigned long flags;
-       NCR5380_local_declare();
-
-       if (default_instance) {
-                       local_irq_save(flags);
-                       NCR5380_print_status(default_instance);
-                       local_irq_restore(flags);
-       }
-}
-#endif
-
-
-/* sun3scsi_dma_setup() -- initialize the dma controller for a read/write */
-static unsigned long sun3scsi_dma_setup(void *data, unsigned long count, int write_flag)
-{
-       void *addr;
-
-       if(sun3_dma_orig_addr != NULL)
-               dvma_unmap(sun3_dma_orig_addr);
-
-//     addr = sun3_dvma_page((unsigned long)data, (unsigned long)dmabuf);
-       addr = (void *)dvma_map_vme((unsigned long) data, count);
-               
-       sun3_dma_orig_addr = addr;
-       sun3_dma_orig_count = count;
-       
-#ifdef SUN3_SCSI_DEBUG
-       printk("scsi: dma_setup addr %p count %x\n", addr, count);
-#endif
-
-//     dregs->fifo_count = 0;
-#if 0  
-       /* reset fifo */
-       dregs->csr &= ~CSR_FIFO;
-       dregs->csr |= CSR_FIFO;
-#endif 
-       /* set direction */
-       if(write_flag)
-               dregs->csr |= CSR_SEND;
-       else
-               dregs->csr &= ~CSR_SEND;
-       
-       /* reset fifo */
-//     dregs->csr &= ~CSR_FIFO;
-//     dregs->csr |= CSR_FIFO;
-
-       dregs->csr |= CSR_PACK_ENABLE;
-
-       dregs->dma_addr_hi = ((unsigned long)addr >> 16);
-       dregs->dma_addr_lo = ((unsigned long)addr & 0xffff);
-       
-       dregs->dma_count_hi = 0;
-       dregs->dma_count_lo = 0;
-       dregs->fifo_count_hi = 0;
-       dregs->fifo_count = 0;
-               
-#ifdef SUN3_SCSI_DEBUG
-       printk("scsi: dma_setup done csr %x\n", dregs->csr);
-#endif
-               return count;
-
-}
-
-static inline unsigned long sun3scsi_dma_residual(struct Scsi_Host *instance)
-{
-       return last_residual;
-}
-
-static inline unsigned long sun3scsi_dma_xfer_len(unsigned long wanted,
-                                                 struct scsi_cmnd *cmd,
-                                                 int write_flag)
-{
-       if (cmd->request->cmd_type == REQ_TYPE_FS)
-               return wanted;
-       else
-               return 0;
-}
-
-static int sun3scsi_dma_start(unsigned long count, char *data)
-{
-       
-       unsigned short csr;
-
-       csr = dregs->csr;
-#ifdef SUN3_SCSI_DEBUG
-       printk("scsi: dma_start data %p count %x csr %x fifo %x\n", data, count, csr, dregs->fifo_count);
-#endif
-       
-       dregs->dma_count_hi = (sun3_dma_orig_count >> 16);
-       dregs->dma_count_lo = (sun3_dma_orig_count & 0xffff);
-
-       dregs->fifo_count_hi = (sun3_dma_orig_count >> 16);
-       dregs->fifo_count = (sun3_dma_orig_count & 0xffff);
-
-//     if(!(csr & CSR_DMA_ENABLE))
-//             dregs->csr |= CSR_DMA_ENABLE;
-
-       return 0;
-}
-
-/* clean up after our dma is done */
-static int sun3scsi_dma_finish(int write_flag)
-{
-       unsigned short fifo;
-       int ret = 0;
-       
-       sun3_dma_active = 0;
-
-       dregs->csr &= ~CSR_DMA_ENABLE;
-       
-       fifo = dregs->fifo_count;
-       if(write_flag) {
-               if((fifo > 0) && (fifo < sun3_dma_orig_count))
-                       fifo++;
-       }
-
-       last_residual = fifo;
-#ifdef SUN3_SCSI_DEBUG
-       printk("scsi: residual %x total %x\n", fifo, sun3_dma_orig_count);
-#endif
-       /* empty bytes from the fifo which didn't make it */
-       if((!write_flag) && (dregs->csr & CSR_LEFT)) {
-               unsigned char *vaddr;
-
-#ifdef SUN3_SCSI_DEBUG
-               printk("scsi: got left over bytes\n");
-#endif
-
-               vaddr = (unsigned char *)dvma_vmetov(sun3_dma_orig_addr);
-               
-               vaddr += (sun3_dma_orig_count - fifo);
-               vaddr--;
-               
-               switch(dregs->csr & CSR_LEFT) {
-               case CSR_LEFT_3:
-                       *vaddr = (dregs->bpack_lo & 0xff00) >> 8;
-                       vaddr--;
-                       
-               case CSR_LEFT_2:
-                       *vaddr = (dregs->bpack_hi & 0x00ff);
-                       vaddr--;
-                       
-               case CSR_LEFT_1:
-                       *vaddr = (dregs->bpack_hi & 0xff00) >> 8;
-                       break;
-               }
-               
-               
-       }
-
-       dvma_unmap(sun3_dma_orig_addr);
-       sun3_dma_orig_addr = NULL;
-
-       dregs->dma_addr_hi = 0;
-       dregs->dma_addr_lo = 0;
-       dregs->dma_count_hi = 0;
-       dregs->dma_count_lo = 0;
-
-       dregs->fifo_count = 0;
-       dregs->fifo_count_hi = 0;
-
-       dregs->csr &= ~CSR_SEND;
-       
-//     dregs->csr |= CSR_DMA_ENABLE;
-       
-#if 0
-       /* reset fifo */
-       dregs->csr &= ~CSR_FIFO;
-       dregs->csr |= CSR_FIFO;
-#endif 
-       sun3_dma_setup_done = NULL;
-
-       return ret;
-
-}
-
-#include "sun3_NCR5380.c"
-
-static struct scsi_host_template driver_template = {
-       .name                   = SUN3_SCSI_NAME,
-       .detect                 = sun3scsi_detect,
-       .release                = sun3scsi_release,
-       .info                   = sun3scsi_info,
-       .queuecommand           = sun3scsi_queue_command,
-       .eh_abort_handler       = sun3scsi_abort,
-       .eh_bus_reset_handler   = sun3scsi_bus_reset,
-       .can_queue              = CAN_QUEUE,
-       .this_id                = 7,
-       .sg_tablesize           = SG_TABLESIZE,
-       .cmd_per_lun            = CMD_PER_LUN,
-       .use_clustering         = DISABLE_CLUSTERING
-};
-
-
-#include "scsi_module.c"
-
-MODULE_LICENSE("GPL");
+#include "sun3_scsi.c"
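
With the VME specifics folded into sun3_scsi.c under #ifdef SUN3_SCSI_VME, this file shrinks to a two-line wrapper: define the variant macro, then include the common source. A minimal stand-alone illustration of that one-source/two-variant build trick (the wrapper and core file names are made up; the SUN3_SCSI_VME macro and board name strings come from the hunks above):

	#include <stdio.h>

	/* One implementation, two build variants selected by a macro. */
	#ifdef SUN3_SCSI_VME
	#define BOARD_NAME "Sun3 5380 VME SCSI"
	#else
	#define BOARD_NAME "Sun3 5380 SCSI"
	#endif

	int main(void)
	{
		/* an equivalent wrapper "core_vme.c" would contain just:
		 *	#define SUN3_SCSI_VME
		 *	#include "core.c"
		 */
		printf("probing %s\n", BOARD_NAME);
		return 0;
	}

The same object logic is compiled twice, so fixes land in one place while each variant keeps its own register layout and probe path.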
diff --git a/drivers/scsi/t128.c b/drivers/scsi/t128.c
index a4abce9d526e6b553e1e7412ddf8c92e68cf2906..8cc80931df14990860e1e1525d83dd3d9c0dbf57 100644 (file)
  * 15 9-11
  */
  
-/*
- * $Log: t128.c,v $
- */
-
 #include <linux/signal.h>
 #include <linux/io.h>
 #include <linux/blkdev.h>
diff --git a/drivers/scsi/t128.h b/drivers/scsi/t128.h
index 1df82c28e56d6af8b1a4558bee1144e03299f43f..fd68cecc62afa9aee9403deebe9822d47afd6f25 100644 (file)
  * 1+ (800) 334-5454
  */
 
-/*
- * $Log: t128.h,v $
- */
-
 #ifndef T128_H
 #define T128_H
 
@@ -107,8 +103,6 @@ static int t128_bus_reset(struct scsi_cmnd *);
 #define CAN_QUEUE 32
 #endif
 
-#ifndef HOSTS_C
-
 #define NCR5380_implementation_fields \
     void __iomem *base
 
@@ -148,6 +142,5 @@ static int t128_bus_reset(struct scsi_cmnd *);
 
 #define T128_IRQS 0xc4a8
 
-#endif /* else def HOSTS_C */
 #endif /* ndef ASM */
 #endif /* T128_H */
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 7210500905207e0b7ef343beff02a7195c6bfb23..f42d1cee652aaec50dbfa9bcd7dd7848ab99191e 100644 (file)
@@ -196,9 +196,9 @@ enum {
  * @dword_2: UPIU header DW-2
  */
 struct utp_upiu_header {
-       u32 dword_0;
-       u32 dword_1;
-       u32 dword_2;
+       __be32 dword_0;
+       __be32 dword_1;
+       __be32 dword_2;
 };
 
 /**
@@ -207,7 +207,7 @@ struct utp_upiu_header {
  * @cdb: Command Descriptor Block CDB DW-4 to DW-7
  */
 struct utp_upiu_cmd {
-       u32 exp_data_transfer_len;
+       __be32 exp_data_transfer_len;
        u8 cdb[MAX_CDB_SIZE];
 };
 
@@ -228,10 +228,10 @@ struct utp_upiu_query {
        u8 idn;
        u8 index;
        u8 selector;
-       u16 reserved_osf;
-       u16 length;
-       u32 value;
-       u32 reserved[2];
+       __be16 reserved_osf;
+       __be16 length;
+       __be32 value;
+       __be32 reserved[2];
 };
 
 /**
@@ -256,9 +256,9 @@ struct utp_upiu_req {
  * @sense_data: Sense data field DW-8 to DW-12
  */
 struct utp_cmd_rsp {
-       u32 residual_transfer_count;
-       u32 reserved[4];
-       u16 sense_data_len;
+       __be32 residual_transfer_count;
+       __be32 reserved[4];
+       __be16 sense_data_len;
        u8 sense_data[18];
 };
 
@@ -286,10 +286,10 @@ struct utp_upiu_rsp {
  */
 struct utp_upiu_task_req {
        struct utp_upiu_header header;
-       u32 input_param1;
-       u32 input_param2;
-       u32 input_param3;
-       u32 reserved[2];
+       __be32 input_param1;
+       __be32 input_param2;
+       __be32 input_param3;
+       __be32 reserved[2];
 };
 
 /**
@@ -301,9 +301,9 @@ struct utp_upiu_task_req {
  */
 struct utp_upiu_task_rsp {
        struct utp_upiu_header header;
-       u32 output_param1;
-       u32 output_param2;
-       u32 reserved[3];
+       __be32 output_param1;
+       __be32 output_param2;
+       __be32 reserved[3];
 };
 
 /**
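
The __be32/__be16 annotations above mark UPIU fields that stay in wire (big-endian) order in memory, so sparse can flag any access that skips cpu_to_be*/be*_to_cpu; the ufshcd.c hunks further down add exactly those conversions. A small user-space stand-in for the conversions, purely for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* Store and fetch a 32-bit value in big-endian byte order regardless of
	 * host endianness, mimicking cpu_to_be32()/be32_to_cpu() round trips. */
	static void put_be32(uint8_t *p, uint32_t v)
	{
		p[0] = (v >> 24) & 0xff;
		p[1] = (v >> 16) & 0xff;
		p[2] = (v >> 8) & 0xff;
		p[3] = v & 0xff;
	}

	static uint32_t get_be32(const uint8_t *p)
	{
		return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
		       ((uint32_t)p[2] << 8) | p[3];
	}

	int main(void)
	{
		uint8_t wire[4];

		put_be32(wire, 0x12345678);	/* as the field would sit in a UPIU */
		printf("wire: %02x %02x %02x %02x, back: 0x%08x\n",
		       wire[0], wire[1], wire[2], wire[3], get_be32(wire));
		return 0;
	}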
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 04884d663e4e13bcb88b4ac46dcc76e48f77620d..0c2877251251d9e7a4a94e0e654d3c9e50a40609 100644 (file)
@@ -55,6 +55,9 @@
 /* Query request timeout */
 #define QUERY_REQ_TIMEOUT 30 /* msec */
 
+/* Task management command timeout */
+#define TM_CMD_TIMEOUT 100 /* msecs */
+
 /* Expose the flag value from utp_upiu_query.value */
 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
 
@@ -71,9 +74,22 @@ enum {
 
 /* UFSHCD states */
 enum {
-       UFSHCD_STATE_OPERATIONAL,
        UFSHCD_STATE_RESET,
        UFSHCD_STATE_ERROR,
+       UFSHCD_STATE_OPERATIONAL,
+};
+
+/* UFSHCD error handling flags */
+enum {
+       UFSHCD_EH_IN_PROGRESS = (1 << 0),
+};
+
+/* UFSHCD UIC layer error flags */
+enum {
+       UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
+       UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
+       UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
+       UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
 };
 
 /* Interrupt configuration options */
@@ -83,6 +99,18 @@ enum {
        UFSHCD_INT_CLEAR,
 };
 
+#define ufshcd_set_eh_in_progress(h) \
+       (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_eh_in_progress(h) \
+       (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
+#define ufshcd_clear_eh_in_progress(h) \
+       (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
+
+static void ufshcd_tmc_handler(struct ufs_hba *hba);
+static void ufshcd_async_scan(void *data, async_cookie_t cookie);
+static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
+
 /*
  * ufshcd_wait_for_register - wait for register value to change
  * @hba - per-adapter interface
@@ -163,7 +191,7 @@ static inline int ufshcd_is_device_present(u32 reg_hcs)
  */
 static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 {
-       return lrbp->utr_descriptor_ptr->header.dword_2 & MASK_OCS;
+       return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
 }
 
 /**
@@ -176,19 +204,41 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
 static inline int
 ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
 {
-       return task_req_descp->header.dword_2 & MASK_OCS;
+       return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
 }
 
 /**
  * ufshcd_get_tm_free_slot - get a free slot for task management request
  * @hba: per adapter instance
+ * @free_slot: pointer to variable with available slot value
  *
- * Returns maximum number of task management request slots in case of
- * task management queue full or returns the free slot number
+ * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
+ * Returns 0 if free slot is not available, else return 1 with tag value
+ * in @free_slot.
  */
-static inline int ufshcd_get_tm_free_slot(struct ufs_hba *hba)
+static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
 {
-       return find_first_zero_bit(&hba->outstanding_tasks, hba->nutmrs);
+       int tag;
+       bool ret = false;
+
+       if (!free_slot)
+               goto out;
+
+       do {
+               tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
+               if (tag >= hba->nutmrs)
+                       goto out;
+       } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
+
+       *free_slot = tag;
+       ret = true;
+out:
+       return ret;
+}
+
+static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
+{
+       clear_bit_unlock(slot, &hba->tm_slots_in_use);
 }
 
 /**
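
ufshcd_get_tm_free_slot() above pairs a bitmap scan with test_and_set_bit_lock() and rescans whenever another context claims the tag between the scan and the atomic set. A rough user-space analogue of that claim/retry pattern using C11 atomics on a single word with 8 tags (this is an illustration, not the kernel bitmap API):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define NUTMRS 8

	static _Atomic unsigned long tm_slots_in_use;

	static bool get_tm_free_slot(int *free_slot)
	{
		unsigned long old, claimed;
		int tag;

		do {
			old = atomic_load(&tm_slots_in_use);
			for (tag = 0; tag < NUTMRS; tag++)	/* find first zero bit */
				if (!(old & (1UL << tag)))
					break;
			if (tag >= NUTMRS)
				return false;			/* all slots busy */
			claimed = old | (1UL << tag);
		} while (!atomic_compare_exchange_weak(&tm_slots_in_use, &old, claimed));

		*free_slot = tag;
		return true;
	}

	static void put_tm_slot(int slot)
	{
		atomic_fetch_and(&tm_slots_in_use, ~(1UL << slot));
	}

	int main(void)
	{
		int tag;

		while (get_tm_free_slot(&tag))		/* drain all 8 tags */
			printf("claimed tag %d\n", tag);
		put_tm_slot(3);				/* release one */
		if (get_tm_free_slot(&tag))
			printf("reclaimed tag %d\n", tag);
		return 0;
	}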
@@ -389,26 +439,6 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
        }
 }
 
-/**
- * ufshcd_query_to_cpu() - formats the buffer to native cpu endian
- * @response: upiu query response to convert
- */
-static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response)
-{
-       response->length = be16_to_cpu(response->length);
-       response->value = be32_to_cpu(response->value);
-}
-
-/**
- * ufshcd_query_to_be() - formats the buffer to big endian
- * @request: upiu query request to convert
- */
-static inline void ufshcd_query_to_be(struct utp_upiu_query *request)
-{
-       request->length = cpu_to_be16(request->length);
-       request->value = cpu_to_be32(request->value);
-}
-
 /**
  * ufshcd_copy_query_response() - Copy the Query Response and the data
  * descriptor
@@ -425,7 +455,6 @@ void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
                        UPIU_RSP_CODE_OFFSET;
 
        memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
-       ufshcd_query_to_cpu(&query_res->upiu_res);
 
 
        /* Get the descriptor */
@@ -749,7 +778,7 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
 {
        struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
        struct ufs_query *query = &hba->dev_cmd.query;
-       u16 len = query->request.upiu_req.length;
+       u16 len = be16_to_cpu(query->request.upiu_req.length);
        u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
 
        /* Query request header */
@@ -766,7 +795,6 @@ static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
        /* Copy the Query Request buffer as is */
        memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
                        QUERY_OSF_SIZE);
-       ufshcd_query_to_be(&ucd_req_ptr->qr);
 
        /* Copy the Descriptor */
        if ((len > 0) && (query->request.upiu_req.opcode ==
@@ -853,10 +881,25 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
 
        tag = cmd->request->tag;
 
-       if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       switch (hba->ufshcd_state) {
+       case UFSHCD_STATE_OPERATIONAL:
+               break;
+       case UFSHCD_STATE_RESET:
                err = SCSI_MLQUEUE_HOST_BUSY;
-               goto out;
+               goto out_unlock;
+       case UFSHCD_STATE_ERROR:
+               set_host_byte(cmd, DID_ERROR);
+               cmd->scsi_done(cmd);
+               goto out_unlock;
+       default:
+               dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+                               __func__, hba->ufshcd_state);
+               set_host_byte(cmd, DID_BAD_TARGET);
+               cmd->scsi_done(cmd);
+               goto out_unlock;
        }
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        /* acquire the tag to make sure device cmds don't use it */
        if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
@@ -893,6 +936,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
        /* issue command to the controller */
        spin_lock_irqsave(hba->host->host_lock, flags);
        ufshcd_send_command(hba, tag);
+out_unlock:
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
        return err;
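
The queuecommand() hunks above gate each command on the adapter state under the host lock: operational means issue, a reset in progress bounces the command back as host-busy, and an error or unexpected state completes it immediately through scsi_done(). A compact stand-alone sketch of that three-way decision (the enum values are placeholders, not SCSI midlayer definitions):

	#include <stdio.h>

	enum hba_state { STATE_RESET, STATE_ERROR, STATE_OPERATIONAL };
	enum verdict   { ISSUE, RETRY_LATER, FAIL_NOW };

	static enum verdict gate_command(enum hba_state state)
	{
		switch (state) {
		case STATE_OPERATIONAL:
			return ISSUE;		/* hand the tag to the controller */
		case STATE_RESET:
			return RETRY_LATER;	/* SCSI_MLQUEUE_HOST_BUSY in the driver */
		case STATE_ERROR:
		default:
			return FAIL_NOW;	/* complete with an error host byte */
		}
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       gate_command(STATE_OPERATIONAL),
		       gate_command(STATE_RESET),
		       gate_command(STATE_ERROR));
		return 0;
	}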
@@ -1151,7 +1195,7 @@ static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
        }
 
        if (flag_res)
-               *flag_res = (response->upiu_res.value &
+               *flag_res = (be32_to_cpu(response->upiu_res.value) &
                                MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
 
 out_unlock:
@@ -1170,7 +1214,7 @@ out_unlock:
  *
  * Returns 0 for success, non-zero in case of failure
 */
-int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
                        enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
 {
        struct ufs_query_req *request;
@@ -1195,7 +1239,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
        switch (opcode) {
        case UPIU_QUERY_OPCODE_WRITE_ATTR:
                request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
-               request->upiu_req.value = *attr_val;
+               request->upiu_req.value = cpu_to_be32(*attr_val);
                break;
        case UPIU_QUERY_OPCODE_READ_ATTR:
                request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
@@ -1222,7 +1266,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
                goto out_unlock;
        }
 
-       *attr_val = response->upiu_res.value;
+       *attr_val = be32_to_cpu(response->upiu_res.value);
 
 out_unlock:
        mutex_unlock(&hba->dev_cmd.lock);
@@ -1481,7 +1525,7 @@ EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
  *
  * Returns 0 on success, non-zero value on failure
  */
-int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
+static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 {
        struct uic_command uic_cmd = {0};
        struct completion pwr_done;
@@ -1701,11 +1745,6 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
                goto out;
        }
 
-       if (hba->ufshcd_state == UFSHCD_STATE_RESET)
-               scsi_unblock_requests(hba->host);
-
-       hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-
 out:
        return err;
 }
@@ -1830,66 +1869,6 @@ static int ufshcd_verify_dev_init(struct ufs_hba *hba)
        return err;
 }
 
-/**
- * ufshcd_do_reset - reset the host controller
- * @hba: per adapter instance
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_do_reset(struct ufs_hba *hba)
-{
-       struct ufshcd_lrb *lrbp;
-       unsigned long flags;
-       int tag;
-
-       /* block commands from midlayer */
-       scsi_block_requests(hba->host);
-
-       spin_lock_irqsave(hba->host->host_lock, flags);
-       hba->ufshcd_state = UFSHCD_STATE_RESET;
-
-       /* send controller to reset state */
-       ufshcd_hba_stop(hba);
-       spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-       /* abort outstanding commands */
-       for (tag = 0; tag < hba->nutrs; tag++) {
-               if (test_bit(tag, &hba->outstanding_reqs)) {
-                       lrbp = &hba->lrb[tag];
-                       if (lrbp->cmd) {
-                               scsi_dma_unmap(lrbp->cmd);
-                               lrbp->cmd->result = DID_RESET << 16;
-                               lrbp->cmd->scsi_done(lrbp->cmd);
-                               lrbp->cmd = NULL;
-                               clear_bit_unlock(tag, &hba->lrb_in_use);
-                       }
-               }
-       }
-
-       /* complete device management command */
-       if (hba->dev_cmd.complete)
-               complete(hba->dev_cmd.complete);
-
-       /* clear outstanding request/task bit maps */
-       hba->outstanding_reqs = 0;
-       hba->outstanding_tasks = 0;
-
-       /* Host controller enable */
-       if (ufshcd_hba_enable(hba)) {
-               dev_err(hba->dev,
-                       "Reset: Controller initialization failed\n");
-               return FAILED;
-       }
-
-       if (ufshcd_link_startup(hba)) {
-               dev_err(hba->dev,
-                       "Reset: Link start-up failed\n");
-               return FAILED;
-       }
-
-       return SUCCESS;
-}
-
 /**
  * ufshcd_slave_alloc - handle initial SCSI device configurations
  * @sdev: pointer to SCSI device
@@ -1907,6 +1886,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
        sdev->use_10_for_ms = 1;
        scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
 
+       /* allow SCSI layer to restart the device in case of errors */
+       sdev->allow_restart = 1;
+
        /*
         * Inform SCSI Midlayer that the LUN queue depth is same as the
         * controller queue depth. If a LUN queue depth is less than the
@@ -1934,10 +1916,11 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
  * ufshcd_task_req_compl - handle task management request completion
  * @hba: per adapter instance
  * @index: index of the completed request
+ * @resp: task management service response
  *
- * Returns SUCCESS/FAILED
+ * Returns non-zero value on error, zero on success
  */
-static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
+static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
 {
        struct utp_task_req_desc *task_req_descp;
        struct utp_upiu_task_rsp *task_rsp_upiup;
@@ -1958,19 +1941,15 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
                                task_req_descp[index].task_rsp_upiu;
                task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
                task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
-
-               if (task_result != UPIU_TASK_MANAGEMENT_FUNC_COMPL &&
-                   task_result != UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED)
-                       task_result = FAILED;
-               else
-                       task_result = SUCCESS;
+               if (resp)
+                       *resp = (u8)task_result;
        } else {
-               task_result = FAILED;
-               dev_err(hba->dev,
-                       "trc: Invalid ocs = %x\n", ocs_value);
+               dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
+                               __func__, ocs_value);
        }
        spin_unlock_irqrestore(hba->host->host_lock, flags);
-       return task_result;
+
+       return ocs_value;
 }
 
 /**
@@ -2105,6 +2084,9 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
        case OCS_ABORTED:
                result |= DID_ABORT << 16;
                break;
+       case OCS_INVALID_COMMAND_STATUS:
+               result |= DID_REQUEUE << 16;
+               break;
        case OCS_INVALID_CMD_TABLE_ATTR:
        case OCS_INVALID_PRDT_ATTR:
        case OCS_MISMATCH_DATA_BUF_SIZE:
@@ -2422,41 +2404,145 @@ out:
 }
 
 /**
- * ufshcd_fatal_err_handler - handle fatal errors
- * @hba: per adapter instance
+ * ufshcd_err_handler - handle UFS errors that require s/w attention
+ * @work: pointer to work structure
  */
-static void ufshcd_fatal_err_handler(struct work_struct *work)
+static void ufshcd_err_handler(struct work_struct *work)
 {
        struct ufs_hba *hba;
-       hba = container_of(work, struct ufs_hba, feh_workq);
+       unsigned long flags;
+       u32 err_xfer = 0;
+       u32 err_tm = 0;
+       int err = 0;
+       int tag;
+
+       hba = container_of(work, struct ufs_hba, eh_work);
 
        pm_runtime_get_sync(hba->dev);
-       /* check if reset is already in progress */
-       if (hba->ufshcd_state != UFSHCD_STATE_RESET)
-               ufshcd_do_reset(hba);
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               goto out;
+       }
+
+       hba->ufshcd_state = UFSHCD_STATE_RESET;
+       ufshcd_set_eh_in_progress(hba);
+
+       /* Complete requests that have door-bell cleared by h/w */
+       ufshcd_transfer_req_compl(hba);
+       ufshcd_tmc_handler(hba);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       /* Clear pending transfer requests */
+       for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
+               if (ufshcd_clear_cmd(hba, tag))
+                       err_xfer |= 1 << tag;
+
+       /* Clear pending task management requests */
+       for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
+               if (ufshcd_clear_tm_cmd(hba, tag))
+                       err_tm |= 1 << tag;
+
+       /* Complete the requests that are cleared by s/w */
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       ufshcd_transfer_req_compl(hba);
+       ufshcd_tmc_handler(hba);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       /* Fatal errors need reset */
+       if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
+                       ((hba->saved_err & UIC_ERROR) &&
+                        (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
+               err = ufshcd_reset_and_restore(hba);
+               if (err) {
+                       dev_err(hba->dev, "%s: reset and restore failed\n",
+                                       __func__);
+                       hba->ufshcd_state = UFSHCD_STATE_ERROR;
+               }
+               /*
+                * Inform scsi mid-layer that we did reset and allow to handle
+                * Unit Attention properly.
+                */
+               scsi_report_bus_reset(hba->host, 0);
+               hba->saved_err = 0;
+               hba->saved_uic_err = 0;
+       }
+       ufshcd_clear_eh_in_progress(hba);
+
+out:
+       scsi_unblock_requests(hba->host);
        pm_runtime_put_sync(hba->dev);
 }
 
 /**
- * ufshcd_err_handler - Check for fatal errors
- * @work: pointer to a work queue structure
+ * ufshcd_update_uic_error - check and set fatal UIC error flags.
+ * @hba: per-adapter instance
  */
-static void ufshcd_err_handler(struct ufs_hba *hba)
+static void ufshcd_update_uic_error(struct ufs_hba *hba)
 {
        u32 reg;
 
+       /* PA_INIT_ERROR is fatal and needs UIC reset */
+       reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
+       if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+               hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
+
+       /* UIC NL/TL/DME errors need software retry */
+       reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
+       if (reg)
+               hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+
+       reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
+       if (reg)
+               hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+
+       reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
+       if (reg)
+               hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+
+       dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
+                       __func__, hba->uic_error);
+}
+
+/**
+ * ufshcd_check_errors - Check for errors that need s/w attention
+ * @hba: per-adapter instance
+ */
+static void ufshcd_check_errors(struct ufs_hba *hba)
+{
+       bool queue_eh_work = false;
+
        if (hba->errors & INT_FATAL_ERRORS)
-               goto fatal_eh;
+               queue_eh_work = true;
 
        if (hba->errors & UIC_ERROR) {
-               reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
-               if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
-                       goto fatal_eh;
+               hba->uic_error = 0;
+               ufshcd_update_uic_error(hba);
+               if (hba->uic_error)
+                       queue_eh_work = true;
        }
-       return;
-fatal_eh:
-       hba->ufshcd_state = UFSHCD_STATE_ERROR;
-       schedule_work(&hba->feh_workq);
+
+       if (queue_eh_work) {
+               /* handle fatal errors only when link is functional */
+               if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
+                       /* block commands from scsi mid-layer */
+                       scsi_block_requests(hba->host);
+
+                       /* transfer error masks to sticky bits */
+                       hba->saved_err |= hba->errors;
+                       hba->saved_uic_err |= hba->uic_error;
+
+                       hba->ufshcd_state = UFSHCD_STATE_ERROR;
+                       schedule_work(&hba->eh_work);
+               }
+       }
+       /*
+        * if (!queue_eh_work) -
+        * Other errors are either non-fatal where host recovers
+        * itself without s/w intervention or errors that will be
+        * handled by the SCSI core layer.
+        */
 }
 
 /**
@@ -2469,7 +2555,7 @@ static void ufshcd_tmc_handler(struct ufs_hba *hba)
 
        tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
        hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
-       wake_up_interruptible(&hba->ufshcd_tm_wait_queue);
+       wake_up(&hba->tm_wq);
 }
 
 /**
@@ -2481,7 +2567,7 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 {
        hba->errors = UFSHCD_ERROR_MASK & intr_status;
        if (hba->errors)
-               ufshcd_err_handler(hba);
+               ufshcd_check_errors(hba);
 
        if (intr_status & UFSHCD_UIC_MASK)
                ufshcd_uic_cmd_compl(hba, intr_status);
@@ -2519,38 +2605,58 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
        return retval;
 }
 
+static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
+{
+       int err = 0;
+       u32 mask = 1 << tag;
+       unsigned long flags;
+
+       if (!test_bit(tag, &hba->outstanding_tasks))
+               goto out;
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       /* poll for max. 1 sec to clear door bell register by h/w */
+       err = ufshcd_wait_for_register(hba,
+                       REG_UTP_TASK_REQ_DOOR_BELL,
+                       mask, 0, 1000, 1000);
+out:
+       return err;
+}
+
 /**
  * ufshcd_issue_tm_cmd - issues task management commands to controller
  * @hba: per adapter instance
- * @lrbp: pointer to local reference block
+ * @lun_id: LUN ID to which TM command is sent
+ * @task_id: task ID to which the TM command is applicable
+ * @tm_function: task management function opcode
+ * @tm_response: task management service response return value
  *
- * Returns SUCCESS/FAILED
+ * Returns non-zero value on error, zero on success.
  */
-static int
-ufshcd_issue_tm_cmd(struct ufs_hba *hba,
-                   struct ufshcd_lrb *lrbp,
-                   u8 tm_function)
+static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
+               u8 tm_function, u8 *tm_response)
 {
        struct utp_task_req_desc *task_req_descp;
        struct utp_upiu_task_req *task_req_upiup;
        struct Scsi_Host *host;
        unsigned long flags;
-       int free_slot = 0;
+       int free_slot;
        int err;
+       int task_tag;
 
        host = hba->host;
 
-       spin_lock_irqsave(host->host_lock, flags);
-
-       /* If task management queue is full */
-       free_slot = ufshcd_get_tm_free_slot(hba);
-       if (free_slot >= hba->nutmrs) {
-               spin_unlock_irqrestore(host->host_lock, flags);
-               dev_err(hba->dev, "Task management queue full\n");
-               err = FAILED;
-               goto out;
-       }
+       /*
+        * Get free slot, sleep if slots are unavailable.
+        * Even though we use wait_event() which sleeps indefinitely,
+        * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
+        */
+       wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
 
+       spin_lock_irqsave(host->host_lock, flags);
        task_req_descp = hba->utmrdl_base_addr;
        task_req_descp += free_slot;
 
@@ -2562,18 +2668,15 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        /* Configure task request UPIU */
        task_req_upiup =
                (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
+       task_tag = hba->nutrs + free_slot;
        task_req_upiup->header.dword_0 =
                UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
-                                             lrbp->lun, lrbp->task_tag);
+                                             lun_id, task_tag);
        task_req_upiup->header.dword_1 =
                UPIU_HEADER_DWORD(0, tm_function, 0, 0);
 
-       task_req_upiup->input_param1 = lrbp->lun;
-       task_req_upiup->input_param1 =
-               cpu_to_be32(task_req_upiup->input_param1);
-       task_req_upiup->input_param2 = lrbp->task_tag;
-       task_req_upiup->input_param2 =
-               cpu_to_be32(task_req_upiup->input_param2);
+       task_req_upiup->input_param1 = cpu_to_be32(lun_id);
+       task_req_upiup->input_param2 = cpu_to_be32(task_id);
 
        /* send command to the controller */
        __set_bit(free_slot, &hba->outstanding_tasks);
@@ -2582,91 +2685,88 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
        spin_unlock_irqrestore(host->host_lock, flags);
 
        /* wait until the task management command is completed */
-       err =
-       wait_event_interruptible_timeout(hba->ufshcd_tm_wait_queue,
-                                        (test_bit(free_slot,
-                                        &hba->tm_condition) != 0),
-                                        60 * HZ);
+       err = wait_event_timeout(hba->tm_wq,
+                       test_bit(free_slot, &hba->tm_condition),
+                       msecs_to_jiffies(TM_CMD_TIMEOUT));
        if (!err) {
-               dev_err(hba->dev,
-                       "Task management command timed-out\n");
-               err = FAILED;
-               goto out;
+               dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
+                               __func__, tm_function);
+               if (ufshcd_clear_tm_cmd(hba, free_slot))
+                       dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
+                                       __func__, free_slot);
+               err = -ETIMEDOUT;
+       } else {
+               err = ufshcd_task_req_compl(hba, free_slot, tm_response);
        }
+
        clear_bit(free_slot, &hba->tm_condition);
-       err = ufshcd_task_req_compl(hba, free_slot);
-out:
+       ufshcd_put_tm_slot(hba, free_slot);
+       wake_up(&hba->tm_tag_wq);
+
        return err;
 }
 
 /**
- * ufshcd_device_reset - reset device and abort all the pending commands
+ * ufshcd_eh_device_reset_handler - device reset handler registered to
+ *                                    scsi layer.
  * @cmd: SCSI command pointer
  *
  * Returns SUCCESS/FAILED
  */
-static int ufshcd_device_reset(struct scsi_cmnd *cmd)
+static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
 {
        struct Scsi_Host *host;
        struct ufs_hba *hba;
        unsigned int tag;
        u32 pos;
        int err;
+       u8 resp = 0xF;
+       struct ufshcd_lrb *lrbp;
+       unsigned long flags;
 
        host = cmd->device->host;
        hba = shost_priv(host);
        tag = cmd->request->tag;
 
-       err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_LOGICAL_RESET);
-       if (err == FAILED)
+       lrbp = &hba->lrb[tag];
+       err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
+       if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+               if (!err)
+                       err = resp;
                goto out;
+       }
 
-       for (pos = 0; pos < hba->nutrs; pos++) {
-               if (test_bit(pos, &hba->outstanding_reqs) &&
-                   (hba->lrb[tag].lun == hba->lrb[pos].lun)) {
-
-                       /* clear the respective UTRLCLR register bit */
-                       ufshcd_utrl_clear(hba, pos);
-
-                       clear_bit(pos, &hba->outstanding_reqs);
-
-                       if (hba->lrb[pos].cmd) {
-                               scsi_dma_unmap(hba->lrb[pos].cmd);
-                               hba->lrb[pos].cmd->result =
-                                       DID_ABORT << 16;
-                               hba->lrb[pos].cmd->scsi_done(cmd);
-                               hba->lrb[pos].cmd = NULL;
-                               clear_bit_unlock(pos, &hba->lrb_in_use);
-                               wake_up(&hba->dev_cmd.tag_wq);
-                       }
+       /* clear the commands that were pending for corresponding LUN */
+       for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
+               if (hba->lrb[pos].lun == lrbp->lun) {
+                       err = ufshcd_clear_cmd(hba, pos);
+                       if (err)
+                               break;
                }
-       } /* end of for */
+       }
+       spin_lock_irqsave(host->host_lock, flags);
+       ufshcd_transfer_req_compl(hba);
+       spin_unlock_irqrestore(host->host_lock, flags);
 out:
+       if (!err) {
+               err = SUCCESS;
+       } else {
+               dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+               err = FAILED;
+       }
        return err;
 }
 
-/**
- * ufshcd_host_reset - Main reset function registered with scsi layer
- * @cmd: SCSI command pointer
- *
- * Returns SUCCESS/FAILED
- */
-static int ufshcd_host_reset(struct scsi_cmnd *cmd)
-{
-       struct ufs_hba *hba;
-
-       hba = shost_priv(cmd->device->host);
-
-       if (hba->ufshcd_state == UFSHCD_STATE_RESET)
-               return SUCCESS;
-
-       return ufshcd_do_reset(hba);
-}
-
 /**
  * ufshcd_abort - abort a specific command
  * @cmd: SCSI command pointer
  *
+ * Abort the pending command in the device by sending a UFS_ABORT_TASK task
+ * management command, and in the host controller by clearing the door-bell
+ * register. There can be a race in which the controller sends the command to
+ * the device while the abort is being issued. To avoid that, first issue
+ * UFS_QUERY_TASK to check if the command is really pending, then abort it.
+ *
  * Returns SUCCESS/FAILED
  */
 static int ufshcd_abort(struct scsi_cmnd *cmd)
@@ -2675,33 +2775,68 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
        struct ufs_hba *hba;
        unsigned long flags;
        unsigned int tag;
-       int err;
+       int err = 0;
+       int poll_cnt;
+       u8 resp = 0xF;
+       struct ufshcd_lrb *lrbp;
 
        host = cmd->device->host;
        hba = shost_priv(host);
        tag = cmd->request->tag;
 
-       spin_lock_irqsave(host->host_lock, flags);
+       /* If command is already aborted/completed, return SUCCESS */
+       if (!(test_bit(tag, &hba->outstanding_reqs)))
+               goto out;
 
-       /* check if command is still pending */
-       if (!(test_bit(tag, &hba->outstanding_reqs))) {
-               err = FAILED;
-               spin_unlock_irqrestore(host->host_lock, flags);
+       lrbp = &hba->lrb[tag];
+       for (poll_cnt = 100; poll_cnt; poll_cnt--) {
+               err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+                               UFS_QUERY_TASK, &resp);
+               if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
+                       /* cmd pending in the device */
+                       break;
+               } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+                       u32 reg;
+
+                       /*
+                        * cmd not pending in the device, check if it is
+                        * in transition.
+                        */
+                       reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+                       if (reg & (1 << tag)) {
+                               /* sleep for max. 200us to stabilize */
+                               usleep_range(100, 200);
+                               continue;
+                       }
+                       /* command completed already */
+                       goto out;
+               } else {
+                       if (!err)
+                               err = resp; /* service response error */
+                       goto out;
+               }
+       }
+
+       if (!poll_cnt) {
+               err = -EBUSY;
                goto out;
        }
-       spin_unlock_irqrestore(host->host_lock, flags);
 
-       err = ufshcd_issue_tm_cmd(hba, &hba->lrb[tag], UFS_ABORT_TASK);
-       if (err == FAILED)
+       err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+                       UFS_ABORT_TASK, &resp);
+       if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+               if (!err)
+                       err = resp; /* service response error */
+               goto out;
+       }
+
+       err = ufshcd_clear_cmd(hba, tag);
+       if (err)
                goto out;
 
        scsi_dma_unmap(cmd);
 
        spin_lock_irqsave(host->host_lock, flags);
-
-       /* clear the respective UTRLCLR register bit */
-       ufshcd_utrl_clear(hba, tag);
-
        __clear_bit(tag, &hba->outstanding_reqs);
        hba->lrb[tag].cmd = NULL;
        spin_unlock_irqrestore(host->host_lock, flags);
@@ -2709,6 +2844,129 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
        clear_bit_unlock(tag, &hba->lrb_in_use);
        wake_up(&hba->dev_cmd.tag_wq);
 out:
+       if (!err) {
+               err = SUCCESS;
+       } else {
+               dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+               err = FAILED;
+       }
+
+       return err;
+}
+
+/**
+ * ufshcd_host_reset_and_restore - reset and restore host controller
+ * @hba: per-adapter instance
+ *
+ * Note that host controller reset may issue DME_RESET to
+ * local and remote (device) Uni-Pro stack and the attributes
+ * are reset to default state.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
+{
+       int err;
+       async_cookie_t cookie;
+       unsigned long flags;
+
+       /* Reset the host controller */
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       ufshcd_hba_stop(hba);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       err = ufshcd_hba_enable(hba);
+       if (err)
+               goto out;
+
+       /* Establish the link again and restore the device */
+       cookie = async_schedule(ufshcd_async_scan, hba);
+       /* wait for async scan to be completed */
+       async_synchronize_cookie(++cookie);
+       if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+               err = -EIO;
+out:
+       if (err)
+               dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
+
+       return err;
+}
+
+/**
+ * ufshcd_reset_and_restore - reset and re-initialize host/device
+ * @hba: per-adapter instance
+ *
+ * Reset and recover device, host and re-establish link. This
+ * is helpful to recover the communication in fatal error conditions.
+ *
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_reset_and_restore(struct ufs_hba *hba)
+{
+       int err = 0;
+       unsigned long flags;
+
+       err = ufshcd_host_reset_and_restore(hba);
+
+       /*
+        * After reset the door-bell might be cleared, complete
+        * outstanding requests in s/w here.
+        */
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       ufshcd_transfer_req_compl(hba);
+       ufshcd_tmc_handler(hba);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       return err;
+}
+
+/**
+ * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
+ * @cmd: SCSI command pointer
+ *
+ * Returns SUCCESS/FAILED
+ */
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
+{
+       int err;
+       unsigned long flags;
+       struct ufs_hba *hba;
+
+       hba = shost_priv(cmd->device->host);
+
+       /*
+        * Check if there is any race with fatal error handling.
+        * If so, wait for it to complete. Even though fatal error
+        * handling does reset and restore in some cases, don't assume
+        * anything out of it. We are just avoiding race here.
+        */
+       do {
+               spin_lock_irqsave(hba->host->host_lock, flags);
+               if (!(work_pending(&hba->eh_work) ||
+                               hba->ufshcd_state == UFSHCD_STATE_RESET))
+                       break;
+               spin_unlock_irqrestore(hba->host->host_lock, flags);
+               dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
+               flush_work(&hba->eh_work);
+       } while (1);
+
+       hba->ufshcd_state = UFSHCD_STATE_RESET;
+       ufshcd_set_eh_in_progress(hba);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+       err = ufshcd_reset_and_restore(hba);
+
+       spin_lock_irqsave(hba->host->host_lock, flags);
+       if (!err) {
+               err = SUCCESS;
+               hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+       } else {
+               err = FAILED;
+               hba->ufshcd_state = UFSHCD_STATE_ERROR;
+       }
+       ufshcd_clear_eh_in_progress(hba);
+       spin_unlock_irqrestore(hba->host->host_lock, flags);
+
        return err;
 }
 
@@ -2737,8 +2995,13 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
                goto out;
 
        ufshcd_force_reset_auto_bkops(hba);
-       scsi_scan_host(hba->host);
-       pm_runtime_put_sync(hba->dev);
+       hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
+       /* If we are in error handling context no need to scan the host */
+       if (!ufshcd_eh_in_progress(hba)) {
+               scsi_scan_host(hba->host);
+               pm_runtime_put_sync(hba->dev);
+       }
 out:
        return;
 }
@@ -2751,8 +3014,8 @@ static struct scsi_host_template ufshcd_driver_template = {
        .slave_alloc            = ufshcd_slave_alloc,
        .slave_destroy          = ufshcd_slave_destroy,
        .eh_abort_handler       = ufshcd_abort,
-       .eh_device_reset_handler = ufshcd_device_reset,
-       .eh_host_reset_handler  = ufshcd_host_reset,
+       .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
+       .eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
        .this_id                = -1,
        .sg_tablesize           = SG_ALL,
        .cmd_per_lun            = UFSHCD_CMD_PER_LUN,
@@ -2916,10 +3179,11 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
        host->max_cmd_len = MAX_CDB_SIZE;
 
        /* Initialize wait queue for task management */
-       init_waitqueue_head(&hba->ufshcd_tm_wait_queue);
+       init_waitqueue_head(&hba->tm_wq);
+       init_waitqueue_head(&hba->tm_tag_wq);
 
        /* Initialize work queues */
-       INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
+       INIT_WORK(&hba->eh_work, ufshcd_err_handler);
        INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
 
        /* Initialize UIC command mutex */
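
The new task-management path waits on hba->tm_tag_wq through ufshcd_get_tm_free_slot()/ufshcd_put_tm_slot(), but those helpers are defined outside the hunks shown above. A minimal sketch of what they would look like, assuming tm_slots_in_use is a bitmap covering hba->nutmrs slots (illustrative only, not necessarily the exact upstream bodies):

#include <linux/bitops.h>

static inline bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
{
	int tag;
	bool ret = false;

	if (!free_slot)
		goto out;

	do {
		/* look for a TM slot that is not currently in use */
		tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
		if (tag >= hba->nutmrs)
			goto out;
	} while (test_and_set_bit(tag, &hba->tm_slots_in_use));

	*free_slot = tag;
	ret = true;
out:
	return ret;
}

static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
{
	clear_bit(slot, &hba->tm_slots_in_use);
}

Callers that fail to get a slot simply go back to sleep in wait_event(); the slot owner wakes them with wake_up(&hba->tm_tag_wq) after ufshcd_put_tm_slot(), as seen at the end of ufshcd_issue_tm_cmd().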
index 577679a2d1898f919826ec8f27e18213414b908d..acf318e338eda297d922d644389e2389f941699d 100644 (file)
@@ -174,15 +174,21 @@ struct ufs_dev_cmd {
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
  * @uic_cmd_mutex: mutex for uic command
- * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_wq: wait queue for task management
+ * @tm_tag_wq: wait queue for free task management slots
+ * @tm_slots_in_use: bit map of task management request slots in use
  * @pwr_done: completion for power mode change
  * @tm_condition: condition variable for task management
  * @ufshcd_state: UFSHCD states
+ * @eh_flags: Error handling flags
  * @intr_mask: Interrupt Mask Bits
  * @ee_ctrl_mask: Exception event control mask
- * @feh_workq: Work queue for fatal controller error handling
+ * @eh_work: Worker to handle UFS errors that require s/w attention
  * @eeh_work: Worker to handle exception events
  * @errors: HBA errors
+ * @uic_error: UFS interconnect layer error status
+ * @saved_err: sticky error mask
+ * @saved_uic_err: sticky UIC error mask
  * @dev_cmd: ufs device management command information
  * @auto_bkops_enabled: to track whether bkops is enabled in device
  */
@@ -217,21 +223,27 @@ struct ufs_hba {
        struct uic_command *active_uic_cmd;
        struct mutex uic_cmd_mutex;
 
-       wait_queue_head_t ufshcd_tm_wait_queue;
+       wait_queue_head_t tm_wq;
+       wait_queue_head_t tm_tag_wq;
        unsigned long tm_condition;
+       unsigned long tm_slots_in_use;
 
        struct completion *pwr_done;
 
        u32 ufshcd_state;
+       u32 eh_flags;
        u32 intr_mask;
        u16 ee_ctrl_mask;
 
        /* Work Queues */
-       struct work_struct feh_workq;
+       struct work_struct eh_work;
        struct work_struct eeh_work;
 
        /* HBA Errors */
        u32 errors;
+       u32 uic_error;
+       u32 saved_err;
+       u32 saved_uic_err;
 
        /* Device management request data */
        struct ufs_dev_cmd dev_cmd;
@@ -263,6 +275,8 @@ static inline void check_upiu_size(void)
                GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
 }
 
+extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state);
+extern int ufshcd_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
 extern int ufshcd_runtime_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_idle(struct ufs_hba *hba);
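
ufshcd_err_handler() and ufshcd_eh_host_reset_handler() bracket their work with ufshcd_set_eh_in_progress()/ufshcd_clear_eh_in_progress(), and ufshcd_async_scan() tests ufshcd_eh_in_progress(); none of those helpers appear in the hunks above. A plausible minimal form, assuming a single flag bit in the new eh_flags field (the helper names come from the callers, the bodies are a sketch):

/* illustrative: bit 0 of eh_flags marks error handling in progress */
#define UFSHCD_EH_IN_PROGRESS	(1 << 0)

static inline void ufshcd_set_eh_in_progress(struct ufs_hba *hba)
{
	hba->eh_flags |= UFSHCD_EH_IN_PROGRESS;
}

static inline void ufshcd_clear_eh_in_progress(struct ufs_hba *hba)
{
	hba->eh_flags &= ~UFSHCD_EH_IN_PROGRESS;
}

static inline int ufshcd_eh_in_progress(struct ufs_hba *hba)
{
	return hba->eh_flags & UFSHCD_EH_IN_PROGRESS;
}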
index 0475c6619a68109dd100f47cd7f41b793aea8aa9..9abc7e32b43d93350a710f757f651ad3a586248e 100644 (file)
@@ -304,10 +304,10 @@ enum {
  * @size: size of physical segment DW-3
  */
 struct ufshcd_sg_entry {
-       u32    base_addr;
-       u32    upper_addr;
-       u32    reserved;
-       u32    size;
+       __le32    base_addr;
+       __le32    upper_addr;
+       __le32    reserved;
+       __le32    size;
 };
 
 /**
@@ -330,10 +330,10 @@ struct utp_transfer_cmd_desc {
  * @dword3: Descriptor Header DW3
  */
 struct request_desc_header {
-       u32 dword_0;
-       u32 dword_1;
-       u32 dword_2;
-       u32 dword_3;
+       __le32 dword_0;
+       __le32 dword_1;
+       __le32 dword_2;
+       __le32 dword_3;
 };
 
 /**
@@ -352,16 +352,16 @@ struct utp_transfer_req_desc {
        struct request_desc_header header;
 
        /* DW 4-5*/
-       u32  command_desc_base_addr_lo;
-       u32  command_desc_base_addr_hi;
+       __le32  command_desc_base_addr_lo;
+       __le32  command_desc_base_addr_hi;
 
        /* DW 6 */
-       u16  response_upiu_length;
-       u16  response_upiu_offset;
+       __le16  response_upiu_length;
+       __le16  response_upiu_offset;
 
        /* DW 7 */
-       u16  prd_table_length;
-       u16  prd_table_offset;
+       __le16  prd_table_length;
+       __le16  prd_table_offset;
 };
 
 /**
@@ -376,10 +376,10 @@ struct utp_task_req_desc {
        struct request_desc_header header;
 
        /* DW 4-11 */
-       u32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
+       __le32 task_req_upiu[TASK_REQ_UPIU_SIZE_DWORDS];
 
        /* DW 12-19 */
-       u32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
+       __le32 task_rsp_upiu[TASK_RSP_UPIU_SIZE_DWORDS];
 };
 
 #endif /* End of Header */
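
Changing the descriptor fields from u32/u16 to __le32/__le16 does not alter the memory layout; it lets sparse flag any place that stores a CPU-endian value without conversion, since UFSHCI descriptors are little-endian by specification. A sketch of a hypothetical helper filling DW 4-7 of a transfer request descriptor with the explicit conversions these types now demand (the helper name and signature are illustrative, not from this patch set):

#include <linux/kernel.h>	/* lower_32_bits()/upper_32_bits() */

static void example_fill_utrd(struct utp_transfer_req_desc *utrd,
			      dma_addr_t cmd_desc_addr,
			      u16 resp_len, u16 resp_off,
			      u16 prdt_len, u16 prdt_off)
{
	/* DW 4-5: command descriptor base address, little-endian */
	utrd->command_desc_base_addr_lo =
		cpu_to_le32(lower_32_bits(cmd_desc_addr));
	utrd->command_desc_base_addr_hi =
		cpu_to_le32(upper_32_bits(cmd_desc_addr));

	/* DW 6-7: response UPIU and PRD table geometry */
	utrd->response_upiu_length = cpu_to_le16(resp_len);
	utrd->response_upiu_offset = cpu_to_le16(resp_off);
	utrd->prd_table_length = cpu_to_le16(prdt_len);
	utrd->prd_table_offset = cpu_to_le16(prdt_off);
}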
index db3b494e5926a423866e0ad3a18b15b6378d3cca..d4727b3394749bcc6ab4265a19c20f83c5328bea 100644 (file)
@@ -73,17 +73,12 @@ struct virtio_scsi_vq {
  * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
  * (each virtqueue's affinity is set to the CPU that "owns" the queue).
  *
- * An interesting effect of this policy is that only writes to req_vq need to
- * take the tgt_lock.  Read can be done outside the lock because:
+ * tgt_lock is held to serialize reading and writing req_vq. Reading req_vq
+ * could be done locklessly, but we do not do it yet.
  *
- * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1.
- *   In that case, no other CPU is reading req_vq: even if they were in
- *   virtscsi_queuecommand_multi, they would be spinning on tgt_lock.
- *
- * - reads of req_vq only occur when the target is not idle (reqs != 0).
- *   A CPU that enters virtscsi_queuecommand_multi will not modify req_vq.
- *
- * Similarly, decrements of reqs are never concurrent with writes of req_vq.
+ * Decrements of reqs are never concurrent with writes of req_vq: before the
+ * decrement reqs will be != 0; after the decrement the virtqueue completion
+ * routine will not use the req_vq so it can be changed by a new request.
  * Thus they can happen outside the tgt_lock, provided of course we make reqs
  * an atomic_t.
  */
@@ -204,7 +199,6 @@ static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
                        set_driver_byte(sc, DRIVER_SENSE);
        }
 
-       mempool_free(cmd, virtscsi_cmd_pool);
        sc->scsi_done(sc);
 
        atomic_dec(&tgt->reqs);
@@ -238,38 +232,6 @@ static void virtscsi_req_done(struct virtqueue *vq)
        int index = vq->index - VIRTIO_SCSI_VQ_BASE;
        struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];
 
-       /*
-        * Read req_vq before decrementing the reqs field in
-        * virtscsi_complete_cmd.
-        *
-        * With barriers:
-        *
-        *      CPU #0                  virtscsi_queuecommand_multi (CPU #1)
-        *      ------------------------------------------------------------
-        *      lock vq_lock
-        *      read req_vq
-        *      read reqs (reqs = 1)
-        *      write reqs (reqs = 0)
-        *                              increment reqs (reqs = 1)
-        *                              write req_vq
-        *
-        * Possible reordering without barriers:
-        *
-        *      CPU #0                  virtscsi_queuecommand_multi (CPU #1)
-        *      ------------------------------------------------------------
-        *      lock vq_lock
-        *      read reqs (reqs = 1)
-        *      write reqs (reqs = 0)
-        *                              increment reqs (reqs = 1)
-        *                              write req_vq
-        *      read (wrong) req_vq
-        *
-        * We do not need a full smp_rmb, because req_vq is required to get
-        * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored
-        * in the virtqueue as the user token.
-        */
-       smp_read_barrier_depends();
-
        virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
 };
 
@@ -279,8 +241,6 @@ static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
 
        if (cmd->comp)
                complete_all(cmd->comp);
-       else
-               mempool_free(cmd, virtscsi_cmd_pool);
 }
 
 static void virtscsi_ctrl_done(struct virtqueue *vq)
@@ -496,10 +456,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
                                 struct virtio_scsi_vq *req_vq,
                                 struct scsi_cmnd *sc)
 {
-       struct virtio_scsi_cmd *cmd;
-       int ret;
-
        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
+       struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+
        BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
 
        /* TODO: check feature bit and fail if unsupported?  */
@@ -508,11 +467,6 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
        dev_dbg(&sc->device->sdev_gendev,
                "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);
 
-       ret = SCSI_MLQUEUE_HOST_BUSY;
-       cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
-       if (!cmd)
-               goto out;
-
        memset(cmd, 0, sizeof(*cmd));
        cmd->sc = sc;
        cmd->req.cmd = (struct virtio_scsi_cmd_req){
@@ -531,13 +485,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
 
        if (virtscsi_kick_cmd(req_vq, cmd,
                              sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
-                             GFP_ATOMIC) == 0)
-               ret = 0;
-       else
-               mempool_free(cmd, virtscsi_cmd_pool);
-
-out:
-       return ret;
+                             GFP_ATOMIC) != 0)
+               return SCSI_MLQUEUE_HOST_BUSY;
+       return 0;
 }
 
 static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
@@ -560,12 +510,8 @@ static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
 
        spin_lock_irqsave(&tgt->tgt_lock, flags);
 
-       /*
-        * The memory barrier after atomic_inc_return matches
-        * the smp_read_barrier_depends() in virtscsi_req_done.
-        */
        if (atomic_inc_return(&tgt->reqs) > 1)
-               vq = ACCESS_ONCE(tgt->req_vq);
+               vq = tgt->req_vq;
        else {
                queue_num = smp_processor_id();
                while (unlikely(queue_num >= vscsi->num_queues))
@@ -683,6 +629,7 @@ static struct scsi_host_template virtscsi_host_template_single = {
        .name = "Virtio SCSI HBA",
        .proc_name = "virtio_scsi",
        .this_id = -1,
+       .cmd_size = sizeof(struct virtio_scsi_cmd),
        .queuecommand = virtscsi_queuecommand_single,
        .eh_abort_handler = virtscsi_abort,
        .eh_device_reset_handler = virtscsi_device_reset,
@@ -699,6 +646,7 @@ static struct scsi_host_template virtscsi_host_template_multi = {
        .name = "Virtio SCSI HBA",
        .proc_name = "virtio_scsi",
        .this_id = -1,
+       .cmd_size = sizeof(struct virtio_scsi_cmd),
        .queuecommand = virtscsi_queuecommand_multi,
        .eh_abort_handler = virtscsi_abort,
        .eh_device_reset_handler = virtscsi_device_reset,
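
The mempool removal above relies on the mid-layer allocating driver-private space behind each scsi_cmnd when the host template sets cmd_size (see the scsi_cmd_priv() addition further down). A minimal sketch of the pattern for a hypothetical LLD (names are illustrative, not from this patch set):

#include <linux/string.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct example_cmd {		/* hypothetical per-command driver state */
	u32 tag;
	int phase;
};

static int example_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	/* no allocation: the mid-layer reserved cmd_size bytes behind sc */
	struct example_cmd *ecmd = scsi_cmd_priv(sc);

	memset(ecmd, 0, sizeof(*ecmd));
	/* ... build and submit the request ... */
	return 0;
}

static struct scsi_host_template example_template = {
	.name		= "example",
	.proc_name	= "example",
	.this_id	= -1,
	.cmd_size	= sizeof(struct example_cmd),
	.queuecommand	= example_queuecommand,
};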
index 25ac6283b9c753a6af74055d590a1ed613004be1..a2594afe05c733e7e7218d14dbba55de5785502f 100644 (file)
@@ -263,16 +263,16 @@ static inline struct osd_cdb_head *osd_cdb_head(struct osd_cdb *ocdb)
  * Ex name = FORMAT_OSD we have OSD_ACT_FORMAT_OSD && OSDv1_ACT_FORMAT_OSD
  */
 #define OSD_ACT___(Name, Num) \
-       OSD_ACT_##Name = __constant_cpu_to_be16(0x8880 + Num), \
-       OSDv1_ACT_##Name = __constant_cpu_to_be16(0x8800 + Num),
+       OSD_ACT_##Name = cpu_to_be16(0x8880 + Num), \
+       OSDv1_ACT_##Name = cpu_to_be16(0x8800 + Num),
 
 /* V2 only actions */
 #define OSD_ACT_V2(Name, Num) \
-       OSD_ACT_##Name = __constant_cpu_to_be16(0x8880 + Num),
+       OSD_ACT_##Name = cpu_to_be16(0x8880 + Num),
 
 #define OSD_ACT_V1_V2(Name, Num1, Num2) \
-       OSD_ACT_##Name = __constant_cpu_to_be16(Num2), \
-       OSDv1_ACT_##Name = __constant_cpu_to_be16(Num1),
+       OSD_ACT_##Name = cpu_to_be16(Num2), \
+       OSDv1_ACT_##Name = cpu_to_be16(Num1),
 
 enum osd_service_actions {
        OSD_ACT_V2(OBJECT_STRUCTURE_CHECK,      0x00)
index dd7c998221b3e7a0253c11426a7940a328279248..e016e2ac38df8f6c570980a4624e9ccaefd0dd5e 100644 (file)
@@ -133,6 +133,15 @@ struct scsi_cmnd {
        unsigned char tag;      /* SCSI-II queued command tag */
 };
 
+/*
+ * Return the driver private allocation behind the command.
+ * Only works if cmd_size is set in the host template.
+ */
+static inline void *scsi_cmd_priv(struct scsi_cmnd *cmd)
+{
+       return cmd + 1;
+}
+
 /* make sure not to use it with REQ_TYPE_BLOCK_PC commands */
 static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
 {
index 20fdfc2526adb1a2bd63bae4564b5faf2428256a..36c4114ed9bc2ce47dfab7b5beb79e2a64a68aa5 100644 (file)
@@ -4,17 +4,17 @@
 #include <linux/device.h>
 
 struct module;
+struct request;
 struct scsi_cmnd;
 struct scsi_device;
-struct request;
-struct request_queue;
-
 
 struct scsi_driver {
        struct module           *owner;
        struct device_driver    gendrv;
 
        void (*rescan)(struct device *);
+       int (*init_command)(struct scsi_cmnd *);
+       void (*uninit_command)(struct scsi_cmnd *);
        int (*done)(struct scsi_cmnd *);
        int (*eh_action)(struct scsi_cmnd *, int);
 };
@@ -31,8 +31,5 @@ extern int scsi_register_interface(struct class_interface *);
 
 int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req);
 int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req);
-int scsi_prep_state_check(struct scsi_device *sdev, struct request *req);
-int scsi_prep_return(struct request_queue *q, struct request *req, int ret);
-int scsi_prep_fn(struct request_queue *, struct request *);
 
 #endif /* _SCSI_SCSI_DRIVER_H */
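
The new init_command/uninit_command hooks let upper-level drivers take over building the CDB for filesystem requests as part of the scsi-mq preparation work. A sketch of how a hypothetical upper-level driver might wire them up (the hook names come from the header above; the driver name, the bodies, and the assumption that init_command returns a BLKPREP_* code are illustrative only):

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_driver.h>

static int example_init_command(struct scsi_cmnd *cmd)
{
	/* translate cmd->request into a CDB, set cmd->cmd_len, etc. */
	return BLKPREP_OK;	/* assumed return convention */
}

static void example_uninit_command(struct scsi_cmnd *cmd)
{
	/* release anything example_init_command() set up */
}

static struct scsi_driver example_driver = {
	.gendrv = {
		.name	= "example",
	},
	.init_command	= example_init_command,
	.uninit_command	= example_uninit_command,
};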