1 /******************************************************************************
2 * QLOGIC LINUX SOFTWARE
4 * QLogic QLA1280 (Ultra2) and QLA12160 (Ultra3) SCSI driver
5 * Copyright (C) 2000 Qlogic Corporation (www.qlogic.com)
6 * Copyright (C) 2001-2004 Jes Sorensen, Wild Open Source Inc.
7 * Copyright (C) 2003-2004 Christoph Hellwig
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the
11 * Free Software Foundation; either version 2, or (at your option) any
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 ******************************************************************************/
20 #define QLA1280_VERSION "3.26"
21 /*****************************************************************************
23 Rev 3.26, January 16, 2006 Jes Sorensen
24 - Ditch all < 2.6 support
25 Rev 3.25.1, February 10, 2005 Christoph Hellwig
26 - use pci_map_single to map non-S/G requests
27 - remove qla1280_proc_info
28 Rev 3.25, September 28, 2004, Christoph Hellwig
29 - add support for ISP1020/1040
30 - don't include "scsi.h" anymore for 2.6.x
31 Rev 3.24.4 June 7, 2004 Christoph Hellwig
32 - restructure firmware loading, cleanup initialization code
33 - prepare support for ISP1020/1040 chips
34 Rev 3.24.3 January 19, 2004, Jes Sorensen
35 - Handle PCI DMA mask settings correctly
36 - Correct order of error handling in probe_one, free_irq should not
37 be called if request_irq failed
38 Rev 3.24.2 January 19, 2004, James Bottomley & Andrew Vasquez
39 - Big endian fixes (James)
40 - Remove bogus IOCB content on zero data transfer commands (Andrew)
41 Rev 3.24.1 January 5, 2004, Jes Sorensen
42 - Initialize completion queue to avoid OOPS on probe
43 - Handle interrupts during mailbox testing
44 Rev 3.24 November 17, 2003, Christoph Hellwig
45 - use struct list_head for completion queue
46 - avoid old Scsi_FOO typedefs
47 - cleanup 2.4 compat glue a bit
48 - use <scsi/scsi_*.h> headers on 2.6 instead of "scsi.h"
49 - make initialization for memory mapped vs port I/O more similar
50 - remove broken pci config space manipulation
52 - this is an almost perfect 2.6 scsi driver now! ;)
53 Rev 3.23.39 December 17, 2003, Jes Sorensen
54 - Delete completion queue from srb if mailbox command failed,
55 to avoid qla1280_done completing qla1280_error_action's
57 - Reduce arguments for qla1280_done
58 Rev 3.23.38 October 18, 2003, Christoph Hellwig
59 - Convert to new-style hotplugable driver for 2.6
60 - Fix missing scsi_unregister/scsi_host_put on HBA removal
61 - Kill some more cruft
62 Rev 3.23.37 October 1, 2003, Jes Sorensen
63 - Make MMIO depend on CONFIG_X86_VISWS instead of yet another
65 - Clean up locking in probe path
66 Rev 3.23.36 October 1, 2003, Christoph Hellwig
67 - queuecommand only ever receives new commands - clear flags
68 - Reintegrate lost fixes from Linux 2.5
69 Rev 3.23.35 August 14, 2003, Jes Sorensen
71 Rev 3.23.34 July 23, 2003, Jes Sorensen
72 - Remove pointless TRUE/FALSE macros
73 - Clean up vchan handling
74 Rev 3.23.33 July 3, 2003, Jes Sorensen
75 - Don't define register access macros before the define determining MMIO.
76 This just happened to work out on ia64 but not elsewhere.
77 - Don't try and read from the card while it is in reset as
78 it won't respond and causes an MCA
79 Rev 3.23.32 June 23, 2003, Jes Sorensen
80 - Basic support for boot time arguments
81 Rev 3.23.31 June 8, 2003, Jes Sorensen
82 - Reduce boot time messages
83 Rev 3.23.30 June 6, 2003, Jes Sorensen
84 - Do not enable sync/wide/ppr before it has been determined
85 that the target device actually supports it
86 - Enable DMA arbitration for multi channel controllers
87 Rev 3.23.29 June 3, 2003, Jes Sorensen
89 Rev 3.23.28 June 3, 2003, Jes Sorensen
90 - Eliminate duplicate marker commands on bus resets
91 - Handle outstanding commands appropriately on bus/device resets
92 Rev 3.23.27 May 28, 2003, Jes Sorensen
93 - Remove bogus input queue code, let the Linux SCSI layer do the work
94 - Clean up NVRAM handling, only read it once from the card
95 - Add a number of missing default nvram parameters
96 Rev 3.23.26 Beta May 28, 2003, Jes Sorensen
97 - Use completion queue for mailbox commands instead of busy wait
98 Rev 3.23.25 Beta May 27, 2003, James Bottomley
99 - Migrate to use new error handling code
100 Rev 3.23.24 Beta May 21, 2003, James Bottomley
102 - Cleanup data direction code
103 Rev 3.23.23 Beta May 12, 2003, Jes Sorensen
104 - Switch to using MMIO instead of PIO
105 Rev 3.23.22 Beta April 15, 2003, Jes Sorensen
106 - Fix PCI parity problem with 12160 during reset.
107 Rev 3.23.21 Beta April 14, 2003, Jes Sorensen
108 - Use pci_map_page()/pci_unmap_page() instead of map_single version.
109 Rev 3.23.20 Beta April 9, 2003, Jes Sorensen
110 - Remove < 2.4.x support
111 - Introduce HOST_LOCK to make the spin lock changes portable.
112 - Remove a bunch of idiotic and unnecessary typedef's
113 - Kill all leftovers of target-mode support which never worked anyway
114 Rev 3.23.19 Beta April 11, 2002, Linus Torvalds
115 - Do qla1280_pci_config() before calling request_irq() and
117 - Use pci_dma_hi32() to handle upper word of DMA addresses instead
119 - Hand correct arguments to free_irq() in case of failure
120 Rev 3.23.18 Beta April 11, 2002, Jes Sorensen
121 - Run source through Lindent and clean up the output
122 Rev 3.23.17 Beta April 11, 2002, Jes Sorensen
123 - Update SCSI firmware to qla1280 v8.15.00 and qla12160 v10.04.32
124 Rev 3.23.16 Beta March 19, 2002, Jes Sorensen
125 - Rely on mailbox commands generating interrupts - do not
126 run qla1280_isr() from ql1280_mailbox_command()
127 - Remove device_reg_t
128 - Integrate ql12160_set_target_parameters() with 1280 version
129 - Make qla1280_setup() non static
130 - Do not call qla1280_check_for_dead_scsi_bus() on every I/O request
131 sent to the card - this command pauses the firmware!!!
132 Rev 3.23.15 Beta March 19, 2002, Jes Sorensen
133 - Clean up qla1280.h - remove obsolete QL_DEBUG_LEVEL_x definitions
134 - Remove a pile of pointless and confusing (srb_t **) and
135 (scsi_lu_t *) typecasts
136 - Explicit mark that we do not use the new error handling (for now)
137 - Remove scsi_qla_host_t and use 'struct' instead
138 - Remove in_abort, watchdog_enabled, dpc, dpc_sched, bios_enabled,
139 pci_64bit_slot flags which weren't used for anything anyway
140 - Grab host->host_lock while calling qla1280_isr() from abort()
141 - Use spin_lock()/spin_unlock() in qla1280_intr_handler() - we
142 do not need to save/restore flags in the interrupt handler
143 - Enable interrupts early (before any mailbox access) in preparation
144 for cleaning up the mailbox handling
145 Rev 3.23.14 Beta March 14, 2002, Jes Sorensen
146 - Further cleanups. Remove all trace of QL_DEBUG_LEVEL_x and replace
147 it with proper use of dprintk().
148 - Make qla1280_print_scsi_cmd() and qla1280_dump_buffer() both take
149 a debug level argument to determine if data is to be printed
150 - Add KERN_* info to printk()
151 Rev 3.23.13 Beta March 14, 2002, Jes Sorensen
152 - Significant cosmetic cleanups
153 - Change debug code to use dprintk() and remove #if mess
154 Rev 3.23.12 Beta March 13, 2002, Jes Sorensen
155 - More cosmetic cleanups, fix places treating return as function
156 - use cpu_relax() in qla1280_debounce_register()
157 Rev 3.23.11 Beta March 13, 2002, Jes Sorensen
158 - Make it compile under 2.5.5
159 Rev 3.23.10 Beta October 1, 2001, Jes Sorensen
160 - Do not typecast short * to long * in QL1280BoardTbl, this
161 broke miserably on big endian boxes
162 Rev 3.23.9 Beta September 30, 2001, Jes Sorensen
163 - Remove pre 2.2 hack for checking for reentrance in interrupt handler
164 - Make data types used to receive from SCSI_{BUS,TCN,LUN}_32
165 unsigned int to match the types from struct scsi_cmnd
166 Rev 3.23.8 Beta September 29, 2001, Jes Sorensen
167 - Remove bogus timer_t typedef from qla1280.h
168 - Remove obsolete pre 2.2 PCI setup code, use proper #define's
169 for PCI_ values, call pci_set_master()
170 - Fix memleak of qla1280_buffer on module unload
171 - Only compile module parsing code #ifdef MODULE - should be
172 changed to use individual MODULE_PARM's later
173 - Remove dummy_buffer that was never modified nor printed
174 - ENTER()/LEAVE() are noops unless QL_DEBUG_LEVEL_3, hence remove
175 #ifdef QL_DEBUG_LEVEL_3/#endif around ENTER()/LEAVE() calls
176 - Remove \r from print statements, this is Linux, not DOS
177 - Remove obsolete QLA1280_{SCSILU,INTR,RING}_{LOCK,UNLOCK}
179 - Remove C++ compile hack in header file as Linux drivers are not
180 supposed to be compiled as C++
181 - Kill MS_64BITS macro, as removing it makes the code more readable
182 - Remove unnecessary flags.in_interrupts bit
183 Rev 3.23.7 Beta August 20, 2001, Jes Sorensen
184 - Don't check for set flags on q->q_flag one by one in qla1280_next()
185 - Check whether the interrupt was generated by the QLA1280 before
187 - qla1280_status_entry(): Only zero out part of sense_buffer that
188 is not being copied into
189 - Remove more superfluous typecasts
190 - qla1280_32bit_start_scsi() replace home-brew memcpy() with memcpy()
191 Rev 3.23.6 Beta August 20, 2001, Tony Luck, Intel
192 - Don't walk the entire list in qla1280_putq_t() just to directly
193 grab the pointer to the last element afterwards
194 Rev 3.23.5 Beta August 9, 2001, Jes Sorensen
195 - Don't use IRQF_DISABLED, its use is deprecated for this kind of driver
196 Rev 3.23.4 Beta August 8, 2001, Jes Sorensen
197 - Set dev->max_sectors to 1024
198 Rev 3.23.3 Beta August 6, 2001, Jes Sorensen
199 - Provide compat macros for pci_enable_device(), pci_find_subsys()
200 and scsi_set_pci_device()
201 - Call scsi_set_pci_device() for all devices
202 - Reduce size of kernel version dependent device probe code
203 - Move duplicate probe/init code to separate function
204 - Handle error if qla1280_mem_alloc() fails
205 - Kill OFFSET() macro and use Linux's PCI definitions instead
206 - Kill private structure defining PCI config space (struct config_reg)
207 - Only allocate I/O port region if not in MMIO mode
208 - Remove duplicate (unused) sanity check of size of srb_t
209 Rev 3.23.2 Beta August 6, 2001, Jes Sorensen
210 - Change home-brew memset() implementations to use memset()
211 - Remove all references to COMTRACE() - accessing a PC's COM2 serial
212 port directly is not legal under Linux.
213 Rev 3.23.1 Beta April 24, 2001, Jes Sorensen
214 - Remove pre 2.2 kernel support
215 - clean up 64 bit DMA setting to use 2.4 API (provide backwards compat)
216 - Fix MMIO access to use readl/writel instead of directly
217 dereferencing pointers
218 - Nuke MSDOS debugging code
219 - Change true/false data types to int from uint8_t
220 - Use int for counters instead of uint8_t etc.
221 - Clean up size & byte order conversion macro usage
222 Rev 3.23 Beta January 11, 2001 BN Qlogic
223 - Added check of device_id when handling non
224 QLA12160s during detect().
225 Rev 3.22 Beta January 5, 2001 BN Qlogic
226 - Changed queue_task() to schedule_task()
227 for kernels 2.4.0 and higher.
228 Note: 2.4.0-testxx kernels released prior to
229 the actual 2.4.0 kernel release in January 2001
230 will get compile/link errors with schedule_task().
231 Please update your kernel to the released 2.4.0 level,
232 or comment out lines in this file flagged with 3.22
233 to resolve the compile/link error of schedule_task().
234 - Added -DCONFIG_SMP in addition to -D__SMP__
235 in Makefile for 2.4.0 builds of driver as module.
236 Rev 3.21 Beta January 4, 2001 BN Qlogic
237 - Changed criteria of 64/32 Bit mode of HBA
238 operation according to BITS_PER_LONG rather
239 than HBA's NVRAM setting of >4Gig memory bit;
240 so that the HBA auto-configures without the need
241 to setup each system individually.
242 Rev 3.20 Beta December 5, 2000 BN Qlogic
243 - Added priority handling to IA-64 onboard SCSI
244 ISP12160 chip for kernels greater than 2.3.18.
245 - Added irqrestore for qla1280_intr_handler.
246 - Enabled /proc/scsi/qla1280 interface.
247 - Clear /proc/scsi/qla1280 counters in detect().
248 Rev 3.19 Beta October 13, 2000 BN Qlogic
249 - Declare driver_template for new kernel
250 (2.4.0 and greater) scsi initialization scheme.
251 - Update /proc/scsi entry for 2.3.18 kernels and
253 Rev 3.18 Beta October 10, 2000 BN Qlogic
254 - Changed scan order of adapters to map
255 the QLA12160 followed by the QLA1280.
256 Rev 3.17 Beta September 18, 2000 BN Qlogic
257 - Removed warnings for 32 bit 2.4.x compiles
258 - Corrected declared size for request and response
259 DMA addresses that are kept in each ha
260 Rev. 3.16 Beta August 25, 2000 BN Qlogic
261 - Corrected 64 bit addressing issue on IA-64
262 where the upper 32 bits were not properly
263 passed to the RISC engine.
264 Rev. 3.15 Beta August 22, 2000 BN Qlogic
265 - Modified qla1280_setup_chip to properly load
266 ISP firmware for greater than 4 Gig memory on IA-64
267 Rev. 3.14 Beta August 16, 2000 BN Qlogic
268 - Added setting of dma_mask to full 64 bit
269 if flags.enable_64bit_addressing is set in NVRAM
270 Rev. 3.13 Beta August 16, 2000 BN Qlogic
271 - Use new PCI DMA mapping APIs for 2.4.x kernel
272 Rev. 3.12 July 18, 2000 Redhat & BN Qlogic
273 - Added check of pci_enable_device to detect() for 2.3.x
274 - Use pci_resource_start() instead of
275 pdev->resource[0].start in detect() for 2.3.x
276 - Updated driver version
277 Rev. 3.11 July 14, 2000 BN Qlogic
278 - Updated SCSI Firmware to following versions:
281 - Updated driver version to 3.11
282 Rev. 3.10 June 23, 2000 BN Qlogic
283 - Added filtering of AMI SubSys Vendor ID devices
285 - DEBUG_QLA1280 undefined and new version BN Qlogic
286 Rev. 3.08b May 9, 2000 MD Dell
287 - Added logic to check against AMI subsystem vendor ID
288 Rev. 3.08 May 4, 2000 DG Qlogic
289 - Added logic to check for PCI subsystem ID.
290 Rev. 3.07 Apr 24, 2000 DG & BN Qlogic
291 - Updated SCSI Firmware to following versions:
294 Rev. 3.06 Apr 12, 2000 DG & BN Qlogic
295 - Internal revision; not released
296 Rev. 3.05 Mar 28, 2000 DG & BN Qlogic
297 - Edit correction for virt_to_bus and PROC.
298 Rev. 3.04 Mar 28, 2000 DG & BN Qlogic
299 - Merge changes from ia64 port.
300 Rev. 3.03 Mar 28, 2000 BN Qlogic
301 - Increase version to reflect new code drop with compile fix
302 of issue with inclusion of linux/spinlock for 2.3 kernels
303 Rev. 3.02 Mar 15, 2000 BN Qlogic
304 - Merge qla1280_proc_info from 2.10 code base
305 Rev. 3.01 Feb 10, 2000 BN Qlogic
306 - Corrected code to compile on a 2.2.x kernel.
307 Rev. 3.00 Jan 17, 2000 DG Qlogic
308 - Added 64-bit support.
309 Rev. 2.07 Nov 9, 1999 DG Qlogic
310 - Added new routine to set target parameters for ISP12160.
311 Rev. 2.06 Sept 10, 1999 DG Qlogic
312 - Added support for ISP12160 Ultra 3 chip.
313 Rev. 2.03 August 3, 1999 Fred Lewis, Intel DuPont
314 - Modified code to remove errors generated when compiling with
315 Cygnus IA64 Compiler.
316 - Changed conversion of pointers to unsigned longs instead of integers.
317 - Changed type of I/O port variables from uint32_t to unsigned long.
318 - Modified OFFSET macro to work with 64-bit as well as 32-bit.
319 - Changed sprintf and printk format specifiers for pointers to %p.
320 - Changed some int to long type casts where needed in sprintf & printk.
321 - Added l modifiers to sprintf and printk format specifiers for longs.
322 - Removed unused local variables.
323 Rev. 1.20 June 8, 1999 DG, Qlogic
324 Changes to support RedHat release 6.0 (kernel 2.2.5).
325 - Added SCSI exclusive access lock (io_request_lock) when accessing
327 - Added changes for the new LINUX interface template. Some new error
328 handling routines have been added to the template, but for now we
329 will use the old ones.
330 - Initial Beta Release.
331 *****************************************************************************/
334 #include <linux/module.h>
336 #include <linux/types.h>
337 #include <linux/string.h>
338 #include <linux/errno.h>
339 #include <linux/kernel.h>
340 #include <linux/ioport.h>
341 #include <linux/delay.h>
342 #include <linux/timer.h>
343 #include <linux/pci.h>
344 #include <linux/proc_fs.h>
345 #include <linux/stat.h>
346 #include <linux/slab.h>
347 #include <linux/pci_ids.h>
348 #include <linux/interrupt.h>
349 #include <linux/init.h>
350 #include <linux/dma-mapping.h>
351 #include <linux/firmware.h>
355 #include <asm/byteorder.h>
356 #include <asm/processor.h>
357 #include <asm/types.h>
358 #include <asm/system.h>
360 #include <scsi/scsi.h>
361 #include <scsi/scsi_cmnd.h>
362 #include <scsi/scsi_device.h>
363 #include <scsi/scsi_host.h>
364 #include <scsi/scsi_tcq.h>
366 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
367 #include <asm/sn/io.h>
372 * Compile time Options:
373 * 0 - Disable and 1 - Enable
375 #define DEBUG_QLA1280_INTR 0
376 #define DEBUG_PRINT_NVRAM 0
377 #define DEBUG_QLA1280 0
380 * The SGI VISWS is broken and doesn't support MMIO ;-(
382 #ifdef CONFIG_X86_VISWS
383 #define MEMORY_MAPPED_IO 0
385 #define MEMORY_MAPPED_IO 1
390 #ifndef BITS_PER_LONG
391 #error "BITS_PER_LONG not defined!"
393 #if (BITS_PER_LONG == 64) || defined CONFIG_HIGHMEM
394 #define QLA_64BIT_PTR 1
398 #define pci_dma_hi32(a) ((a >> 16) >> 16)
400 #define pci_dma_hi32(a) 0
402 #define pci_dma_lo32(a) (a & 0xffffffff)
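/*
 * Illustrative sketch only (the names dma and mb here are hypothetical):
 * bus addresses are handed to the ISP as four 16-bit mailbox words, with
 * the helper above supplying the upper half, e.g.:
 *
 *	mb[3] = dma & 0xffff;
 *	mb[2] = (dma >> 16) & 0xffff;
 *	mb[7] = pci_dma_hi32(dma) & 0xffff;
 *	mb[6] = pci_dma_hi32(dma) >> 16;
 *
 * This mirrors the pattern used in qla1280_init_rings() and the firmware
 * loader further down.
 */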
404 #define NVRAM_DELAY() udelay(500) /* 500 microseconds */
406 #if defined(__ia64__) && !defined(ia64_platform_is)
407 #define ia64_platform_is(foo) (!strcmp(foo, platform_name))
411 #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020)
412 #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \
413 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240)
414 #define IS_ISP1x160(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP10160 || \
415 ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP12160)
418 static int qla1280_probe_one(struct pci_dev *, const struct pci_device_id *);
419 static void qla1280_remove_one(struct pci_dev *);
422 * QLogic Driver Support Function Prototypes.
424 static void qla1280_done(struct scsi_qla_host *);
425 static int qla1280_get_token(char *);
426 static int qla1280_setup(char *s) __init;
429 * QLogic ISP1280 Hardware Support Function Prototypes.
431 static int qla1280_load_firmware(struct scsi_qla_host *);
432 static int qla1280_init_rings(struct scsi_qla_host *);
433 static int qla1280_nvram_config(struct scsi_qla_host *);
434 static int qla1280_mailbox_command(struct scsi_qla_host *,
435 uint8_t, uint16_t *);
436 static int qla1280_bus_reset(struct scsi_qla_host *, int);
437 static int qla1280_device_reset(struct scsi_qla_host *, int, int);
438 static int qla1280_abort_device(struct scsi_qla_host *, int, int, int);
439 static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
440 static int qla1280_abort_isp(struct scsi_qla_host *);
442 static int qla1280_64bit_start_scsi(struct scsi_qla_host *, struct srb *);
444 static int qla1280_32bit_start_scsi(struct scsi_qla_host *, struct srb *);
446 static void qla1280_nv_write(struct scsi_qla_host *, uint16_t);
447 static void qla1280_poll(struct scsi_qla_host *);
448 static void qla1280_reset_adapter(struct scsi_qla_host *);
449 static void qla1280_marker(struct scsi_qla_host *, int, int, int, u8);
450 static void qla1280_isp_cmd(struct scsi_qla_host *);
451 static void qla1280_isr(struct scsi_qla_host *, struct list_head *);
452 static void qla1280_rst_aen(struct scsi_qla_host *);
453 static void qla1280_status_entry(struct scsi_qla_host *, struct response *,
455 static void qla1280_error_entry(struct scsi_qla_host *, struct response *,
457 static uint16_t qla1280_get_nvram_word(struct scsi_qla_host *, uint32_t);
458 static uint16_t qla1280_nvram_request(struct scsi_qla_host *, uint32_t);
459 static uint16_t qla1280_debounce_register(volatile uint16_t __iomem *);
460 static request_t *qla1280_req_pkt(struct scsi_qla_host *);
461 static int qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *,
463 static void qla1280_get_target_parameters(struct scsi_qla_host *,
464 struct scsi_device *);
465 static int qla1280_set_target_parameters(struct scsi_qla_host *, int, int);
468 static struct qla_driver_setup driver_setup;
471 * convert scsi data direction to request_t control flags
473 static inline uint16_t
474 qla1280_data_direction(struct scsi_cmnd *cmnd)
476 switch(cmnd->sc_data_direction) {
477 case DMA_FROM_DEVICE:
481 case DMA_BIDIRECTIONAL:
482 return BIT_5 | BIT_6;
484 * We could BUG() on default here if one of the four cases aren't
485 * met, but then again if we receive something like that from the
486 * SCSI layer we have more serious problems. This shuts up GCC.
495 static void __qla1280_print_scsi_cmd(struct scsi_cmnd * cmd);
496 static void __qla1280_dump_buffer(char *, int);
501 * insmod needs to find the variable and make it point to something
504 static char *qla1280;
506 /* insmod qla1280 options=verbose */
507 module_param(qla1280, charp, 0);
509 __setup("qla1280=", qla1280_setup);
514 * We use the scsi_pointer structure that's included with each scsi_command
515 * to overlay our struct srb over it. qla1280_init() checks that a srb is not
516 * bigger than a scsi_pointer.
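/*
 * A minimal sketch (not the driver's actual check) of how that size
 * constraint could be asserted at compile time, assuming <linux/bug.h>
 * is available:
 *
 *	BUILD_BUG_ON(sizeof(struct srb) > sizeof(struct scsi_pointer));
 */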
519 #define CMD_SP(Cmnd) &Cmnd->SCp
520 #define CMD_CDBLEN(Cmnd) Cmnd->cmd_len
521 #define CMD_CDBP(Cmnd) Cmnd->cmnd
522 #define CMD_SNSP(Cmnd) Cmnd->sense_buffer
523 #define CMD_SNSLEN(Cmnd) SCSI_SENSE_BUFFERSIZE
524 #define CMD_RESULT(Cmnd) Cmnd->result
525 #define CMD_HANDLE(Cmnd) Cmnd->host_scribble
526 #define CMD_REQUEST(Cmnd) Cmnd->request->cmd
528 #define CMD_HOST(Cmnd) Cmnd->device->host
529 #define SCSI_BUS_32(Cmnd) Cmnd->device->channel
530 #define SCSI_TCN_32(Cmnd) Cmnd->device->id
531 #define SCSI_LUN_32(Cmnd) Cmnd->device->lun
534 /*****************************************/
535 /* ISP Boards supported by this driver */
536 /*****************************************/
539 unsigned char name[9]; /* Board ID String */
540 int numPorts; /* Number of SCSI ports */
541 char *fwname; /* firmware name */
544 /* NOTE: the last argument in each entry is used to index ql1280_board_tbl */
545 static struct pci_device_id qla1280_pci_tbl[] = {
546 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP12160,
547 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
548 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1020,
549 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
550 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1080,
551 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
552 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1240,
553 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
554 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP1280,
555 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
556 {PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP10160,
557 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5},
560 MODULE_DEVICE_TABLE(pci, qla1280_pci_tbl);
562 static struct qla_boards ql1280_board_tbl[] = {
563 /* Name , Number of ports, FW details */
564 {"QLA12160", 2, "qlogic/12160.bin"},
565 {"QLA1040", 1, "qlogic/1040.bin"},
566 {"QLA1080", 1, "qlogic/1280.bin"},
567 {"QLA1240", 2, "qlogic/1280.bin"},
568 {"QLA1280", 2, "qlogic/1280.bin"},
569 {"QLA10160", 1, "qlogic/12160.bin"},
573 static int qla1280_verbose = 1;
576 static int ql_debug_level = 1;
577 #define dprintk(level, format, a...) \
578 do { if (ql_debug_level >= level) printk(KERN_ERR format, ##a); } while(0)
579 #define qla1280_dump_buffer(level, buf, size) \
580 if (ql_debug_level >= level) __qla1280_dump_buffer(buf, size)
581 #define qla1280_print_scsi_cmd(level, cmd) \
582 if (ql_debug_level >= level) __qla1280_print_scsi_cmd(cmd)
584 #define ql_debug_level 0
585 #define dprintk(level, format, a...) do{}while(0)
586 #define qla1280_dump_buffer(a, b, c) do{}while(0)
587 #define qla1280_print_scsi_cmd(a, b) do{}while(0)
590 #define ENTER(x) dprintk(3, "qla1280 : Entering %s()\n", x);
591 #define LEAVE(x) dprintk(3, "qla1280 : Leaving %s()\n", x);
592 #define ENTER_INTR(x) dprintk(4, "qla1280 : Entering %s()\n", x);
593 #define LEAVE_INTR(x) dprintk(4, "qla1280 : Leaving %s()\n", x);
596 static int qla1280_read_nvram(struct scsi_qla_host *ha)
603 ENTER("qla1280_read_nvram");
605 if (driver_setup.no_nvram)
608 printk(KERN_INFO "scsi(%ld): Reading NVRAM\n", ha->host_no);
610 wptr = (uint16_t *)&ha->nvram;
613 for (cnt = 0; cnt < 3; cnt++) {
614 *wptr = qla1280_get_nvram_word(ha, cnt);
615 chksum += *wptr & 0xff;
616 chksum += (*wptr >> 8) & 0xff;
620 if (nv->id0 != 'I' || nv->id1 != 'S' ||
621 nv->id2 != 'P' || nv->id3 != ' ' || nv->version < 1) {
622 dprintk(2, "Invalid nvram ID or version!\n");
625 for (; cnt < sizeof(struct nvram); cnt++) {
626 *wptr = qla1280_get_nvram_word(ha, cnt);
627 chksum += *wptr & 0xff;
628 chksum += (*wptr >> 8) & 0xff;
633 dprintk(3, "qla1280_read_nvram: NVRAM Magic ID= %c %c %c %02x"
634 " version %i\n", nv->id0, nv->id1, nv->id2, nv->id3,
639 if (!driver_setup.no_nvram)
640 printk(KERN_WARNING "scsi(%ld): Unable to identify or "
641 "validate NVRAM checksum, using default "
642 "settings\n", ha->host_no);
647 /* The firmware interface is, um, interesting, in that the
648 * actual firmware image on the chip is little endian, thus,
649 * the process of taking that image to the CPU would end up
650 * little endian. However, the firmware interface requires it
651 * to be read a word (two bytes) at a time.
653 * The net result of this would be that the word (and
654 * doubleword) quantities in the firmware would be correct, but
655 * the bytes would be pairwise reversed. Since most of the
656 * firmware quantities are, in fact, bytes, we do an extra
657 * le16_to_cpu() in the firmware read routine.
659 * The upshot of all this is that the bytes in the firmware
660 * are in the correct places, but the 16 and 32 bit quantities
661 * are still in little endian format. We fix that up below by
662 * doing extra reverses on them */
663 nv->isp_parameter = cpu_to_le16(nv->isp_parameter);
664 nv->firmware_feature.w = cpu_to_le16(nv->firmware_feature.w);
665 for(i = 0; i < MAX_BUSES; i++) {
666 nv->bus[i].selection_timeout = cpu_to_le16(nv->bus[i].selection_timeout);
667 nv->bus[i].max_queue_depth = cpu_to_le16(nv->bus[i].max_queue_depth);
669 dprintk(1, "qla1280_read_nvram: Completed Reading NVRAM\n");
670 LEAVE("qla1280_read_nvram");
675 /**************************************************************************
677 * Return a string describing the driver.
678 **************************************************************************/
680 qla1280_info(struct Scsi_Host *host)
682 static char qla1280_scsi_name_buffer[125];
684 struct scsi_qla_host *ha;
685 struct qla_boards *bdp;
687 bp = &qla1280_scsi_name_buffer[0];
688 ha = (struct scsi_qla_host *)host->hostdata;
689 bdp = &ql1280_board_tbl[ha->devnum];
690 memset(bp, 0, sizeof(qla1280_scsi_name_buffer));
693 "QLogic %s PCI to SCSI Host Adapter\n"
694 " Firmware version: %2d.%02d.%02d, Driver version %s",
695 &bdp->name[0], ha->fwver1, ha->fwver2, ha->fwver3,
700 /**************************************************************************
701 * qla1280_queuecommand
702 * Queue a command to the controller.
705 * The mid-level driver tries to ensure that queuecommand never gets invoked
706 * concurrently with itself or the interrupt handler (although the
707 * interrupt handler may call this routine as part of request-completion
708 * handling). Unfortunately, it sometimes calls the scheduler in interrupt
709 * context which is a big NO! NO!.
710 **************************************************************************/
712 qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
714 struct Scsi_Host *host = cmd->device->host;
715 struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
716 struct srb *sp = (struct srb *)&cmd->SCp;
723 qla1280_print_scsi_cmd(5, cmd);
727 * Using 64 bit commands if the PCI bridge doesn't support it is a
728 * bit wasteful, however this should really only happen if one's
729 * PCI controller is completely broken, like the BCM1250. For
730 * sane hardware this is not an issue.
732 status = qla1280_64bit_start_scsi(ha, sp);
734 status = qla1280_32bit_start_scsi(ha, sp);
748 /* timer action for error action processor */
749 static void qla1280_error_wait_timeout(unsigned long __data)
751 struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data;
752 struct srb *sp = (struct srb *)CMD_SP(cmd);
757 static void qla1280_mailbox_timeout(unsigned long __data)
759 struct scsi_qla_host *ha = (struct scsi_qla_host *)__data;
760 struct device_reg __iomem *reg;
763 ha->mailbox_out[0] = RD_REG_WORD(&reg->mailbox0);
764 printk(KERN_ERR "scsi(%ld): mailbox timed out, mailbox0 %04x, "
765 "ictrl %04x, istatus %04x\n", ha->host_no, ha->mailbox_out[0],
766 RD_REG_WORD(&reg->ictrl), RD_REG_WORD(&reg->istatus));
767 complete(ha->mailbox_wait);
770 /**************************************************************************
771 * qla1280_error_action
772 * The function will attempt to perform a specified error action and
773 * wait for the results (or time out).
776 * cmd = Linux SCSI command packet of the command that caused the
778 * action = error action to take (see action_t)
784 * Resetting the bus always succeeds - it has to, otherwise the
785 * kernel will panic! Try a surgical technique - sending a BUS
786 * DEVICE RESET message - on the offending target before pulling
787 * the SCSI bus reset line.
788 **************************************************************************/
790 qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
792 struct scsi_qla_host *ha;
793 int bus, target, lun;
796 unsigned char *handle;
798 DECLARE_COMPLETION_ONSTACK(wait);
799 struct timer_list timer;
801 ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
803 dprintk(4, "error_action %i, istatus 0x%04x\n", action,
804 RD_REG_WORD(&ha->iobase->istatus));
806 dprintk(4, "host_cmd 0x%04x, ictrl 0x%04x, jiffies %li\n",
807 RD_REG_WORD(&ha->iobase->host_cmd),
808 RD_REG_WORD(&ha->iobase->ictrl), jiffies);
810 ENTER("qla1280_error_action");
812 printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
813 "Handle=0x%p, action=0x%x\n",
814 ha->host_no, cmd, CMD_HANDLE(cmd), action);
817 printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL "
818 "si_Cmnd pointer, failing.\n");
819 LEAVE("qla1280_error_action");
823 ha = (struct scsi_qla_host *)cmd->device->host->hostdata;
824 sp = (struct srb *)CMD_SP(cmd);
825 handle = CMD_HANDLE(cmd);
827 /* Check for pending interrupts. */
828 data = qla1280_debounce_register(&ha->iobase->istatus);
830 * The io_request_lock is held when the reset handler is called, hence
831 * the interrupt handler cannot be running in parallel as it also
832 * grabs the lock. /Jes
835 qla1280_isr(ha, &ha->done_q);
838 * Determine the suggested action that the mid-level driver wants
841 if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) {
842 if(action == ABORT_COMMAND) {
843 /* we never got this command */
844 printk(KERN_INFO "qla1280: Aborting a NULL handle\n");
845 return SUCCESS; /* no action - we don't have command */
851 bus = SCSI_BUS_32(cmd);
852 target = SCSI_TCN_32(cmd);
853 lun = SCSI_LUN_32(cmd);
855 /* Overloading result. Here it means the success or failure of the
856 * *issue* of the action. When we return from the routine, it must
857 * mean the actual success or failure of the action */
864 if ((sp->flags & SRB_ABORT_PENDING)) {
866 "scsi(): Command has a pending abort "
867 "message - ABORT_PENDING.\n");
868 /* This should technically be impossible since we
869 * now wait for abort completion */
873 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
874 if (sp == ha->outstanding_cmds[i]) {
875 dprintk(1, "qla1280: RISC aborting command\n");
876 if (qla1280_abort_command(ha, sp, i) == 0)
880 * Since we don't know what might
881 * have happened to the command, it
882 * is unsafe to remove it from the
883 * device's queue at this point.
884 * Wait and let the escalation
885 * process take care of it.
888 "scsi(%li:%i:%i:%i): Unable"
889 " to abort command!\n",
890 ha->host_no, bus, target, lun);
899 "scsi(%ld:%d:%d:%d): Queueing abort device "
900 "command.\n", ha->host_no, bus, target, lun);
901 if (qla1280_abort_device(ha, bus, target, lun) == 0)
908 "scsi(%ld:%d:%d:%d): Queueing device reset "
909 "command.\n", ha->host_no, bus, target, lun);
910 if (qla1280_device_reset(ha, bus, target) == 0)
916 printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
917 "reset.\n", ha->host_no, bus);
918 if (qla1280_bus_reset(ha, bus) == 0)
924 if (qla1280_verbose) {
926 "scsi(%ld): Issued ADAPTER RESET\n",
928 printk(KERN_INFO "scsi(%ld): I/O processing will "
929 "continue automatically\n", ha->host_no);
931 ha->flags.reset_active = 1;
933 * We restarted all of the commands automatically, so the
934 * mid-level code can expect completions momentarily.
936 if (qla1280_abort_isp(ha) == 0)
939 ha->flags.reset_active = 0;
942 if (!list_empty(&ha->done_q))
945 /* If we didn't manage to issue the action, or we have no
946 * command to wait for, exit here */
947 if (result == FAILED || handle == NULL ||
948 handle == (unsigned char *)INVALID_HANDLE) {
950 * Clear completion queue to avoid qla1280_done() trying
951 * to complete the command at a later stage after we
952 * have exited the current context
958 /* set up a timer just in case we're really jammed */
960 timer.expires = jiffies + 4*HZ;
961 timer.data = (unsigned long)cmd;
962 timer.function = qla1280_error_wait_timeout;
965 /* wait for the action to complete (or the timer to expire) */
966 spin_unlock_irq(ha->host->host_lock);
967 wait_for_completion(&wait);
968 del_timer_sync(&timer);
969 spin_lock_irq(ha->host->host_lock);
972 /* the only action we might get a fail for is abort */
973 if (action == ABORT_COMMAND) {
974 if(sp->flags & SRB_ABORTED)
981 dprintk(1, "RESET returning %d\n", result);
983 LEAVE("qla1280_error_action");
987 /**************************************************************************
989 * Abort the specified SCSI command(s).
990 **************************************************************************/
992 qla1280_eh_abort(struct scsi_cmnd * cmd)
996 spin_lock_irq(cmd->device->host->host_lock);
997 rc = qla1280_error_action(cmd, ABORT_COMMAND);
998 spin_unlock_irq(cmd->device->host->host_lock);
1003 /**************************************************************************
1004 * qla1280_device_reset
1005 * Reset the specified SCSI device
1006 **************************************************************************/
1008 qla1280_eh_device_reset(struct scsi_cmnd *cmd)
1012 spin_lock_irq(cmd->device->host->host_lock);
1013 rc = qla1280_error_action(cmd, DEVICE_RESET);
1014 spin_unlock_irq(cmd->device->host->host_lock);
1019 /**************************************************************************
1021 * Reset the specified bus.
1022 **************************************************************************/
1024 qla1280_eh_bus_reset(struct scsi_cmnd *cmd)
1028 spin_lock_irq(cmd->device->host->host_lock);
1029 rc = qla1280_error_action(cmd, BUS_RESET);
1030 spin_unlock_irq(cmd->device->host->host_lock);
1035 /**************************************************************************
1036 * qla1280_adapter_reset
1037 * Reset the specified adapter (both channels)
1038 **************************************************************************/
1040 qla1280_eh_adapter_reset(struct scsi_cmnd *cmd)
1044 spin_lock_irq(cmd->device->host->host_lock);
1045 rc = qla1280_error_action(cmd, ADAPTER_RESET);
1046 spin_unlock_irq(cmd->device->host->host_lock);
1052 qla1280_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1053 sector_t capacity, int geom[])
1055 int heads, sectors, cylinders;
1059 cylinders = (unsigned long)capacity / (heads * sectors);
1060 if (cylinders > 1024) {
1063 cylinders = (unsigned long)capacity / (heads * sectors);
1064 /* if (cylinders > 1023)
1065 cylinders = 1023; */
1070 geom[2] = cylinders;
1076 /* disable risc and host interrupts */
1078 qla1280_disable_intrs(struct scsi_qla_host *ha)
1080 WRT_REG_WORD(&ha->iobase->ictrl, 0);
1081 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1084 /* enable risc and host interrupts */
1086 qla1280_enable_intrs(struct scsi_qla_host *ha)
1088 WRT_REG_WORD(&ha->iobase->ictrl, (ISP_EN_INT | ISP_EN_RISC));
1089 RD_REG_WORD(&ha->iobase->ictrl); /* PCI Posted Write flush */
1092 /**************************************************************************
1093 * qla1280_intr_handler
1094 * Handles the H/W interrupt
1095 **************************************************************************/
1097 qla1280_intr_handler(int irq, void *dev_id)
1099 struct scsi_qla_host *ha;
1100 struct device_reg __iomem *reg;
1104 ENTER_INTR ("qla1280_intr_handler");
1105 ha = (struct scsi_qla_host *)dev_id;
1107 spin_lock(ha->host->host_lock);
1112 qla1280_disable_intrs(ha);
1114 data = qla1280_debounce_register(&reg->istatus);
1115 /* Check for pending interrupts. */
1116 if (data & RISC_INT) {
1117 qla1280_isr(ha, &ha->done_q);
1120 if (!list_empty(&ha->done_q))
1123 spin_unlock(ha->host->host_lock);
1125 qla1280_enable_intrs(ha);
1127 LEAVE_INTR("qla1280_intr_handler");
1128 return IRQ_RETVAL(handled);
1133 qla1280_set_target_parameters(struct scsi_qla_host *ha, int bus, int target)
1136 uint16_t mb[MAILBOX_REGISTER_COUNT];
1142 mr = BIT_3 | BIT_2 | BIT_1 | BIT_0;
1144 /* Set Target Parameters. */
1145 mb[0] = MBC_SET_TARGET_PARAMETERS;
1146 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1147 mb[2] = nv->bus[bus].target[target].parameter.renegotiate_on_error << 8;
1148 mb[2] |= nv->bus[bus].target[target].parameter.stop_queue_on_check << 9;
1149 mb[2] |= nv->bus[bus].target[target].parameter.auto_request_sense << 10;
1150 mb[2] |= nv->bus[bus].target[target].parameter.tag_queuing << 11;
1151 mb[2] |= nv->bus[bus].target[target].parameter.enable_sync << 12;
1152 mb[2] |= nv->bus[bus].target[target].parameter.enable_wide << 13;
1153 mb[2] |= nv->bus[bus].target[target].parameter.parity_checking << 14;
1154 mb[2] |= nv->bus[bus].target[target].parameter.disconnect_allowed << 15;
1156 if (IS_ISP1x160(ha)) {
1157 mb[2] |= nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr << 5;
1158 mb[3] = (nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8);
1159 mb[6] = (nv->bus[bus].target[target].ppr_1x160.flags.ppr_options << 8) |
1160 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width;
1163 mb[3] = (nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8);
1165 mb[3] |= nv->bus[bus].target[target].sync_period;
1167 status = qla1280_mailbox_command(ha, mr, mb);
1169 /* Set Device Queue Parameters. */
1170 for (lun = 0; lun < MAX_LUNS; lun++) {
1171 mb[0] = MBC_SET_DEVICE_QUEUE;
1172 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
1174 mb[2] = nv->bus[bus].max_queue_depth;
1175 mb[3] = nv->bus[bus].target[target].execution_throttle;
1176 status |= qla1280_mailbox_command(ha, 0x0f, mb);
1180 printk(KERN_WARNING "scsi(%ld:%i:%i): "
1181 "qla1280_set_target_parameters() failed\n",
1182 ha->host_no, bus, target);
1187 /**************************************************************************
1188 * qla1280_slave_configure
1191 * Determines the queue depth for a given device. There are two ways
1192 * a queue depth can be obtained for a tagged queueing device. One
1193 * way is a driver-defined default queue depth; if it is defined, then it
1194 * is used as the default queue depth. Otherwise, we use either 4 or 8
1195 * as the default queue depth (dependent on the number of hardware
1196 * SCBs).
1197 **************************************************************************/
1199 qla1280_slave_configure(struct scsi_device *device)
1201 struct scsi_qla_host *ha;
1202 int default_depth = 3;
1203 int bus = device->channel;
1204 int target = device->id;
1207 unsigned long flags;
1209 ha = (struct scsi_qla_host *)device->host->hostdata;
1212 if (qla1280_check_for_dead_scsi_bus(ha, bus))
1215 if (device->tagged_supported &&
1216 (ha->bus_settings[bus].qtag_enables & (BIT_0 << target))) {
1217 scsi_adjust_queue_depth(device, MSG_ORDERED_TAG,
1218 ha->bus_settings[bus].hiwat);
1220 scsi_adjust_queue_depth(device, 0, default_depth);
1223 nv->bus[bus].target[target].parameter.enable_sync = device->sdtr;
1224 nv->bus[bus].target[target].parameter.enable_wide = device->wdtr;
1225 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = device->ppr;
1227 if (driver_setup.no_sync ||
1228 (driver_setup.sync_mask &&
1229 (~driver_setup.sync_mask & (1 << target))))
1230 nv->bus[bus].target[target].parameter.enable_sync = 0;
1231 if (driver_setup.no_wide ||
1232 (driver_setup.wide_mask &&
1233 (~driver_setup.wide_mask & (1 << target))))
1234 nv->bus[bus].target[target].parameter.enable_wide = 0;
1235 if (IS_ISP1x160(ha)) {
1236 if (driver_setup.no_ppr ||
1237 (driver_setup.ppr_mask &&
1238 (~driver_setup.ppr_mask & (1 << target))))
1239 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 0;
1242 spin_lock_irqsave(ha->host->host_lock, flags);
1243 if (nv->bus[bus].target[target].parameter.enable_sync)
1244 status = qla1280_set_target_parameters(ha, bus, target);
1245 qla1280_get_target_parameters(ha, device);
1246 spin_unlock_irqrestore(ha->host->host_lock, flags);
1253 * Process completed commands.
1256 * ha = adapter block pointer.
1259 qla1280_done(struct scsi_qla_host *ha)
1262 struct list_head *done_q;
1263 int bus, target, lun;
1264 struct scsi_cmnd *cmd;
1266 ENTER("qla1280_done");
1268 done_q = &ha->done_q;
1270 while (!list_empty(done_q)) {
1271 sp = list_entry(done_q->next, struct srb, list);
1273 list_del(&sp->list);
1276 bus = SCSI_BUS_32(cmd);
1277 target = SCSI_TCN_32(cmd);
1278 lun = SCSI_LUN_32(cmd);
1280 switch ((CMD_RESULT(cmd) >> 16)) {
1282 /* Issue marker command. */
1283 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
1286 sp->flags &= ~SRB_ABORT_PENDING;
1287 sp->flags |= SRB_ABORTED;
1288 if (sp->flags & SRB_TIMEOUT)
1289 CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16;
1295 /* Release memory used for this I/O */
1296 scsi_dma_unmap(cmd);
1298 /* Call the mid-level driver interrupt handler */
1299 CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
1302 (*(cmd)->scsi_done)(cmd);
1304 if(sp->wait != NULL)
1307 LEAVE("qla1280_done");
1311 * Translates an ISP error to a Linux SCSI error
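/*
 * Worked example: a completion mapped to host_status DID_TIME_OUT (0x03)
 * with a zero SCSI status byte yields the value this routine returns as
 * (0x00 & 0xff) | (0x03 << 16) == 0x00030000.
 */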
1314 qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
1316 int host_status = DID_ERROR;
1317 uint16_t comp_status = le16_to_cpu(sts->comp_status);
1318 uint16_t state_flags = le16_to_cpu(sts->state_flags);
1319 uint32_t residual_length = le32_to_cpu(sts->residual_length);
1320 uint16_t scsi_status = le16_to_cpu(sts->scsi_status);
1321 #if DEBUG_QLA1280_INTR
1322 static char *reason[] = {
1334 #endif /* DEBUG_QLA1280_INTR */
1336 ENTER("qla1280_return_status");
1338 #if DEBUG_QLA1280_INTR
1340 dprintk(1, "qla1280_return_status: compl status = 0x%04x\n",
1345 switch (comp_status) {
1347 host_status = DID_OK;
1351 if (!(state_flags & SF_GOT_BUS))
1352 host_status = DID_NO_CONNECT;
1353 else if (!(state_flags & SF_GOT_TARGET))
1354 host_status = DID_BAD_TARGET;
1355 else if (!(state_flags & SF_SENT_CDB))
1356 host_status = DID_ERROR;
1357 else if (!(state_flags & SF_TRANSFERRED_DATA))
1358 host_status = DID_ERROR;
1359 else if (!(state_flags & SF_GOT_STATUS))
1360 host_status = DID_ERROR;
1361 else if (!(state_flags & SF_GOT_SENSE))
1362 host_status = DID_ERROR;
1366 host_status = DID_RESET;
1370 host_status = DID_ABORT;
1374 host_status = DID_TIME_OUT;
1377 case CS_DATA_OVERRUN:
1378 dprintk(2, "Data overrun 0x%x\n", residual_length);
1379 dprintk(2, "qla1280_return_status: response packet data\n");
1380 qla1280_dump_buffer(2, (char *)sts, RESPONSE_ENTRY_SIZE);
1381 host_status = DID_ERROR;
1384 case CS_DATA_UNDERRUN:
1385 if ((scsi_bufflen(cp) - residual_length) <
1388 "scsi: Underflow detected - retrying "
1390 host_status = DID_ERROR;
1392 scsi_set_resid(cp, residual_length);
1393 host_status = DID_OK;
1398 host_status = DID_ERROR;
1402 #if DEBUG_QLA1280_INTR
1403 dprintk(1, "qla1280 ISP status: host status (%s) scsi status %x\n",
1404 reason[host_status], scsi_status);
1407 LEAVE("qla1280_return_status");
1409 return (scsi_status & 0xff) | (host_status << 16);
1412 /****************************************************************************/
1413 /* QLogic ISP1280 Hardware Support Functions. */
1414 /****************************************************************************/
1417 * qla1280_initialize_adapter
1421 * ha = adapter block pointer.
1426 static int __devinit
1427 qla1280_initialize_adapter(struct scsi_qla_host *ha)
1429 struct device_reg __iomem *reg;
1432 unsigned long flags;
1434 ENTER("qla1280_initialize_adapter");
1436 /* Clear adapter flags. */
1437 ha->flags.online = 0;
1438 ha->flags.disable_host_adapter = 0;
1439 ha->flags.reset_active = 0;
1440 ha->flags.abort_isp_active = 0;
1442 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
1443 if (ia64_platform_is("sn2")) {
1444 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
1445 "dual channel lockup workaround\n", ha->host_no);
1446 ha->flags.use_pci_vchannel = 1;
1447 driver_setup.no_nvram = 1;
1451 /* TODO: implement support for the 1040 nvram format */
1453 driver_setup.no_nvram = 1;
1455 dprintk(1, "Configure PCI space for adapter...\n");
1459 /* Ensure mailbox registers are free. */
1460 WRT_REG_WORD(&reg->semaphore, 0);
1461 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
1462 WRT_REG_WORD(&reg->host_cmd, HC_CLR_HOST_INT);
1463 RD_REG_WORD(&reg->host_cmd);
1465 if (qla1280_read_nvram(ha)) {
1466 dprintk(2, "qla1280_initialize_adapter: failed to read "
1471 * It's necessary to grab the spin here as qla1280_mailbox_command
1472 * needs to be able to drop the lock unconditionally to wait
1475 spin_lock_irqsave(ha->host->host_lock, flags);
1477 status = qla1280_load_firmware(ha);
1479 printk(KERN_ERR "scsi(%li): initialize: pci probe failed!\n",
1484 /* Setup adapter based on NVRAM parameters. */
1485 dprintk(1, "scsi(%ld): Configure NVRAM parameters\n", ha->host_no);
1486 qla1280_nvram_config(ha);
1488 if (ha->flags.disable_host_adapter) {
1493 status = qla1280_init_rings(ha);
1497 /* Issue SCSI reset, if we can't reset twice then bus is dead */
1498 for (bus = 0; bus < ha->ports; bus++) {
1499 if (!ha->bus_settings[bus].disable_scsi_reset &&
1500 qla1280_bus_reset(ha, bus) &&
1501 qla1280_bus_reset(ha, bus))
1502 ha->bus_settings[bus].scsi_bus_dead = 1;
1505 ha->flags.online = 1;
1507 spin_unlock_irqrestore(ha->host->host_lock, flags);
1510 dprintk(2, "qla1280_initialize_adapter: **** FAILED ****\n");
1512 LEAVE("qla1280_initialize_adapter");
1518 * Test chip for proper operation.
1521 * ha = adapter block pointer.
1527 qla1280_chip_diag(struct scsi_qla_host *ha)
1529 uint16_t mb[MAILBOX_REGISTER_COUNT];
1530 struct device_reg __iomem *reg = ha->iobase;
1534 dprintk(3, "qla1280_chip_diag: testing device at 0x%p \n", &reg->id_l);
1536 dprintk(1, "scsi(%ld): Verifying chip\n", ha->host_no);
1538 /* Soft reset chip and wait for it to finish. */
1539 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
1542 * We can't do a traditional PCI write flush here by reading
1543 * back the register. The card will not respond once the reset
1544 * is in action and we end up with a machine check exception
1545 * instead. Nothing to do but wait and hope for the best.
1546 * A portable pci_write_flush(pdev) call would be very useful here.
1549 data = qla1280_debounce_register(&reg->ictrl);
1551 * Yet another QLogic gem ;-(
1553 for (cnt = 1000000; cnt && data & ISP_RESET; cnt--) {
1555 data = RD_REG_WORD(&reg->ictrl);
1561 /* Reset register cleared by chip reset. */
1562 dprintk(3, "qla1280_chip_diag: reset register cleared by chip reset\n");
1564 WRT_REG_WORD(&reg->cfg_1, 0);
1566 /* Reset RISC and disable BIOS which
1567 allows RISC to execute out of RAM. */
1568 WRT_REG_WORD(&reg->host_cmd, HC_RESET_RISC |
1569 HC_RELEASE_RISC | HC_DISABLE_BIOS);
1571 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
1572 data = qla1280_debounce_register(&reg->mailbox0);
1575 * I *LOVE* this code!
1577 for (cnt = 1000000; cnt && data == MBS_BUSY; cnt--) {
1579 data = RD_REG_WORD(&reg->mailbox0);
1585 /* Check product ID of chip */
1586 dprintk(3, "qla1280_chip_diag: Checking product ID of chip\n");
1588 if (RD_REG_WORD(&reg->mailbox1) != PROD_ID_1 ||
1589 (RD_REG_WORD(&reg->mailbox2) != PROD_ID_2 &&
1590 RD_REG_WORD(&reg->mailbox2) != PROD_ID_2a) ||
1591 RD_REG_WORD(&reg->mailbox3) != PROD_ID_3 ||
1592 RD_REG_WORD(&reg->mailbox4) != PROD_ID_4) {
1593 printk(KERN_INFO "qla1280: Wrong product ID = "
1594 "0x%x,0x%x,0x%x,0x%x\n",
1595 RD_REG_WORD(&reg->mailbox1),
1596 RD_REG_WORD(&reg->mailbox2),
1597 RD_REG_WORD(&reg->mailbox3),
1598 RD_REG_WORD(&reg->mailbox4));
1603 * Enable ints early!!!
1605 qla1280_enable_intrs(ha);
1607 dprintk(1, "qla1280_chip_diag: Checking mailboxes of chip\n");
1608 /* Wrap Incoming Mailboxes Test. */
1609 mb[0] = MBC_MAILBOX_REGISTER_TEST;
1618 status = qla1280_mailbox_command(ha, 0xff, mb);
1622 if (mb[1] != 0xAAAA || mb[2] != 0x5555 || mb[3] != 0xAA55 ||
1623 mb[4] != 0x55AA || mb[5] != 0xA5A5 || mb[6] != 0x5A5A ||
1625 printk(KERN_INFO "qla1280: Failed mbox check\n");
1629 dprintk(3, "qla1280_chip_diag: exiting normally\n");
1632 dprintk(2, "qla1280_chip_diag: **** FAILED ****\n");
1637 qla1280_load_firmware_pio(struct scsi_qla_host *ha)
1639 const struct firmware *fw;
1640 const __le16 *fw_data;
1641 uint16_t risc_address, risc_code_size;
1642 uint16_t mb[MAILBOX_REGISTER_COUNT], i;
1645 err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
1648 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1649 ql1280_board_tbl[ha->devnum].fwname, err);
1652 if ((fw->size % 2) || (fw->size < 6)) {
1653 printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
1654 fw->size, ql1280_board_tbl[ha->devnum].fwname);
1658 ha->fwver1 = fw->data[0];
1659 ha->fwver2 = fw->data[1];
1660 ha->fwver3 = fw->data[2];
1661 fw_data = (const __le16 *)&fw->data[0];
1662 ha->fwstart = __le16_to_cpu(fw_data[2]);
1664 /* Load RISC code. */
1665 risc_address = ha->fwstart;
1666 fw_data = (const __le16 *)&fw->data[6];
1667 risc_code_size = (fw->size - 6) / 2;
1669 for (i = 0; i < risc_code_size; i++) {
1670 mb[0] = MBC_WRITE_RAM_WORD;
1671 mb[1] = risc_address + i;
1672 mb[2] = __le16_to_cpu(fw_data[i]);
1674 err = qla1280_mailbox_command(ha, BIT_0 | BIT_1 | BIT_2, mb);
1676 printk(KERN_ERR "scsi(%li): Failed to load firmware\n",
1682 release_firmware(fw);
1686 #define DUMP_IT_BACK 0 /* for debug of RISC loading */
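/*
 * Note: with DUMP_IT_BACK set to 1, each chunk written to RISC RAM with
 * MBC_LOAD_RAM is read back into a scratch buffer via MBC_DUMP_RAM and
 * compared byte for byte, so corruption during firmware loading can be
 * spotted while debugging.
 */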
1688 qla1280_load_firmware_dma(struct scsi_qla_host *ha)
1690 const struct firmware *fw;
1691 const __le16 *fw_data;
1692 uint16_t risc_address, risc_code_size;
1693 uint16_t mb[MAILBOX_REGISTER_COUNT], cnt;
1694 int err = 0, num, i;
1699 tbuf = pci_alloc_consistent(ha->pdev, 8000, &p_tbuf);
1704 err = request_firmware(&fw, ql1280_board_tbl[ha->devnum].fwname,
1707 printk(KERN_ERR "Failed to load image \"%s\" err %d\n",
1708 ql1280_board_tbl[ha->devnum].fwname, err);
1711 if ((fw->size % 2) || (fw->size < 6)) {
1712 printk(KERN_ERR "Bogus length %zu in image \"%s\"\n",
1713 fw->size, ql1280_board_tbl[ha->devnum].fwname);
1717 ha->fwver1 = fw->data[0];
1718 ha->fwver2 = fw->data[1];
1719 ha->fwver3 = fw->data[2];
1720 fw_data = (const __le16 *)&fw->data[0];
1721 ha->fwstart = __le16_to_cpu(fw_data[2]);
1723 /* Load RISC code. */
1724 risc_address = ha->fwstart;
1725 fw_data = (const __le16 *)&fw->data[6];
1726 risc_code_size = (fw->size - 6) / 2;
1728 dprintk(1, "%s: DMA RISC code (%i) words\n",
1729 __func__, risc_code_size);
1732 while (risc_code_size > 0) {
1733 int warn __attribute__((unused)) = 0;
1737 if (cnt > risc_code_size)
1738 cnt = risc_code_size;
1740 dprintk(2, "qla1280_setup_chip: loading risc @ =(0x%p),"
1742 fw_data, cnt, num, risc_address);
1743 for(i = 0; i < cnt; i++)
1744 ((__le16 *)ha->request_ring)[i] = fw_data[i];
1746 mb[0] = MBC_LOAD_RAM;
1747 mb[1] = risc_address;
1749 mb[3] = ha->request_dma & 0xffff;
1750 mb[2] = (ha->request_dma >> 16) & 0xffff;
1751 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1752 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1753 dprintk(2, "%s: op=%d 0x%p = 0x%4x,0x%4x,0x%4x,0x%4x\n",
1755 (void *)(long)ha->request_dma,
1756 mb[6], mb[7], mb[2], mb[3]);
1757 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1760 printk(KERN_ERR "scsi(%li): Failed to load partial "
1761 "segment of f\n", ha->host_no);
1766 mb[0] = MBC_DUMP_RAM;
1767 mb[1] = risc_address;
1769 mb[3] = p_tbuf & 0xffff;
1770 mb[2] = (p_tbuf >> 16) & 0xffff;
1771 mb[7] = pci_dma_hi32(p_tbuf) & 0xffff;
1772 mb[6] = pci_dma_hi32(p_tbuf) >> 16;
1774 err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 |
1778 "Failed to dump partial segment of f/w\n");
1781 sp = (uint8_t *)ha->request_ring;
1782 for (i = 0; i < (cnt << 1); i++) {
1783 if (tbuf[i] != sp[i] && warn++ < 10) {
1784 printk(KERN_ERR "%s: FW compare error @ "
1785 "byte(0x%x) loop#=%x\n",
1787 printk(KERN_ERR "%s: FWbyte=%x "
1789 __func__, sp[i], tbuf[i]);
1794 risc_address += cnt;
1795 risc_code_size = risc_code_size - cnt;
1796 fw_data = fw_data + cnt;
1802 pci_free_consistent(ha->pdev, 8000, tbuf, p_tbuf);
1804 release_firmware(fw);
1809 qla1280_start_firmware(struct scsi_qla_host *ha)
1811 uint16_t mb[MAILBOX_REGISTER_COUNT];
1814 dprintk(1, "%s: Verifying checksum of loaded RISC code.\n",
1817 /* Verify checksum of loaded RISC code. */
1818 mb[0] = MBC_VERIFY_CHECKSUM;
1819 /* mb[1] = ql12_risc_code_addr01; */
1820 mb[1] = ha->fwstart;
1821 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
1823 printk(KERN_ERR "scsi(%li): RISC checksum failed.\n", ha->host_no);
1827 /* Start firmware execution. */
1828 dprintk(1, "%s: start firmware running.\n", __func__);
1829 mb[0] = MBC_EXECUTE_FIRMWARE;
1830 mb[1] = ha->fwstart;
1831 err = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
1833 printk(KERN_ERR "scsi(%li): Failed to start firmware\n",
1841 qla1280_load_firmware(struct scsi_qla_host *ha)
1845 err = qla1280_chip_diag(ha);
1849 err = qla1280_load_firmware_pio(ha);
1851 err = qla1280_load_firmware_dma(ha);
1854 err = qla1280_start_firmware(ha);
1863 * ha = adapter block pointer.
1864 * ha->request_ring = request ring virtual address
1865 * ha->response_ring = response ring virtual address
1866 * ha->request_dma = request ring physical address
1867 * ha->response_dma = response ring physical address
1873 qla1280_init_rings(struct scsi_qla_host *ha)
1875 uint16_t mb[MAILBOX_REGISTER_COUNT];
1878 ENTER("qla1280_init_rings");
1880 /* Clear outstanding commands array. */
1881 memset(ha->outstanding_cmds, 0,
1882 sizeof(struct srb *) * MAX_OUTSTANDING_COMMANDS);
1884 /* Initialize request queue. */
1885 ha->request_ring_ptr = ha->request_ring;
1886 ha->req_ring_index = 0;
1887 ha->req_q_cnt = REQUEST_ENTRY_CNT;
1888 /* mb[0] = MBC_INIT_REQUEST_QUEUE; */
1889 mb[0] = MBC_INIT_REQUEST_QUEUE_A64;
1890 mb[1] = REQUEST_ENTRY_CNT;
1891 mb[3] = ha->request_dma & 0xffff;
1892 mb[2] = (ha->request_dma >> 16) & 0xffff;
1894 mb[7] = pci_dma_hi32(ha->request_dma) & 0xffff;
1895 mb[6] = pci_dma_hi32(ha->request_dma) >> 16;
1896 if (!(status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_4 |
1897 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1899 /* Initialize response queue. */
1900 ha->response_ring_ptr = ha->response_ring;
1901 ha->rsp_ring_index = 0;
1902 /* mb[0] = MBC_INIT_RESPONSE_QUEUE; */
1903 mb[0] = MBC_INIT_RESPONSE_QUEUE_A64;
1904 mb[1] = RESPONSE_ENTRY_CNT;
1905 mb[3] = ha->response_dma & 0xffff;
1906 mb[2] = (ha->response_dma >> 16) & 0xffff;
1908 mb[7] = pci_dma_hi32(ha->response_dma) & 0xffff;
1909 mb[6] = pci_dma_hi32(ha->response_dma) >> 16;
1910 status = qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_5 |
1911 BIT_3 | BIT_2 | BIT_1 | BIT_0,
1916 dprintk(2, "qla1280_init_rings: **** FAILED ****\n");
1918 LEAVE("qla1280_init_rings");
1923 qla1280_print_settings(struct nvram *nv)
1925 dprintk(1, "qla1280 : initiator scsi id bus[0]=%d\n",
1926 nv->bus[0].config_1.initiator_id);
1927 dprintk(1, "qla1280 : initiator scsi id bus[1]=%d\n",
1928 nv->bus[1].config_1.initiator_id);
1930 dprintk(1, "qla1280 : bus reset delay[0]=%d\n",
1931 nv->bus[0].bus_reset_delay);
1932 dprintk(1, "qla1280 : bus reset delay[1]=%d\n",
1933 nv->bus[1].bus_reset_delay);
1935 dprintk(1, "qla1280 : retry count[0]=%d\n", nv->bus[0].retry_count);
1936 dprintk(1, "qla1280 : retry delay[0]=%d\n", nv->bus[0].retry_delay);
1937 dprintk(1, "qla1280 : retry count[1]=%d\n", nv->bus[1].retry_count);
1938 dprintk(1, "qla1280 : retry delay[1]=%d\n", nv->bus[1].retry_delay);
1940 dprintk(1, "qla1280 : async data setup time[0]=%d\n",
1941 nv->bus[0].config_2.async_data_setup_time);
1942 dprintk(1, "qla1280 : async data setup time[1]=%d\n",
1943 nv->bus[1].config_2.async_data_setup_time);
1945 dprintk(1, "qla1280 : req/ack active negation[0]=%d\n",
1946 nv->bus[0].config_2.req_ack_active_negation);
1947 dprintk(1, "qla1280 : req/ack active negation[1]=%d\n",
1948 nv->bus[1].config_2.req_ack_active_negation);
1950 dprintk(1, "qla1280 : data line active negation[0]=%d\n",
1951 nv->bus[0].config_2.data_line_active_negation);
1952 dprintk(1, "qla1280 : data line active negation[1]=%d\n",
1953 nv->bus[1].config_2.data_line_active_negation);
1955 dprintk(1, "qla1280 : disable loading risc code=%d\n",
1956 nv->cntr_flags_1.disable_loading_risc_code);
1958 dprintk(1, "qla1280 : enable 64bit addressing=%d\n",
1959 nv->cntr_flags_1.enable_64bit_addressing);
1961 dprintk(1, "qla1280 : selection timeout limit[0]=%d\n",
1962 nv->bus[0].selection_timeout);
1963 dprintk(1, "qla1280 : selection timeout limit[1]=%d\n",
1964 nv->bus[1].selection_timeout);
1966 dprintk(1, "qla1280 : max queue depth[0]=%d\n",
1967 nv->bus[0].max_queue_depth);
1968 dprintk(1, "qla1280 : max queue depth[1]=%d\n",
1969 nv->bus[1].max_queue_depth);
1973 qla1280_set_target_defaults(struct scsi_qla_host *ha, int bus, int target)
1975 struct nvram *nv = &ha->nvram;
1977 nv->bus[bus].target[target].parameter.renegotiate_on_error = 1;
1978 nv->bus[bus].target[target].parameter.auto_request_sense = 1;
1979 nv->bus[bus].target[target].parameter.tag_queuing = 1;
1980 nv->bus[bus].target[target].parameter.enable_sync = 1;
1981 #if 1 /* Some SCSI Processors do not seem to like this */
1982 nv->bus[bus].target[target].parameter.enable_wide = 1;
1984 nv->bus[bus].target[target].execution_throttle =
1985 nv->bus[bus].max_queue_depth - 1;
1986 nv->bus[bus].target[target].parameter.parity_checking = 1;
1987 nv->bus[bus].target[target].parameter.disconnect_allowed = 1;
1989 if (IS_ISP1x160(ha)) {
1990 nv->bus[bus].target[target].flags.flags1x160.device_enable = 1;
1991 nv->bus[bus].target[target].flags.flags1x160.sync_offset = 0x0e;
1992 nv->bus[bus].target[target].sync_period = 9;
1993 nv->bus[bus].target[target].ppr_1x160.flags.enable_ppr = 1;
1994 nv->bus[bus].target[target].ppr_1x160.flags.ppr_options = 2;
1995 nv->bus[bus].target[target].ppr_1x160.flags.ppr_bus_width = 1;
1997 nv->bus[bus].target[target].flags.flags1x80.device_enable = 1;
1998 nv->bus[bus].target[target].flags.flags1x80.sync_offset = 12;
1999 nv->bus[bus].target[target].sync_period = 10;
2004 qla1280_set_defaults(struct scsi_qla_host *ha)
2006 struct nvram *nv = &ha->nvram;
2009 dprintk(1, "Using defaults for NVRAM: \n");
2010 memset(nv, 0, sizeof(struct nvram));
2012 /* nv->cntr_flags_1.disable_loading_risc_code = 1; */
2013 nv->firmware_feature.f.enable_fast_posting = 1;
2014 nv->firmware_feature.f.disable_synchronous_backoff = 1;
2015 nv->termination.scsi_bus_0_control = 3;
2016 nv->termination.scsi_bus_1_control = 3;
2017 nv->termination.auto_term_support = 1;
2020 * Set default FIFO magic - what the appropriate values should be
2021 * here is unknown. This is what I have found testing with 12160s.
2023 * Now, I would love the magic decoder ring for this one; the
2024 * header file provided by QLogic seems to be bogus or incomplete.
2027 nv->isp_config.burst_enable = 1;
2029 nv->isp_config.fifo_threshold |= 3;
2031 nv->isp_config.fifo_threshold |= 4;
2033 if (IS_ISP1x160(ha))
2034 nv->isp_parameter = 0x01; /* fast memory enable */
2036 for (bus = 0; bus < MAX_BUSES; bus++) {
2037 nv->bus[bus].config_1.initiator_id = 7;
2038 nv->bus[bus].config_2.req_ack_active_negation = 1;
2039 nv->bus[bus].config_2.data_line_active_negation = 1;
2040 nv->bus[bus].selection_timeout = 250;
2041 nv->bus[bus].max_queue_depth = 32;
2043 if (IS_ISP1040(ha)) {
2044 nv->bus[bus].bus_reset_delay = 3;
2045 nv->bus[bus].config_2.async_data_setup_time = 6;
2046 nv->bus[bus].retry_delay = 1;
2048 nv->bus[bus].bus_reset_delay = 5;
2049 nv->bus[bus].config_2.async_data_setup_time = 8;
2052 for (target = 0; target < MAX_TARGETS; target++)
2053 qla1280_set_target_defaults(ha, bus, target);
2058 qla1280_config_target(struct scsi_qla_host *ha, int bus, int target)
2060 struct nvram *nv = &ha->nvram;
2061 uint16_t mb[MAILBOX_REGISTER_COUNT];
2065 /* Set Target Parameters. */
2066 mb[0] = MBC_SET_TARGET_PARAMETERS;
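	/*
	 * mb[1] carries the target in its high byte; BIT_7 of that byte
	 * selects the second SCSI bus on dual-channel boards.
	 */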
2067 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2070 * Do not enable sync and ppr for the initial INQUIRY run. We
2071 * enable this later if we determine the target actually
2074 mb[2] = (TP_RENEGOTIATE | TP_AUTO_REQUEST_SENSE | TP_TAGGED_QUEUE
2075 | TP_WIDE | TP_PARITY | TP_DISCONNECT);
2077 if (IS_ISP1x160(ha))
2078 mb[3] = nv->bus[bus].target[target].flags.flags1x160.sync_offset << 8;
2080 mb[3] = nv->bus[bus].target[target].flags.flags1x80.sync_offset << 8;
2081 mb[3] |= nv->bus[bus].target[target].sync_period;
2082 status = qla1280_mailbox_command(ha, 0x0f, mb);
2084 /* Save Tag queuing enable flag. */
2085 flag = (BIT_0 << target);
2086 if (nv->bus[bus].target[target].parameter.tag_queuing)
2087 ha->bus_settings[bus].qtag_enables |= flag;
2089 /* Save Device enable flag. */
2090 if (IS_ISP1x160(ha)) {
2091 if (nv->bus[bus].target[target].flags.flags1x160.device_enable)
2092 ha->bus_settings[bus].device_enables |= flag;
2093 ha->bus_settings[bus].lun_disables |= 0;
2095 if (nv->bus[bus].target[target].flags.flags1x80.device_enable)
2096 ha->bus_settings[bus].device_enables |= flag;
2097 /* Save LUN disable flag. */
2098 if (nv->bus[bus].target[target].flags.flags1x80.lun_disable)
2099 ha->bus_settings[bus].lun_disables |= flag;
2102 /* Set Device Queue Parameters. */
2103 for (lun = 0; lun < MAX_LUNS; lun++) {
2104 mb[0] = MBC_SET_DEVICE_QUEUE;
2105 mb[1] = (uint16_t)((bus ? target | BIT_7 : target) << 8);
2107 mb[2] = nv->bus[bus].max_queue_depth;
2108 mb[3] = nv->bus[bus].target[target].execution_throttle;
2109 status |= qla1280_mailbox_command(ha, 0x0f, mb);
2116 qla1280_config_bus(struct scsi_qla_host *ha, int bus)
2118 struct nvram *nv = &ha->nvram;
2119 uint16_t mb[MAILBOX_REGISTER_COUNT];
2122 /* SCSI Reset Disable. */
2123 ha->bus_settings[bus].disable_scsi_reset =
2124 nv->bus[bus].config_1.scsi_reset_disable;
2127 ha->bus_settings[bus].id = nv->bus[bus].config_1.initiator_id;
2128 mb[0] = MBC_SET_INITIATOR_ID;
2129 mb[1] = bus ? ha->bus_settings[bus].id | BIT_7 :
2130 ha->bus_settings[bus].id;
2131 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2134 ha->bus_settings[bus].bus_reset_delay =
2135 nv->bus[bus].bus_reset_delay;
2137 /* Command queue depth per device. */
2138 ha->bus_settings[bus].hiwat = nv->bus[bus].max_queue_depth - 1;
2140 /* Set target parameters. */
2141 for (target = 0; target < MAX_TARGETS; target++)
2142 status |= qla1280_config_target(ha, bus, target);
2148 qla1280_nvram_config(struct scsi_qla_host *ha)
2150 struct device_reg __iomem *reg = ha->iobase;
2151 struct nvram *nv = &ha->nvram;
2152 int bus, target, status = 0;
2153 uint16_t mb[MAILBOX_REGISTER_COUNT];
2155 ENTER("qla1280_nvram_config");
2157 if (ha->nvram_valid) {
2158 /* Always force AUTO sense for LINUX SCSI */
2159 for (bus = 0; bus < MAX_BUSES; bus++)
2160 for (target = 0; target < MAX_TARGETS; target++) {
2161 nv->bus[bus].target[target].parameter.
2162 auto_request_sense = 1;
2165 qla1280_set_defaults(ha);
2168 qla1280_print_settings(nv);
2170 /* Disable RISC load of firmware. */
2171 ha->flags.disable_risc_code_load =
2172 nv->cntr_flags_1.disable_loading_risc_code;
2174 if (IS_ISP1040(ha)) {
2175 uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
2177 hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
2179 cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
2180 cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
2181 ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
2183 /* Busted fifo, says mjacob. */
2184 if (hwrev != ISP_CFG0_1040A)
2185 cfg1 |= nv->isp_config.fifo_threshold << 4;
2187 cfg1 |= nv->isp_config.burst_enable << 2;
2188 WRT_REG_WORD(&reg->cfg_1, cfg1);
2190 WRT_REG_WORD(&reg->cdma_cfg, cdma_conf | CDMA_CONF_BENAB);
2191 WRT_REG_WORD(&reg->ddma_cfg, ddma_conf | DDMA_CONF_BENAB);
2193 uint16_t cfg1, term;
2195 /* Set ISP hardware DMA burst */
2196 cfg1 = nv->isp_config.fifo_threshold << 4;
2197 cfg1 |= nv->isp_config.burst_enable << 2;
2198 /* Enable DMA arbitration on dual channel controllers */
2201 WRT_REG_WORD(&reg->cfg_1, cfg1);
2203 /* Set SCSI termination. */
2204 WRT_REG_WORD(&reg->gpio_enable,
2205 BIT_7 | BIT_3 | BIT_2 | BIT_1 | BIT_0);
2206 term = nv->termination.scsi_bus_1_control;
2207 term |= nv->termination.scsi_bus_0_control << 2;
2208 term |= nv->termination.auto_term_support << 7;
2209 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2210 WRT_REG_WORD(&reg->gpio_data, term);
2212 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2214 /* ISP parameter word. */
2215 mb[0] = MBC_SET_SYSTEM_PARAMETER;
2216 mb[1] = nv->isp_parameter;
2217 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2219 if (IS_ISP1x40(ha)) {
2220 /* clock rate - for qla1240 and older, only */
2221 mb[0] = MBC_SET_CLOCK_RATE;
2223 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2226 /* Firmware feature word. */
2227 mb[0] = MBC_SET_FIRMWARE_FEATURES;
2228 mb[1] = nv->firmware_feature.f.enable_fast_posting;
2229 mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1;
2230 mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5;
2231 #if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2)
2232 if (ia64_platform_is("sn2")) {
2233 printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA "
2234 "workaround\n", ha->host_no);
2235 mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */
2238 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2240 /* Retry count and delay. */
2241 mb[0] = MBC_SET_RETRY_COUNT;
2242 mb[1] = nv->bus[0].retry_count;
2243 mb[2] = nv->bus[0].retry_delay;
2244 mb[6] = nv->bus[1].retry_count;
2245 mb[7] = nv->bus[1].retry_delay;
2246 status |= qla1280_mailbox_command(ha, BIT_7 | BIT_6 | BIT_2 |
2247 BIT_1 | BIT_0, &mb[0]);
2249 /* ASYNC data setup time. */
2250 mb[0] = MBC_SET_ASYNC_DATA_SETUP;
2251 mb[1] = nv->bus[0].config_2.async_data_setup_time;
2252 mb[2] = nv->bus[1].config_2.async_data_setup_time;
2253 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2255 /* Active negation states. */
2256 mb[0] = MBC_SET_ACTIVE_NEGATION;
2258 if (nv->bus[0].config_2.req_ack_active_negation)
2260 if (nv->bus[0].config_2.data_line_active_negation)
2263 if (nv->bus[1].config_2.req_ack_active_negation)
2265 if (nv->bus[1].config_2.data_line_active_negation)
2267 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2269 mb[0] = MBC_SET_DATA_OVERRUN_RECOVERY;
2270 mb[1] = 2; /* Reset SCSI bus and return all outstanding IO */
2271 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2274 mb[0] = MBC_SET_PCI_CONTROL;
2275 mb[1] = BIT_1; /* Data DMA Channel Burst Enable */
2276 mb[2] = BIT_1; /* Command DMA Channel Burst Enable */
2277 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2279 mb[0] = MBC_SET_TAG_AGE_LIMIT;
2281 status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb);
2283 /* Selection timeout. */
2284 mb[0] = MBC_SET_SELECTION_TIMEOUT;
2285 mb[1] = nv->bus[0].selection_timeout;
2286 mb[2] = nv->bus[1].selection_timeout;
2287 status |= qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, mb);
2289 for (bus = 0; bus < ha->ports; bus++)
2290 status |= qla1280_config_bus(ha, bus);
2293 dprintk(2, "qla1280_nvram_config: **** FAILED ****\n");
2295 LEAVE("qla1280_nvram_config");
2300 * Get NVRAM data word
2301 * Calculates word position in NVRAM and calls request routine to
2302 * get the word from NVRAM.
2305 * ha = adapter block pointer.
2306 * address = NVRAM word address.
2312 qla1280_get_nvram_word(struct scsi_qla_host *ha, uint32_t address)
2317 nv_cmd = address << 16;
2318 nv_cmd |= NV_READ_OP;
2320 data = le16_to_cpu(qla1280_nvram_request(ha, nv_cmd));
2322 dprintk(8, "qla1280_get_nvram_word: exiting normally NVRAM data = "
2330 * Sends read command to NVRAM and gets data from NVRAM.
2333 * ha = adapter block pointer.
2334 * nv_cmd = Bit 26 = start bit
2335 * Bit 25, 24 = opcode
2336 * Bit 23-16 = address
2337 * Bit 15-0 = write data
2343 qla1280_nvram_request(struct scsi_qla_host *ha, uint32_t nv_cmd)
2345 struct device_reg __iomem *reg = ha->iobase;
2350 /* Send command to NVRAM. */
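/*
 * The command is clocked out to the serial NVRAM one bit at a time, most
 * significant bit first (start bit, opcode, then address); nv_cmd is
 * presumably left-aligned beforehand so that BIT_31 always holds the next
 * bit to shift out.
 */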
2353 for (cnt = 0; cnt < 11; cnt++) {
2354 if (nv_cmd & BIT_31)
2355 qla1280_nv_write(ha, NV_DATA_OUT);
2357 qla1280_nv_write(ha, 0);
2361 /* Read data from NVRAM. */
2363 for (cnt = 0; cnt < 16; cnt++) {
2364 WRT_REG_WORD(&reg->nvram, (NV_SELECT | NV_CLOCK));
2365 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2368 reg_data = RD_REG_WORD(&reg->nvram);
2369 if (reg_data & NV_DATA_IN)
2371 WRT_REG_WORD(&reg->nvram, NV_SELECT);
2372 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2376 /* Deselect chip. */
2378 WRT_REG_WORD(&reg->nvram, NV_DESELECT);
2379 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2386 qla1280_nv_write(struct scsi_qla_host *ha, uint16_t data)
2388 struct device_reg __iomem *reg = ha->iobase;
2390 WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2391 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2393 WRT_REG_WORD(&reg->nvram, data | NV_SELECT | NV_CLOCK);
2394 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2396 WRT_REG_WORD(&reg->nvram, data | NV_SELECT);
2397 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2403 * Issue mailbox command and waits for completion.
2406 * ha = adapter block pointer.
2407 * mr = mailbox registers to load.
2408 * mb = data pointer for mailbox registers.
2411 * mb[MAILBOX_REGISTER_COUNT] = returned mailbox data.
2417 qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
2419 struct device_reg __iomem *reg = ha->iobase;
2425 uint16_t *optr, *iptr;
2426 uint16_t __iomem *mptr;
2428 DECLARE_COMPLETION_ONSTACK(wait);
2429 struct timer_list timer;
2431 ENTER("qla1280_mailbox_command");
2433 if (ha->mailbox_wait) {
2434 printk(KERN_ERR "Warning: mailbox wait already in use!\n");
2436 ha->mailbox_wait = &wait;
2439 * We really should start out by verifying that the mailbox is
2440 * available before we start sending the command data
2442 /* Load mailbox registers. */
2443 mptr = (uint16_t __iomem *) &reg->mailbox0;
2445 for (cnt = 0; cnt < MAILBOX_REGISTER_COUNT; cnt++) {
2447 WRT_REG_WORD(mptr, (*iptr));
2455 /* Issue set host interrupt command. */
2457 /* set up a timer just in case we're really jammed */
2459 timer.expires = jiffies + 20*HZ;
2460 timer.data = (unsigned long)ha;
2461 timer.function = qla1280_mailbox_timeout;
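	/*
	 * Guard timer: if the ISP never raises the mailbox interrupt,
	 * qla1280_mailbox_timeout() presumably completes the wait below
	 * after 20 seconds so this path cannot hang forever.
	 */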
2464 spin_unlock_irq(ha->host->host_lock);
2465 WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
2466 data = qla1280_debounce_register(&reg->istatus);
2468 wait_for_completion(&wait);
2469 del_timer_sync(&timer);
2471 spin_lock_irq(ha->host->host_lock);
2473 ha->mailbox_wait = NULL;
2475 /* Check for mailbox command timeout. */
2476 if (ha->mailbox_out[0] != MBS_CMD_CMP) {
2477 printk(KERN_WARNING "qla1280_mailbox_command: Command failed, "
2478 "mailbox0 = 0x%04x, mailbox_out0 = 0x%04x, istatus = "
2480 mb[0], ha->mailbox_out[0], RD_REG_WORD(&reg->istatus));
2481 printk(KERN_WARNING "m0 %04x, m1 %04x, m2 %04x, m3 %04x\n",
2482 RD_REG_WORD(&reg->mailbox0), RD_REG_WORD(&reg->mailbox1),
2483 RD_REG_WORD(&reg->mailbox2), RD_REG_WORD(&reg->mailbox3));
2484 printk(KERN_WARNING "m4 %04x, m5 %04x, m6 %04x, m7 %04x\n",
2485 RD_REG_WORD(&reg->mailbox4), RD_REG_WORD(&reg->mailbox5),
2486 RD_REG_WORD(&reg->mailbox6), RD_REG_WORD(&reg->mailbox7));
2490 /* Load return mailbox registers. */
2492 iptr = (uint16_t *) &ha->mailbox_out[0];
2493 mr = MAILBOX_REGISTER_COUNT;
2494 memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
2497 /* Go check for any response interrupts pending. */
2498 qla1280_isr(ha, &done_q);
2501 if (ha->flags.reset_marker)
2502 qla1280_rst_aen(ha);
2505 if (!list_empty(&done_q))
2506 qla1280_done(ha, &done_q);
2510 dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
2511 "0x%x ****\n", mb[0]);
2513 LEAVE("qla1280_mailbox_command");
2519 * Polls ISP for interrupts.
2522 * ha = adapter block pointer.
2525 qla1280_poll(struct scsi_qla_host *ha)
2527 struct device_reg __iomem *reg = ha->iobase;
2531 /* ENTER("qla1280_poll"); */
2533 /* Check for pending interrupts. */
2534 data = RD_REG_WORD(&reg->istatus);
2535 if (data & RISC_INT)
2536 qla1280_isr(ha, &done_q);
2538 if (!ha->mailbox_wait) {
2539 if (ha->flags.reset_marker)
2540 qla1280_rst_aen(ha);
2543 if (!list_empty(&done_q))
2546 /* LEAVE("qla1280_poll"); */
2551 * Issue SCSI bus reset.
2554 * ha = adapter block pointer.
2555 * bus = SCSI bus number.
2561 qla1280_bus_reset(struct scsi_qla_host *ha, int bus)
2563 uint16_t mb[MAILBOX_REGISTER_COUNT];
2564 uint16_t reset_delay;
2567 dprintk(3, "qla1280_bus_reset: entered\n");
2569 if (qla1280_verbose)
2570 printk(KERN_INFO "scsi(%li:%i): Resetting SCSI BUS\n",
2573 reset_delay = ha->bus_settings[bus].bus_reset_delay;
2574 mb[0] = MBC_BUS_RESET;
2575 mb[1] = reset_delay;
2576 mb[2] = (uint16_t) bus;
2577 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2580 if (ha->bus_settings[bus].failed_reset_count > 2)
2581 ha->bus_settings[bus].scsi_bus_dead = 1;
2582 ha->bus_settings[bus].failed_reset_count++;
2584 spin_unlock_irq(ha->host->host_lock);
2585 ssleep(reset_delay);
2586 spin_lock_irq(ha->host->host_lock);
2588 ha->bus_settings[bus].scsi_bus_dead = 0;
2589 ha->bus_settings[bus].failed_reset_count = 0;
2590 ha->bus_settings[bus].reset_marker = 0;
2591 /* Issue marker command. */
2592 qla1280_marker(ha, bus, 0, 0, MK_SYNC_ALL);
2596 * We should probably call qla1280_set_target_parameters()
2597 * here as well for all devices on the bus.
2601 dprintk(2, "qla1280_bus_reset: **** FAILED ****\n");
2603 dprintk(3, "qla1280_bus_reset: exiting normally\n");
2609 * qla1280_device_reset
2610 * Issue bus device reset message to the target.
2613 * ha = adapter block pointer.
2614 * bus = SCSI BUS number.
2621 qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
2623 uint16_t mb[MAILBOX_REGISTER_COUNT];
2626 ENTER("qla1280_device_reset");
2628 mb[0] = MBC_ABORT_TARGET;
2629 mb[1] = (bus ? (target | BIT_7) : target) << 8;
2631 status = qla1280_mailbox_command(ha, BIT_2 | BIT_1 | BIT_0, &mb[0]);
2633 /* Issue marker command. */
2634 qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
2637 dprintk(2, "qla1280_device_reset: **** FAILED ****\n");
2639 LEAVE("qla1280_device_reset");
2644 * qla1280_abort_device
2645 * Issue an abort message to the device
2648 * ha = adapter block pointer.
2657 qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun)
2659 uint16_t mb[MAILBOX_REGISTER_COUNT];
2662 ENTER("qla1280_abort_device");
2664 mb[0] = MBC_ABORT_DEVICE;
2665 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2666 status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
2668 /* Issue marker command. */
2669 qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN);
2672 dprintk(2, "qla1280_abort_device: **** FAILED ****\n");
2674 LEAVE("qla1280_abort_device");
2679 * qla1280_abort_command
2680 * Abort command aborts a specified IOCB.
2683 * ha = adapter block pointer.
2684 * sp = SB structure pointer.
2690 qla1280_abort_command(struct scsi_qla_host *ha, struct srb * sp, int handle)
2692 uint16_t mb[MAILBOX_REGISTER_COUNT];
2693 unsigned int bus, target, lun;
2696 ENTER("qla1280_abort_command");
2698 bus = SCSI_BUS_32(sp->cmd);
2699 target = SCSI_TCN_32(sp->cmd);
2700 lun = SCSI_LUN_32(sp->cmd);
2702 sp->flags |= SRB_ABORT_PENDING;
2704 mb[0] = MBC_ABORT_COMMAND;
2705 mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
2706 mb[2] = handle >> 16;
2707 mb[3] = handle & 0xffff;
2708 status = qla1280_mailbox_command(ha, 0x0f, &mb[0]);
2711 dprintk(2, "qla1280_abort_command: **** FAILED ****\n");
2712 sp->flags &= ~SRB_ABORT_PENDING;
2716 LEAVE("qla1280_abort_command");
2721 * qla1280_reset_adapter
2725 * ha = adapter block pointer.
2728 qla1280_reset_adapter(struct scsi_qla_host *ha)
2730 struct device_reg __iomem *reg = ha->iobase;
2732 ENTER("qla1280_reset_adapter");
2734 /* Disable ISP chip */
2735 ha->flags.online = 0;
2736 WRT_REG_WORD(&reg->ictrl, ISP_RESET);
2737 WRT_REG_WORD(&reg->host_cmd,
2738 HC_RESET_RISC | HC_RELEASE_RISC | HC_DISABLE_BIOS);
2739 RD_REG_WORD(&reg->id_l); /* Flush PCI write */
2741 LEAVE("qla1280_reset_adapter");
2745 * Issue marker command.
2746 * Function issues marker IOCB.
2749 * ha = adapter block pointer.
2750 * bus = SCSI BUS number
2753 * type = marker modifier
2756 qla1280_marker(struct scsi_qla_host *ha, int bus, int id, int lun, u8 type)
2758 struct mrk_entry *pkt;
2760 ENTER("qla1280_marker");
2762 /* Get request packet. */
2763 if ((pkt = (struct mrk_entry *) qla1280_req_pkt(ha))) {
2764 pkt->entry_type = MARKER_TYPE;
2765 pkt->lun = (uint8_t) lun;
2766 pkt->target = (uint8_t) (bus ? (id | BIT_7) : id);
2767 pkt->modifier = type;
2768 pkt->entry_status = 0;
2770 /* Issue command to ISP */
2771 qla1280_isp_cmd(ha);
2774 LEAVE("qla1280_marker");
2779 * qla1280_64bit_start_scsi
2780 * The start SCSI is responsible for building request packets on
2781 * request ring and modifying ISP input pointer.
2784 * ha = adapter block pointer.
2785 * sp = SB structure pointer.
2788 * 0 = success, was able to issue command.
2790 #ifdef QLA_64BIT_PTR
2792 qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
2794 struct device_reg __iomem *reg = ha->iobase;
2795 struct scsi_cmnd *cmd = sp->cmd;
2796 cmd_a64_entry_t *pkt;
2798 dma_addr_t dma_handle;
2805 ENTER("qla1280_64bit_start_scsi:");
2807 /* Calculate number of entries and segments required. */
2809 seg_cnt = scsi_dma_map(cmd);
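	/*
	 * A 64-bit command IOCB carries two data segment descriptors and
	 * each 64-bit continuation IOCB carries five more, which is where
	 * the divide/modulo by 5 below comes from.
	 */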
2812 req_cnt += (seg_cnt - 2) / 5;
2813 if ((seg_cnt - 2) % 5)
2816 } else if (seg_cnt < 0) {
2821 if ((req_cnt + 2) >= ha->req_q_cnt) {
2822 /* Calculate number of free request entries. */
2823 cnt = RD_REG_WORD(&reg->mailbox4);
2824 if (ha->req_ring_index < cnt)
2825 ha->req_q_cnt = cnt - ha->req_ring_index;
2828 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
2831 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
2832 ha->req_q_cnt, seg_cnt);
2834 /* If room for request in request ring. */
2835 if ((req_cnt + 2) >= ha->req_q_cnt) {
2837 dprintk(2, "qla1280_64bit_start_scsi: in-ptr=0x%x req_q_cnt="
2838 "0x%x req_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
2843 /* Check for room in outstanding command list. */
2844 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
2845 ha->outstanding_cmds[cnt] != NULL; cnt++);
2847 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
2849 dprintk(2, "qla1280_start_scsi: NO ROOM IN "
2850 "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
2854 ha->outstanding_cmds[cnt] = sp;
2855 ha->req_q_cnt -= req_cnt;
2856 CMD_HANDLE(sp->cmd) = (unsigned char *)(unsigned long)(cnt + 1);
2858 dprintk(2, "start: cmd=%p sp=%p CDB=%x, handle %lx\n", cmd, sp,
2859 cmd->cmnd[0], (long)CMD_HANDLE(sp->cmd));
2860 dprintk(2, " bus %i, target %i, lun %i\n",
2861 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
2862 qla1280_dump_buffer(2, cmd->cmnd, MAX_COMMAND_SIZE);
2865 * Build command packet.
2867 pkt = (cmd_a64_entry_t *) ha->request_ring_ptr;
2869 pkt->entry_type = COMMAND_A64_TYPE;
2870 pkt->entry_count = (uint8_t) req_cnt;
2871 pkt->sys_define = (uint8_t) ha->req_ring_index;
2872 pkt->entry_status = 0;
2873 pkt->handle = cpu_to_le32(cnt);
2875 /* Zero out remaining portion of packet. */
2876 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
2878 /* Set ISP command timeout. */
2879 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
2881 /* Set device target ID and LUN */
2882 pkt->lun = SCSI_LUN_32(cmd);
2883 pkt->target = SCSI_BUS_32(cmd) ?
2884 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
2886 /* Enable simple tag queuing if device supports it. */
2887 if (cmd->device->simple_tags)
2888 pkt->control_flags |= cpu_to_le16(BIT_3);
2890 /* Load SCSI command packet. */
2891 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
2892 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
2893 /* dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
2895 /* Set transfer direction. */
2896 dir = qla1280_data_direction(cmd);
2897 pkt->control_flags |= cpu_to_le16(dir);
2899 /* Set total data segment count. */
2900 pkt->dseg_count = cpu_to_le16(seg_cnt);
2903 * Load data segments.
2905 if (seg_cnt) { /* If data transfer. */
2906 struct scatterlist *sg, *s;
2907 int remseg = seg_cnt;
2909 sg = scsi_sglist(cmd);
2911 /* Setup packet address segment pointer. */
2912 dword_ptr = (u32 *)&pkt->dseg_0_address;
2914 /* Load command entry data segments. */
2915 for_each_sg(sg, s, seg_cnt, cnt) {
2919 dma_handle = sg_dma_address(s);
2920 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2921 if (ha->flags.use_pci_vchannel)
2922 sn_pci_set_vchan(ha->pdev,
2923 (unsigned long *)&dma_handle,
2927 cpu_to_le32(pci_dma_lo32(dma_handle));
2929 cpu_to_le32(pci_dma_hi32(dma_handle));
2930 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
2931 dprintk(3, "S/G Segment phys_addr=%x %x, len=0x%x\n",
2932 cpu_to_le32(pci_dma_hi32(dma_handle)),
2933 cpu_to_le32(pci_dma_lo32(dma_handle)),
2934 cpu_to_le32(sg_dma_len(s)));
2937 dprintk(5, "qla1280_64bit_start_scsi: Scatter/gather "
2938 "command packet data - b %i, t %i, l %i \n",
2939 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
2941 qla1280_dump_buffer(5, (char *)pkt,
2942 REQUEST_ENTRY_SIZE);
2945 * Build continuation packets.
2947 dprintk(3, "S/G Building Continuation...seg_cnt=0x%x "
2948 "remains\n", seg_cnt);
2950 while (remseg > 0) {
2951 /* Update sg start */
2953 /* Adjust ring index. */
2954 ha->req_ring_index++;
2955 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
2956 ha->req_ring_index = 0;
2957 ha->request_ring_ptr =
2960 ha->request_ring_ptr++;
2962 pkt = (cmd_a64_entry_t *)ha->request_ring_ptr;
2964 /* Zero out packet. */
2965 memset(pkt, 0, REQUEST_ENTRY_SIZE);
2967 /* Load packet defaults. */
2968 ((struct cont_a64_entry *) pkt)->entry_type =
2970 ((struct cont_a64_entry *) pkt)->entry_count = 1;
2971 ((struct cont_a64_entry *) pkt)->sys_define =
2972 (uint8_t)ha->req_ring_index;
2973 /* Setup packet address segment pointer. */
2975 (u32 *)&((struct cont_a64_entry *) pkt)->dseg_0_address;
2977 /* Load continuation entry data segments. */
2978 for_each_sg(sg, s, remseg, cnt) {
2981 dma_handle = sg_dma_address(s);
2982 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2)
2983 if (ha->flags.use_pci_vchannel)
2984 sn_pci_set_vchan(ha->pdev,
2985 (unsigned long *)&dma_handle,
2989 cpu_to_le32(pci_dma_lo32(dma_handle));
2991 cpu_to_le32(pci_dma_hi32(dma_handle));
2993 cpu_to_le32(sg_dma_len(s));
2994 dprintk(3, "S/G Segment Cont. phys_addr=%x %x, len=0x%x\n",
2995 cpu_to_le32(pci_dma_hi32(dma_handle)),
2996 cpu_to_le32(pci_dma_lo32(dma_handle)),
2997 cpu_to_le32(sg_dma_len(s)));
3000 dprintk(5, "qla1280_64bit_start_scsi: "
3001 "continuation packet data - b %i, t "
3002 "%i, l %i \n", SCSI_BUS_32(cmd),
3003 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3004 qla1280_dump_buffer(5, (char *)pkt,
3005 REQUEST_ENTRY_SIZE);
3007 } else { /* No data transfer */
3008 dprintk(5, "qla1280_64bit_start_scsi: No data, command "
3009 "packet data - b %i, t %i, l %i \n",
3010 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3011 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3013 /* Adjust ring index. */
3014 ha->req_ring_index++;
3015 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3016 ha->req_ring_index = 0;
3017 ha->request_ring_ptr = ha->request_ring;
3019 ha->request_ring_ptr++;
3021 /* Set chip new ring index. */
3023 "qla1280_64bit_start_scsi: Wakeup RISC for pending command\n");
3024 sp->flags |= SRB_SENT;
3026 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3027 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3032 dprintk(2, "qla1280_64bit_start_scsi: **** FAILED ****\n");
3034 dprintk(3, "qla1280_64bit_start_scsi: exiting normally\n");
3038 #else /* !QLA_64BIT_PTR */
3041 * qla1280_32bit_start_scsi
3042 * The start SCSI is responsible for building request packets on
3043 * request ring and modifying ISP input pointer.
3045 * The Qlogic firmware interface allows every queue slot to have a SCSI
3046 * command and up to 4 scatter/gather (SG) entries. If we need more
3047 * than 4 SG entries, then continuation entries are used that can
3048 * hold another 7 entries each. The start routine determines if there
3049 * are enough empty slots and then builds the combination of requests to
3050 * fulfill the OS request.
3053 * ha = adapter block pointer.
3054 * sp = SCSI Request Block structure pointer.
3057 * 0 = success, was able to issue command.
3060 qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
3062 struct device_reg __iomem *reg = ha->iobase;
3063 struct scsi_cmnd *cmd = sp->cmd;
3064 struct cmd_entry *pkt;
3072 ENTER("qla1280_32bit_start_scsi");
3074 dprintk(1, "32bit_start: cmd=%p sp=%p CDB=%x\n", cmd, sp,
3077 /* Calculate number of entries and segments required. */
3079 seg_cnt = scsi_dma_map(cmd);
3082 * if greater than four sg entries then we need to allocate
3083 * continuation entries
3086 req_cnt += (seg_cnt - 4) / 7;
3087 if ((seg_cnt - 4) % 7)
3090 dprintk(3, "S/G Transfer cmd=%p seg_cnt=0x%x, req_cnt=%x\n",
3091 cmd, seg_cnt, req_cnt);
3092 } else if (seg_cnt < 0) {
3097 if ((req_cnt + 2) >= ha->req_q_cnt) {
3098 /* Calculate number of free request entries. */
3099 cnt = RD_REG_WORD(&reg->mailbox4);
3100 if (ha->req_ring_index < cnt)
3101 ha->req_q_cnt = cnt - ha->req_ring_index;
3104 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3107 dprintk(3, "Number of free entries=(%d) seg_cnt=0x%x\n",
3108 ha->req_q_cnt, seg_cnt);
3109 /* If room for request in request ring. */
3110 if ((req_cnt + 2) >= ha->req_q_cnt) {
3112 dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
3113 "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
3114 ha->req_q_cnt, req_cnt);
3118 /* Check for empty slot in outstanding command list. */
3119 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS &&
3120 (ha->outstanding_cmds[cnt] != 0); cnt++) ;
3122 if (cnt >= MAX_OUTSTANDING_COMMANDS) {
3124 dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
3125 "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
3129 CMD_HANDLE(sp->cmd) = (unsigned char *) (unsigned long)(cnt + 1);
3130 ha->outstanding_cmds[cnt] = sp;
3131 ha->req_q_cnt -= req_cnt;
3134 * Build command packet.
3136 pkt = (struct cmd_entry *) ha->request_ring_ptr;
3138 pkt->entry_type = COMMAND_TYPE;
3139 pkt->entry_count = (uint8_t) req_cnt;
3140 pkt->sys_define = (uint8_t) ha->req_ring_index;
3141 pkt->entry_status = 0;
3142 pkt->handle = cpu_to_le32(cnt);
3144 /* Zero out remaining portion of packet. */
3145 memset(((char *)pkt + 8), 0, (REQUEST_ENTRY_SIZE - 8));
3147 /* Set ISP command timeout. */
3148 pkt->timeout = cpu_to_le16(cmd->request->timeout/HZ);
3150 /* Set device target ID and LUN */
3151 pkt->lun = SCSI_LUN_32(cmd);
3152 pkt->target = SCSI_BUS_32(cmd) ?
3153 (SCSI_TCN_32(cmd) | BIT_7) : SCSI_TCN_32(cmd);
3155 /* Enable simple tag queuing if device supports it. */
3156 if (cmd->device->simple_tags)
3157 pkt->control_flags |= cpu_to_le16(BIT_3);
3159 /* Load SCSI command packet. */
3160 pkt->cdb_len = cpu_to_le16(CMD_CDBLEN(cmd));
3161 memcpy(pkt->scsi_cdb, CMD_CDBP(cmd), CMD_CDBLEN(cmd));
3163 /*dprintk(1, "Build packet for command[0]=0x%x\n",pkt->scsi_cdb[0]); */
3164 /* Set transfer direction. */
3165 dir = qla1280_data_direction(cmd);
3166 pkt->control_flags |= cpu_to_le16(dir);
3168 /* Set total data segment count. */
3169 pkt->dseg_count = cpu_to_le16(seg_cnt);
3172 * Load data segments.
3175 struct scatterlist *sg, *s;
3176 int remseg = seg_cnt;
3178 sg = scsi_sglist(cmd);
3180 /* Setup packet address segment pointer. */
3181 dword_ptr = &pkt->dseg_0_address;
3183 dprintk(3, "Building S/G data segments..\n");
3184 qla1280_dump_buffer(1, (char *)sg, 4 * 16);
3186 /* Load command entry data segments. */
3187 for_each_sg(sg, s, seg_cnt, cnt) {
3191 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3192 *dword_ptr++ = cpu_to_le32(sg_dma_len(s));
3193 dprintk(3, "S/G Segment phys_addr=0x%lx, len=0x%x\n",
3194 (pci_dma_lo32(sg_dma_address(s))),
3199 * Build continuation packets.
3201 dprintk(3, "S/G Building Continuation"
3202 "...seg_cnt=0x%x remains\n", seg_cnt);
3203 while (remseg > 0) {
3204 /* Continue from end point */
3206 /* Adjust ring index. */
3207 ha->req_ring_index++;
3208 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3209 ha->req_ring_index = 0;
3210 ha->request_ring_ptr =
3213 ha->request_ring_ptr++;
3215 pkt = (struct cmd_entry *)ha->request_ring_ptr;
3217 /* Zero out packet. */
3218 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3220 /* Load packet defaults. */
3221 ((struct cont_entry *) pkt)->
3222 entry_type = CONTINUE_TYPE;
3223 ((struct cont_entry *) pkt)->entry_count = 1;
3225 ((struct cont_entry *) pkt)->sys_define =
3226 (uint8_t) ha->req_ring_index;
3228 /* Setup packet address segment pointer. */
3230 &((struct cont_entry *) pkt)->dseg_0_address;
3232 /* Load continuation entry data segments. */
3233 for_each_sg(sg, s, remseg, cnt) {
3237 cpu_to_le32(pci_dma_lo32(sg_dma_address(s)));
3239 cpu_to_le32(sg_dma_len(s));
3241 "S/G Segment Cont. phys_addr=0x%x, "
3243 cpu_to_le32(pci_dma_lo32(sg_dma_address(s))),
3244 cpu_to_le32(sg_dma_len(s)));
3247 dprintk(5, "qla1280_32bit_start_scsi: "
3248 "continuation packet data - "
3249 "scsi(%i:%i:%i)\n", SCSI_BUS_32(cmd),
3250 SCSI_TCN_32(cmd), SCSI_LUN_32(cmd));
3251 qla1280_dump_buffer(5, (char *)pkt,
3252 REQUEST_ENTRY_SIZE);
3254 } else { /* No data transfer at all */
3255 dprintk(5, "qla1280_32bit_start_scsi: No data, command "
3256 "packet data - \n");
3257 qla1280_dump_buffer(5, (char *)pkt, REQUEST_ENTRY_SIZE);
3259 dprintk(5, "qla1280_32bit_start_scsi: First IOCB block:\n");
3260 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3261 REQUEST_ENTRY_SIZE);
3263 /* Adjust ring index. */
3264 ha->req_ring_index++;
3265 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3266 ha->req_ring_index = 0;
3267 ha->request_ring_ptr = ha->request_ring;
3269 ha->request_ring_ptr++;
3271 /* Set chip new ring index. */
3272 dprintk(2, "qla1280_32bit_start_scsi: Wakeup RISC "
3273 "for pending command\n");
3274 sp->flags |= SRB_SENT;
3276 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3277 /* Enforce mmio write ordering; see comment in qla1280_isp_cmd(). */
3282 dprintk(2, "qla1280_32bit_start_scsi: **** FAILED ****\n");
3284 LEAVE("qla1280_32bit_start_scsi");
3292 * Function is responsible for locking ring and
3293 * getting a zeroed out request packet.
3296 * ha = adapter block pointer.
3299 * 0 = failed to get slot.
3302 qla1280_req_pkt(struct scsi_qla_host *ha)
3304 struct device_reg __iomem *reg = ha->iobase;
3305 request_t *pkt = NULL;
3309 ENTER("qla1280_req_pkt");
3312 * This can be called from interrupt context, damn it!!!
3314 /* Wait for 30 seconds for slot. */
3315 for (timer = 15000000; timer; timer--) {
3316 if (ha->req_q_cnt > 0) {
3317 /* Calculate number of free request entries. */
3318 cnt = RD_REG_WORD(&reg->mailbox4);
3319 if (ha->req_ring_index < cnt)
3320 ha->req_q_cnt = cnt - ha->req_ring_index;
3323 REQUEST_ENTRY_CNT - (ha->req_ring_index - cnt);
3326 /* Found empty request ring slot? */
3327 if (ha->req_q_cnt > 0) {
3329 pkt = ha->request_ring_ptr;
3331 /* Zero out packet. */
3332 memset(pkt, 0, REQUEST_ENTRY_SIZE);
3335 * How can this be right when we have a ring
3338 /* Set system defined field. */
3339 pkt->sys_define = (uint8_t) ha->req_ring_index;
3341 /* Set entry count. */
3342 pkt->entry_count = 1;
3349 /* Check for pending interrupts. */
3354 dprintk(2, "qla1280_req_pkt: **** FAILED ****\n");
3356 dprintk(3, "qla1280_req_pkt: exiting normally\n");
3363 * Function is responsible for modifying ISP input pointer.
3364 * Releases ring lock.
3367 * ha = adapter block pointer.
3370 qla1280_isp_cmd(struct scsi_qla_host *ha)
3372 struct device_reg __iomem *reg = ha->iobase;
3374 ENTER("qla1280_isp_cmd");
3376 dprintk(5, "qla1280_isp_cmd: IOCB data:\n");
3377 qla1280_dump_buffer(5, (char *)ha->request_ring_ptr,
3378 REQUEST_ENTRY_SIZE);
3380 /* Adjust ring index. */
3381 ha->req_ring_index++;
3382 if (ha->req_ring_index == REQUEST_ENTRY_CNT) {
3383 ha->req_ring_index = 0;
3384 ha->request_ring_ptr = ha->request_ring;
3386 ha->request_ring_ptr++;
3389 * Update request index to mailbox4 (Request Queue In).
3390 * The mmiowb() ensures that this write is ordered with writes by other
3391 * CPUs. Without the mmiowb(), it is possible for the following:
3392 * CPUA posts write of index 5 to mailbox4
3393 * CPUA releases host lock
3394 * CPUB acquires host lock
3395 * CPUB posts write of index 6 to mailbox4
3396 * On PCI bus, order reverses and write of 6 posts, then index 5,
3397 * causing chip to issue full queue of stale commands
3398 * The mmiowb() prevents future writes from crossing the barrier.
3399 * See Documentation/DocBook/deviceiobook.tmpl for more information.
3401 WRT_REG_WORD(&reg->mailbox4, ha->req_ring_index);
3404 LEAVE("qla1280_isp_cmd");
3407 /****************************************************************************/
3408 /* Interrupt Service Routine. */
3409 /****************************************************************************/
3411 /****************************************************************************
3413 * Calls I/O done on command completion.
3416 * ha = adapter block pointer.
3417 * done_q = done queue.
3418 ****************************************************************************/
3420 qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
3422 struct device_reg __iomem *reg = ha->iobase;
3423 struct response *pkt;
3424 struct srb *sp = NULL;
3425 uint16_t mailbox[MAILBOX_REGISTER_COUNT];
3430 ENTER("qla1280_isr");
3432 istatus = RD_REG_WORD(&reg->istatus);
3433 if (!(istatus & (RISC_INT | PCI_INT)))
3436 /* Save mailbox register 5 */
3437 mailbox[5] = RD_REG_WORD(&reg->mailbox5);
3439 /* Check for mailbox interrupt. */
3441 mailbox[0] = RD_REG_WORD_dmasync(&reg->semaphore);
3443 if (mailbox[0] & BIT_0) {
3444 /* Get mailbox data. */
3445 /* dprintk(1, "qla1280_isr: In Get mailbox data \n"); */
3448 *wptr++ = RD_REG_WORD(&reg->mailbox0);
3449 *wptr++ = RD_REG_WORD(&reg->mailbox1);
3450 *wptr = RD_REG_WORD(&reg->mailbox2);
3451 if (mailbox[0] != MBA_SCSI_COMPLETION) {
3453 *wptr++ = RD_REG_WORD(&reg->mailbox3);
3454 *wptr++ = RD_REG_WORD(&reg->mailbox4);
3456 *wptr++ = RD_REG_WORD(&reg->mailbox6);
3457 *wptr = RD_REG_WORD(&reg->mailbox7);
3460 /* Release mailbox registers. */
3462 WRT_REG_WORD(&reg->semaphore, 0);
3463 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3465 dprintk(5, "qla1280_isr: mailbox interrupt mailbox[0] = 0x%x",
3468 /* Handle asynchronous event */
3469 switch (mailbox[0]) {
3470 case MBA_SCSI_COMPLETION: /* Response completion */
3471 dprintk(5, "qla1280_isr: mailbox SCSI response "
3474 if (ha->flags.online) {
3475 /* Get outstanding command index. */
3476 index = mailbox[2] << 16 | mailbox[1];
3478 /* Validate handle. */
3479 if (index < MAX_OUTSTANDING_COMMANDS)
3480 sp = ha->outstanding_cmds[index];
3485 /* Free outstanding command slot. */
3486 ha->outstanding_cmds[index] = NULL;
3488 /* Save ISP completion status */
3489 CMD_RESULT(sp->cmd) = 0;
3491 /* Place block on done queue */
3492 list_add_tail(&sp->list, done_q);
3495 * If we get here we have a real problem!
3498 "qla1280: ISP invalid handle");
3503 case MBA_BUS_RESET: /* SCSI Bus Reset */
3504 ha->flags.reset_marker = 1;
3505 index = mailbox[6] & BIT_0;
3506 ha->bus_settings[index].reset_marker = 1;
3508 printk(KERN_DEBUG "qla1280_isr(): index %i "
3509 "asynchronous BUS_RESET\n", index);
3512 case MBA_SYSTEM_ERR: /* System Error */
3514 "qla1280: ISP System Error - mbx1=%xh, mbx2="
3515 "%xh, mbx3=%xh\n", mailbox[1], mailbox[2],
3519 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
3521 "qla1280: ISP Request Transfer Error\n");
3524 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
3526 "qla1280: ISP Response Transfer Error\n");
3529 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
3530 dprintk(2, "qla1280_isr: asynchronous WAKEUP_THRES\n");
3533 case MBA_TIMEOUT_RESET: /* Execution Timeout Reset */
3535 "qla1280_isr: asynchronous TIMEOUT_RESET\n");
3538 case MBA_DEVICE_RESET: /* Bus Device Reset */
3539 printk(KERN_INFO "qla1280_isr(): asynchronous "
3540 "BUS_DEVICE_RESET\n");
3542 ha->flags.reset_marker = 1;
3543 index = mailbox[6] & BIT_0;
3544 ha->bus_settings[index].reset_marker = 1;
3547 case MBA_BUS_MODE_CHANGE:
3549 "qla1280_isr: asynchronous BUS_MODE_CHANGE\n");
3553 /* dprintk(1, "qla1280_isr: default case of switch MB \n"); */
3554 if (mailbox[0] < MBA_ASYNC_EVENT) {
3556 memcpy((uint16_t *) ha->mailbox_out, wptr,
3557 MAILBOX_REGISTER_COUNT *
3560 if(ha->mailbox_wait != NULL)
3561 complete(ha->mailbox_wait);
3566 WRT_REG_WORD(&reg->host_cmd, HC_CLR_RISC_INT);
3570 * We will receive interrupts during mailbox testing prior to
3571 * the card being marked online, hence the double check.
3573 if (!(ha->flags.online && !ha->mailbox_wait)) {
3574 dprintk(2, "qla1280_isr: Response pointer Error\n");
3578 if (mailbox[5] >= RESPONSE_ENTRY_CNT)
3581 while (ha->rsp_ring_index != mailbox[5]) {
3582 pkt = ha->response_ring_ptr;
3584 dprintk(5, "qla1280_isr: ha->rsp_ring_index = 0x%x, mailbox[5]"
3585 " = 0x%x\n", ha->rsp_ring_index, mailbox[5]);
3586 dprintk(5,"qla1280_isr: response packet data\n");
3587 qla1280_dump_buffer(5, (char *)pkt, RESPONSE_ENTRY_SIZE);
3589 if (pkt->entry_type == STATUS_TYPE) {
3590 if ((le16_to_cpu(pkt->scsi_status) & 0xff)
3591 || pkt->comp_status || pkt->entry_status) {
3592 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3593 "0x%x mailbox[5] = 0x%x, comp_status "
3594 "= 0x%x, scsi_status = 0x%x\n",
3595 ha->rsp_ring_index, mailbox[5],
3596 le16_to_cpu(pkt->comp_status),
3597 le16_to_cpu(pkt->scsi_status));
3600 dprintk(2, "qla1280_isr: ha->rsp_ring_index = "
3601 "0x%x, mailbox[5] = 0x%x\n",
3602 ha->rsp_ring_index, mailbox[5]);
3603 dprintk(2, "qla1280_isr: response packet data\n");
3604 qla1280_dump_buffer(2, (char *)pkt,
3605 RESPONSE_ENTRY_SIZE);
3608 if (pkt->entry_type == STATUS_TYPE || pkt->entry_status) {
3609 dprintk(2, "status: Cmd %p, handle %i\n",
3610 ha->outstanding_cmds[pkt->handle]->cmd,
3612 if (pkt->entry_type == STATUS_TYPE)
3613 qla1280_status_entry(ha, pkt, done_q);
3615 qla1280_error_entry(ha, pkt, done_q);
3616 /* Adjust ring index. */
3617 ha->rsp_ring_index++;
3618 if (ha->rsp_ring_index == RESPONSE_ENTRY_CNT) {
3619 ha->rsp_ring_index = 0;
3620 ha->response_ring_ptr = ha->response_ring;
3622 ha->response_ring_ptr++;
3623 WRT_REG_WORD(&reg->mailbox5, ha->rsp_ring_index);
3628 LEAVE("qla1280_isr");
3633 * Processes asynchronous reset.
3636 * ha = adapter block pointer.
3639 qla1280_rst_aen(struct scsi_qla_host *ha)
3643 ENTER("qla1280_rst_aen");
3645 if (ha->flags.online && !ha->flags.reset_active &&
3646 !ha->flags.abort_isp_active) {
3647 ha->flags.reset_active = 1;
3648 while (ha->flags.reset_marker) {
3649 /* Issue marker command. */
3650 ha->flags.reset_marker = 0;
3651 for (bus = 0; bus < ha->ports &&
3652 !ha->flags.reset_marker; bus++) {
3653 if (ha->bus_settings[bus].reset_marker) {
3654 ha->bus_settings[bus].reset_marker = 0;
3655 qla1280_marker(ha, bus, 0, 0,
3662 LEAVE("qla1280_rst_aen");
3667 * qla1280_status_entry
3668 * Processes received ISP status entry.
3671 * ha = adapter block pointer.
3672 * pkt = entry pointer.
3673 * done_q = done queue.
3676 qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
3677 struct list_head *done_q)
3679 unsigned int bus, target, lun;
3682 struct scsi_cmnd *cmd;
3683 uint32_t handle = le32_to_cpu(pkt->handle);
3684 uint16_t scsi_status = le16_to_cpu(pkt->scsi_status);
3685 uint16_t comp_status = le16_to_cpu(pkt->comp_status);
3687 ENTER("qla1280_status_entry");
3689 /* Validate handle. */
3690 if (handle < MAX_OUTSTANDING_COMMANDS)
3691 sp = ha->outstanding_cmds[handle];
3696 printk(KERN_WARNING "qla1280: Status Entry invalid handle\n");
3700 /* Free outstanding command slot. */
3701 ha->outstanding_cmds[handle] = NULL;
3705 /* Generate LU queue on cntrl, target, LUN */
3706 bus = SCSI_BUS_32(cmd);
3707 target = SCSI_TCN_32(cmd);
3708 lun = SCSI_LUN_32(cmd);
3710 if (comp_status || scsi_status) {
3711 dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
3712 "0x%x, handle = 0x%x\n", comp_status,
3713 scsi_status, handle);
3716 /* Target busy or queue full */
3717 if ((scsi_status & 0xFF) == SAM_STAT_TASK_SET_FULL ||
3718 (scsi_status & 0xFF) == SAM_STAT_BUSY) {
3719 CMD_RESULT(cmd) = scsi_status & 0xff;
3722 /* Save ISP completion status */
3723 CMD_RESULT(cmd) = qla1280_return_status(pkt, cmd);
3725 if (scsi_status & SAM_STAT_CHECK_CONDITION) {
3726 if (comp_status != CS_ARS_FAILED) {
3727 uint16_t req_sense_length =
3728 le16_to_cpu(pkt->req_sense_length);
3729 if (req_sense_length < CMD_SNSLEN(cmd))
3730 sense_sz = req_sense_length;
3733 * scsi_cmnd->sense_buffer is
3734 * 64 bytes, why only copy 63?
3735 * This looks wrong! /Jes
3737 sense_sz = CMD_SNSLEN(cmd) - 1;
3739 memcpy(cmd->sense_buffer,
3740 &pkt->req_sense_data, sense_sz);
3743 memset(cmd->sense_buffer + sense_sz, 0,
3744 SCSI_SENSE_BUFFERSIZE - sense_sz);
3746 dprintk(2, "qla1280_status_entry: Check "
3747 "condition Sense data, b %i, t %i, "
3748 "l %i\n", bus, target, lun);
3750 qla1280_dump_buffer(2,
3751 (char *)cmd->sense_buffer,
3756 /* Place command on done queue. */
3757 list_add_tail(&sp->list, done_q);
3759 LEAVE("qla1280_status_entry");
3763 * qla1280_error_entry
3764 * Processes error entry.
3767 * ha = adapter block pointer.
3768 * pkt = entry pointer.
3769 * done_q = done queue.
3772 qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
3773 struct list_head *done_q)
3776 uint32_t handle = le32_to_cpu(pkt->handle);
3778 ENTER("qla1280_error_entry");
3780 if (pkt->entry_status & BIT_3)
3781 dprintk(2, "qla1280_error_entry: BAD PAYLOAD flag error\n");
3782 else if (pkt->entry_status & BIT_2)
3783 dprintk(2, "qla1280_error_entry: BAD HEADER flag error\n");
3784 else if (pkt->entry_status & BIT_1)
3785 dprintk(2, "qla1280_error_entry: FULL flag error\n");
3787 dprintk(2, "qla1280_error_entry: UNKNOWN flag error\n");
3789 /* Validate handle. */
3790 if (handle < MAX_OUTSTANDING_COMMANDS)
3791 sp = ha->outstanding_cmds[handle];
3796 /* Free outstanding command slot. */
3797 ha->outstanding_cmds[handle] = NULL;
3799 /* Bad payload or header */
3800 if (pkt->entry_status & (BIT_3 + BIT_2)) {
3801 /* Bad payload or header, set error status. */
3802 /* CMD_RESULT(sp->cmd) = CS_BAD_PAYLOAD; */
3803 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3804 } else if (pkt->entry_status & BIT_1) { /* FULL flag */
3805 CMD_RESULT(sp->cmd) = DID_BUS_BUSY << 16;
3807 /* Set error status. */
3808 CMD_RESULT(sp->cmd) = DID_ERROR << 16;
3811 /* Place command on done queue. */
3812 list_add_tail(&sp->list, done_q);
3814 #ifdef QLA_64BIT_PTR
3815 else if (pkt->entry_type == COMMAND_A64_TYPE) {
3816 printk(KERN_WARNING "!qla1280: Error Entry invalid handle");
3820 LEAVE("qla1280_error_entry");
3825 * Resets ISP and aborts all outstanding commands.
3828 * ha = adapter block pointer.
3834 qla1280_abort_isp(struct scsi_qla_host *ha)
3836 struct device_reg __iomem *reg = ha->iobase;
3842 ENTER("qla1280_abort_isp");
3844 if (ha->flags.abort_isp_active || !ha->flags.online)
3847 ha->flags.abort_isp_active = 1;
3849 /* Disable ISP interrupts. */
3850 qla1280_disable_intrs(ha);
3851 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3852 RD_REG_WORD(&reg->id_l);
3854 printk(KERN_INFO "scsi(%li): dequeuing outstanding commands\n",
3856 /* Dequeue all commands in outstanding command list. */
3857 for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
3858 struct scsi_cmnd *cmd;
3859 sp = ha->outstanding_cmds[cnt];
3863 CMD_RESULT(cmd) = DID_RESET << 16;
3866 ha->outstanding_cmds[cnt] = NULL;
3868 (*cmd->scsi_done)(cmd);
3874 status = qla1280_load_firmware(ha);
3878 /* Setup adapter based on NVRAM parameters. */
3879 qla1280_nvram_config (ha);
3881 status = qla1280_init_rings(ha);
3885 /* Issue SCSI reset. */
3886 for (bus = 0; bus < ha->ports; bus++)
3887 qla1280_bus_reset(ha, bus);
3889 ha->flags.abort_isp_active = 0;
3893 "qla1280: ISP error recovery failed, board disabled");
3894 qla1280_reset_adapter(ha);
3895 dprintk(2, "qla1280_abort_isp: **** FAILED ****\n");
3898 LEAVE("qla1280_abort_isp");
3904 * qla1280_debounce_register
3905 * Debounce register.
3908 * port = register address.
3914 qla1280_debounce_register(volatile u16 __iomem * addr)
3919 ret = RD_REG_WORD(addr);
3920 ret2 = RD_REG_WORD(addr);
3927 ret = RD_REG_WORD(addr);
3928 ret2 = RD_REG_WORD(addr);
3929 } while (ret != ret2);
3935 /************************************************************************
3936 * qla1280_check_for_dead_scsi_bus *
3938 * This routine checks for a dead SCSI bus *
3939 ************************************************************************/
3940 #define SET_SXP_BANK 0x0100
3941 #define SCSI_PHASE_INVALID 0x87FF
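/*
 * The check below pauses the RISC, bank-switches cfg_1 to expose the SXP
 * registers, samples the SCSI control pins and then restores the original
 * configuration; a reading of SCSI_PHASE_INVALID is taken to mean there is
 * no valid bus phase, i.e. the bus is presumed dead.
 */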
3943 qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
3945 uint16_t config_reg, scsi_control;
3946 struct device_reg __iomem *reg = ha->iobase;
3948 if (ha->bus_settings[bus].scsi_bus_dead) {
3949 WRT_REG_WORD(&reg->host_cmd, HC_PAUSE_RISC);
3950 config_reg = RD_REG_WORD(&reg->cfg_1);
3951 WRT_REG_WORD(&reg->cfg_1, SET_SXP_BANK);
3952 scsi_control = RD_REG_WORD(&reg->scsiControlPins);
3953 WRT_REG_WORD(&reg->cfg_1, config_reg);
3954 WRT_REG_WORD(&reg->host_cmd, HC_RELEASE_RISC);
3956 if (scsi_control == SCSI_PHASE_INVALID) {
3957 ha->bus_settings[bus].scsi_bus_dead = 1;
3959 CMD_RESULT(cp) = DID_NO_CONNECT << 16;
3960 CMD_HANDLE(cp) = INVALID_HANDLE;
3961 /* ha->actthreads--; */
3963 (*(cp)->scsi_done)(cp);
3965 return 1; /* bus is dead */
3967 ha->bus_settings[bus].scsi_bus_dead = 0;
3968 ha->bus_settings[bus].failed_reset_count = 0;
3971 return 0; /* bus is not dead */
3975 qla1280_get_target_parameters(struct scsi_qla_host *ha,
3976 struct scsi_device *device)
3978 uint16_t mb[MAILBOX_REGISTER_COUNT];
3979 int bus, target, lun;
3981 bus = device->channel;
3982 target = device->id;
3986 mb[0] = MBC_GET_TARGET_PARAMETERS;
3987 mb[1] = (uint16_t) (bus ? target | BIT_7 : target);
3989 qla1280_mailbox_command(ha, BIT_6 | BIT_3 | BIT_2 | BIT_1 | BIT_0,
3992 printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
3995 printk(" Sync: period %d, offset %d",
3996 (mb[3] & 0xff), (mb[3] >> 8));
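	/*
	 * MBC_GET_TARGET_PARAMETERS returns the negotiated settings: the low
	 * byte of mb[3] is the sync period and the high byte the offset,
	 * while mb[2] presumably carries the capability bits tested below.
	 */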
3999 if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
4004 if (device->simple_tags)
4005 printk(", Tagged queuing: depth %d", device->queue_depth);
4012 __qla1280_dump_buffer(char *b, int size)
4017 printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 Ah "
4018 "Bh Ch Dh Eh Fh\n");
4019 printk(KERN_DEBUG "---------------------------------------------"
4020 "------------------\n");
4022 for (cnt = 0; cnt < size;) {
4025 printk("0x%02x", c);
4036 /**************************************************************************
4037 * ql1280_print_scsi_cmd
4039 **************************************************************************/
4041 __qla1280_print_scsi_cmd(struct scsi_cmnd *cmd)
4043 struct scsi_qla_host *ha;
4044 struct Scsi_Host *host = CMD_HOST(cmd);
4046 /* struct scatterlist *sg; */
4049 ha = (struct scsi_qla_host *)host->hostdata;
4051 sp = (struct srb *)CMD_SP(cmd);
4052 printk("SCSI Command @= 0x%p, Handle=0x%p\n", cmd, CMD_HANDLE(cmd));
4053 printk(" chan=%d, target = 0x%02x, lun = 0x%02x, cmd_len = 0x%02x\n",
4054 SCSI_BUS_32(cmd), SCSI_TCN_32(cmd), SCSI_LUN_32(cmd),
4057 for (i = 0; i < cmd->cmd_len; i++) {
4058 printk("0x%02x ", cmd->cmnd[i]);
4060 printk(" seg_cnt =%d\n", scsi_sg_count(cmd));
4061 printk(" request buffer=0x%p, request buffer len=0x%x\n",
4062 scsi_sglist(cmd), scsi_bufflen(cmd));
4065 sg = (struct scatterlist *) cmd->request_buffer;
4066 printk(" SG buffer: \n");
4067 qla1280_dump_buffer(1, (char *)sg, (cmd->use_sg*sizeof(struct scatterlist)));
4069 printk(" tag=%d, transfersize=0x%x \n",
4070 cmd->tag, cmd->transfersize);
4071 printk(" Pid=%li, SP=0x%p\n", cmd->serial_number, CMD_SP(cmd));
4072 printk(" underflow size = 0x%x, direction=0x%x\n",
4073 cmd->underflow, cmd->sc_data_direction);
4076 /**************************************************************************
4077 * ql1280_dump_device
4079 **************************************************************************/
4081 ql1280_dump_device(struct scsi_qla_host *ha)
4084 struct scsi_cmnd *cp;
4088 printk(KERN_DEBUG "Outstanding Commands on controller:\n");
4090 for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
4091 if ((sp = ha->outstanding_cmds[i]) == NULL)
4093 if ((cp = sp->cmd) == NULL)
4095 qla1280_print_scsi_cmd(1, cp);
4110 struct setup_tokens {
4115 static struct setup_tokens setup_token[] __initdata =
4117 { "nvram", TOKEN_NVRAM },
4118 { "sync", TOKEN_SYNC },
4119 { "wide", TOKEN_WIDE },
4120 { "ppr", TOKEN_PPR },
4121 { "verbose", TOKEN_VERBOSE },
4122 { "debug", TOKEN_DEBUG },
4126 /**************************************************************************
4129 * Handle boot parameters. This really needs to be changed so one
4130 * can specify per adapter parameters.
4131 **************************************************************************/
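/*
 * Options are given as token:value pairs separated by ';', where the value
 * may also be "yes" or "no". A plausible example, based on the token table
 * above, is "qla1280=verbose:1;nvram:no", which should enable verbose
 * output and skip the NVRAM settings.
 */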
4133 qla1280_setup(char *s)
4141 while (cp && (ptr = strchr(cp, ':'))) {
4143 if (!strcmp(ptr, "yes")) {
4146 } else if (!strcmp(ptr, "no")) {
4150 val = simple_strtoul(ptr, &ptr, 0);
4152 switch ((toke = qla1280_get_token(cp))) {
4155 driver_setup.no_nvram = 1;
4159 driver_setup.no_sync = 1;
4160 else if (val != 0x10000)
4161 driver_setup.sync_mask = val;
4165 driver_setup.no_wide = 1;
4166 else if (val != 0x10000)
4167 driver_setup.wide_mask = val;
4171 driver_setup.no_ppr = 1;
4172 else if (val != 0x10000)
4173 driver_setup.ppr_mask = val;
4176 qla1280_verbose = val;
4179 printk(KERN_INFO "qla1280: unknown boot option %s\n",
4183 cp = strchr(ptr, ';');
4195 qla1280_get_token(char *str)
4201 sep = strchr(str, ':');
4204 for (i = 0; i < ARRAY_SIZE(setup_token); i++) {
4205 if (!strncmp(setup_token[i].token, str, (sep - str))) {
4206 ret = setup_token[i].val;
4216 static struct scsi_host_template qla1280_driver_template = {
4217 .module = THIS_MODULE,
4218 .proc_name = "qla1280",
4219 .name = "Qlogic ISP 1280/12160",
4220 .info = qla1280_info,
4221 .slave_configure = qla1280_slave_configure,
4222 .queuecommand = qla1280_queuecommand,
4223 .eh_abort_handler = qla1280_eh_abort,
4224 .eh_device_reset_handler= qla1280_eh_device_reset,
4225 .eh_bus_reset_handler = qla1280_eh_bus_reset,
4226 .eh_host_reset_handler = qla1280_eh_adapter_reset,
4227 .bios_param = qla1280_biosparam,
4228 .can_queue = 0xfffff,
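	/* Effectively unbounded; the practical limit is the
	 * MAX_OUTSTANDING_COMMANDS slot array checked in the
	 * start_scsi routines. */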
4230 .sg_tablesize = SG_ALL,
4232 .use_clustering = ENABLE_CLUSTERING,
4236 static int __devinit
4237 qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4239 int devnum = id->driver_data;
4240 struct qla_boards *bdp = &ql1280_board_tbl[devnum];
4241 struct Scsi_Host *host;
4242 struct scsi_qla_host *ha;
4243 int error = -ENODEV;
4245 /* Bypass all AMI SUBSYS VENDOR IDs */
4246 if (pdev->subsystem_vendor == PCI_VENDOR_ID_AMI) {
4248 "qla1280: Skipping AMI SubSys Vendor ID Chip\n");
4252 printk(KERN_INFO "qla1280: %s found on PCI bus %i, dev %i\n",
4253 bdp->name, pdev->bus->number, PCI_SLOT(pdev->devfn));
4255 if (pci_enable_device(pdev)) {
4257 "qla1280: Failed to enabled pci device, aborting.\n");
4261 pci_set_master(pdev);
4264 host = scsi_host_alloc(&qla1280_driver_template, sizeof(*ha));
4267 "qla1280: Failed to register host, aborting.\n");
4268 goto error_disable_device;
4271 ha = (struct scsi_qla_host *)host->hostdata;
4272 memset(ha, 0, sizeof(struct scsi_qla_host));
4275 ha->devnum = devnum; /* specifies microcode load address */
4277 #ifdef QLA_64BIT_PTR
4278 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
4279 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4280 printk(KERN_WARNING "scsi(%li): Unable to set a "
4281 "suitable DMA mask - aborting\n", ha->host_no);
4283 goto error_put_host;
4286 dprintk(2, "scsi(%li): 64 Bit PCI Addressing Enabled\n",
4289 if (pci_set_dma_mask(ha->pdev, DMA_BIT_MASK(32))) {
4290 printk(KERN_WARNING "scsi(%li): Unable to set a "
4291 "suitable DMA mask - aborting\n", ha->host_no);
4293 goto error_put_host;
4297 ha->request_ring = pci_alloc_consistent(ha->pdev,
4298 ((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
4300 if (!ha->request_ring) {
4301 printk(KERN_INFO "qla1280: Failed to get request memory\n");
4302 goto error_put_host;
4305 ha->response_ring = pci_alloc_consistent(ha->pdev,
4306 ((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
4308 if (!ha->response_ring) {
4309 printk(KERN_INFO "qla1280: Failed to get response memory\n");
4310 goto error_free_request_ring;
4313 ha->ports = bdp->numPorts;
4316 ha->host_no = host->host_no;
4318 host->irq = pdev->irq;
4319 host->max_channel = bdp->numPorts - 1;
4320 host->max_lun = MAX_LUNS - 1;
4321 host->max_id = MAX_TARGETS;
4322 host->max_sectors = 1024;
4323 host->unique_id = host->host_no;
#if MEMORY_MAPPED_IO
	ha->mmpbase = pci_ioremap_bar(ha->pdev, 1);
	if (!ha->mmpbase) {
		printk(KERN_INFO "qla1280: Unable to map I/O memory\n");
		goto error_free_response_ring;
	}

	host->base = (unsigned long)ha->mmpbase;
	ha->iobase = (struct device_reg __iomem *)ha->mmpbase;
#else
	host->io_port = pci_resource_start(ha->pdev, 0);
	if (!request_region(host->io_port, 0xff, "qla1280")) {
		printk(KERN_INFO "qla1280: Failed to reserve i/o region "
		       "0x%04lx-0x%04lx - already in use\n",
		       host->io_port, host->io_port + 0xff);
		goto error_free_response_ring;
	}

	ha->iobase = (struct device_reg *)host->io_port;
#endif
	INIT_LIST_HEAD(&ha->done_q);

	/* Disable ISP interrupts. */
	qla1280_disable_intrs(ha);

	if (request_irq(pdev->irq, qla1280_intr_handler, IRQF_SHARED,
			"qla1280", ha)) {
		printk(KERN_ERR "qla1280: Failed to reserve interrupt %d, "
		       "already in use\n", pdev->irq);
		goto error_release_region;
	}
	/* load the F/W, read parameters, and init the H/W */
	if (qla1280_initialize_adapter(ha)) {
		printk(KERN_INFO "qla1x160: Failed to initialize adapter\n");
		goto error_free_irq;
	}

	/* set our host ID (need to do something about our two IDs) */
	host->this_id = ha->bus_settings[0].id;

	pci_set_drvdata(pdev, host);

	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto error_disable_adapter;
	scsi_scan_host(host);

	return 0;
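	/*
	 * Error exits: each label undoes the step above it, unwinding the
	 * initialization in reverse order.
	 */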
 error_disable_adapter:
	qla1280_disable_intrs(ha);
 error_free_irq:
	free_irq(pdev->irq, ha);
 error_release_region:
#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif
 error_free_response_ring:
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * sizeof(struct response)),
			ha->response_ring, ha->response_dma);
 error_free_request_ring:
	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * sizeof(request_t)),
			ha->request_ring, ha->request_dma);
 error_put_host:
	scsi_host_put(host);
 error_disable_device:
	pci_disable_device(pdev);
 error:
	return error;
}
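/*
 * Hot-unplug / module-unload path: tear down the adapter in the reverse
 * order of qla1280_probe_one().
 */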
static void __devexit
qla1280_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;

	scsi_remove_host(host);

	qla1280_disable_intrs(ha);

	free_irq(pdev->irq, ha);

#if MEMORY_MAPPED_IO
	iounmap(ha->mmpbase);
#else
	release_region(host->io_port, 0xff);
#endif

	pci_free_consistent(ha->pdev,
			((REQUEST_ENTRY_CNT + 1) * (sizeof(request_t))),
			ha->request_ring, ha->request_dma);
	pci_free_consistent(ha->pdev,
			((RESPONSE_ENTRY_CNT + 1) * (sizeof(struct response))),
			ha->response_ring, ha->response_dma);

	pci_disable_device(pdev);

	scsi_host_put(host);
}
static struct pci_driver qla1280_pci_driver = {
	.name		= "qla1280",
	.id_table	= qla1280_pci_tbl,
	.probe		= qla1280_probe_one,
	.remove		= __devexit_p(qla1280_remove_one),
};
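/*
 * The PCI core calls qla1280_probe_one() for each device matching
 * qla1280_pci_tbl once the driver is registered from qla1280_init() below.
 */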
static int __init
qla1280_init(void)
{
	if (sizeof(struct srb) > sizeof(struct scsi_pointer)) {
		printk(KERN_WARNING
		       "qla1280: struct srb too big, aborting\n");
		return -EINVAL;
	}

#ifdef MODULE
	/*
	 * If we are called as a module, the qla1280 pointer may not be null
	 * and it would point to our bootup string, just like on the lilo
	 * command line.  If not NULL, then process this config string with
	 * qla1280_setup.
	 *
	 * To add options at boot time add a line to your lilo.conf file like:
	 * append="qla1280=verbose,max_tags:{{255,255,255,255},{255,255,255,255}}"
	 * which will result in the first four devices on the first two
	 * controllers being set to a tagged queue depth of 255.
	 */
	if (qla1280)
		qla1280_setup(qla1280);
#endif

	return pci_register_driver(&qla1280_pci_driver);
}
static void __exit
qla1280_exit(void)
{
	pci_unregister_driver(&qla1280_pci_driver);
}
module_init(qla1280_init);
module_exit(qla1280_exit);

MODULE_AUTHOR("Qlogic & Jes Sorensen");
MODULE_DESCRIPTION("Qlogic ISP SCSI (qla1x80/qla1x160) driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE("qlogic/1040.bin");
MODULE_FIRMWARE("qlogic/1280.bin");
MODULE_FIRMWARE("qlogic/12160.bin");
MODULE_VERSION(QLA1280_VERSION);