2 * ipr.c -- driver for IBM Power Linux RAID adapters
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
6 * Copyright (C) 2003, 2004 IBM Corporation
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 * This driver is used to control the following SCSI adapters:
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 * PCI-X Dual Channel Ultra 320 SCSI Adapter
33 * PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 * Embedded SCSI adapter on p615 and p655 systems
36 * Supported Hardware Features:
37 * - Ultra 320 SCSI controller
38 * - PCI-X host interface
39 * - Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 * - Non-Volatile Write Cache
41 * - Supports attachment of non-RAID disks, tape, and optical devices
42 * - RAID Levels 0, 5, 10
44 * - Background Parity Checking
45 * - Background Data Scrubbing
46 * - Ability to increase the capacity of an existing RAID 5 disk array
50 * - Tagged command queuing
51 * - Adapter microcode download
53 * - SCSI device hot plug
58 #include <linux/init.h>
59 #include <linux/types.h>
60 #include <linux/errno.h>
61 #include <linux/kernel.h>
62 #include <linux/ioport.h>
63 #include <linux/delay.h>
64 #include <linux/pci.h>
65 #include <linux/wait.h>
66 #include <linux/spinlock.h>
67 #include <linux/sched.h>
68 #include <linux/interrupt.h>
69 #include <linux/blkdev.h>
70 #include <linux/firmware.h>
71 #include <linux/module.h>
72 #include <linux/moduleparam.h>
73 #include <linux/libata.h>
74 #include <linux/hdreg.h>
75 #include <linux/reboot.h>
76 #include <linux/stringify.h>
79 #include <asm/processor.h>
80 #include <scsi/scsi.h>
81 #include <scsi/scsi_host.h>
82 #include <scsi/scsi_tcq.h>
83 #include <scsi/scsi_eh.h>
84 #include <scsi/scsi_cmnd.h>
/* Module-scope driver state and tunables (exposed as module parameters below) */
static LIST_HEAD(ipr_ioa_head);		/* list of all adapters managed by this driver */
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;	/* error-log verbosity */
static unsigned int ipr_max_speed = 1;	/* index into ipr_max_bus_speeds; 1 = U160 */
static int ipr_testmode = 0;		/* allow unsupported configurations when set */
static unsigned int ipr_fastfail = 0;	/* reduce timeouts/retries when set */
static unsigned int ipr_transop_timeout = 0;	/* seconds to wait for adapter operational; 0 = driver default */
static unsigned int ipr_debug = 0;	/* extra debug logging when set */
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;	/* max physical devices supported */
static unsigned int ipr_dual_ioa_raid = 1;	/* enable dual-adapter RAID support */
static DEFINE_SPINLOCK(ipr_driver_lock);	/* protects ipr_ioa_head */
101 /* This table describes the differences between DMA controller chips */
102 static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
103 { /* Gemstone, Citrine, Obsidian, and Obsidian-E */
105 .cache_line_size = 0x20,
107 .set_interrupt_mask_reg = 0x0022C,
108 .clr_interrupt_mask_reg = 0x00230,
109 .clr_interrupt_mask_reg32 = 0x00230,
110 .sense_interrupt_mask_reg = 0x0022C,
111 .sense_interrupt_mask_reg32 = 0x0022C,
112 .clr_interrupt_reg = 0x00228,
113 .clr_interrupt_reg32 = 0x00228,
114 .sense_interrupt_reg = 0x00224,
115 .sense_interrupt_reg32 = 0x00224,
116 .ioarrin_reg = 0x00404,
117 .sense_uproc_interrupt_reg = 0x00214,
118 .sense_uproc_interrupt_reg32 = 0x00214,
119 .set_uproc_interrupt_reg = 0x00214,
120 .set_uproc_interrupt_reg32 = 0x00214,
121 .clr_uproc_interrupt_reg = 0x00218,
122 .clr_uproc_interrupt_reg32 = 0x00218
125 { /* Snipe and Scamp */
127 .cache_line_size = 0x20,
129 .set_interrupt_mask_reg = 0x00288,
130 .clr_interrupt_mask_reg = 0x0028C,
131 .clr_interrupt_mask_reg32 = 0x0028C,
132 .sense_interrupt_mask_reg = 0x00288,
133 .sense_interrupt_mask_reg32 = 0x00288,
134 .clr_interrupt_reg = 0x00284,
135 .clr_interrupt_reg32 = 0x00284,
136 .sense_interrupt_reg = 0x00280,
137 .sense_interrupt_reg32 = 0x00280,
138 .ioarrin_reg = 0x00504,
139 .sense_uproc_interrupt_reg = 0x00290,
140 .sense_uproc_interrupt_reg32 = 0x00290,
141 .set_uproc_interrupt_reg = 0x00290,
142 .set_uproc_interrupt_reg32 = 0x00290,
143 .clr_uproc_interrupt_reg = 0x00294,
144 .clr_uproc_interrupt_reg32 = 0x00294
149 .cache_line_size = 0x20,
151 .set_interrupt_mask_reg = 0x00010,
152 .clr_interrupt_mask_reg = 0x00018,
153 .clr_interrupt_mask_reg32 = 0x0001C,
154 .sense_interrupt_mask_reg = 0x00010,
155 .sense_interrupt_mask_reg32 = 0x00014,
156 .clr_interrupt_reg = 0x00008,
157 .clr_interrupt_reg32 = 0x0000C,
158 .sense_interrupt_reg = 0x00000,
159 .sense_interrupt_reg32 = 0x00004,
160 .ioarrin_reg = 0x00070,
161 .sense_uproc_interrupt_reg = 0x00020,
162 .sense_uproc_interrupt_reg32 = 0x00024,
163 .set_uproc_interrupt_reg = 0x00020,
164 .set_uproc_interrupt_reg32 = 0x00024,
165 .clr_uproc_interrupt_reg = 0x00028,
166 .clr_uproc_interrupt_reg32 = 0x0002C,
167 .init_feedback_reg = 0x0005C,
168 .dump_addr_reg = 0x00064,
169 .dump_data_reg = 0x00068
174 static const struct ipr_chip_t ipr_chip[] = {
175 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
176 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
177 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
178 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
179 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
180 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
181 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] }
184 static int ipr_max_bus_speeds [] = {
185 IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
/* Module metadata and parameters.  Parameters declared with
 * S_IRUGO | S_IWUSR are also writable at runtime via sysfs. */
MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
/* A constant array of IOASCs/URCs/Error Messages */
/*
 * Each full entry is {ioasc, flag, log level, message}; see the
 * struct ipr_error_table_t declaration (elsewhere) for field names.
 *
 * NOTE(review): several entries in this extraction have lost their
 * "{0x........, N, IPR_DEFAULT_LOG_LEVEL," opening line (only the message
 * string remains), and the leading "static const" and the closing "};"
 * appear to be missing as well — restore from the upstream driver.
 */
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	"Soft underlength error"},
	"Command to be cancelled not found"},
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	"Synchronization required"},
	"No ready, IOA shutdown"},
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	"FFF5: Medium error, data unreadable, recommend reassign"},
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	"ATA device status error"},
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	"Illegal request, invalid request type or request packet"},
	"Illegal request, invalid resource handle"},
	"Illegal request, commands not allowed to this device"},
	"Illegal request, command not allowed to a secondary adapter"},
	"Illegal request, invalid field in parameter list"},
	"Illegal request, parameter not supported"},
	"Illegal request, parameter value invalid"},
	"Illegal request, command sequence error"},
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	"FFFE: SCSI bus transition to single ended"},
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	"Aborted command, invalid descriptor"},
	"Command terminated by host"}
467 static const struct ipr_ses_table_entry ipr_ses_table[] = {
468 { "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
469 { "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
470 { "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
471 { "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
472 { "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
473 { "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
474 { "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
475 { "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
476 { "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
477 { "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
478 { "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
479 { "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
480 { "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);
#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	/* trace_index increments on every entry; the buffer is a ring */
	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	/* the ATA taskfile lives in a different spot on sis64 adapters */
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
526 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
527 * @ipr_cmd: ipr command struct
532 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
534 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
535 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
536 dma_addr_t dma_addr = ipr_cmd->dma_addr;
538 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
539 ioarcb->data_transfer_length = 0;
540 ioarcb->read_data_transfer_length = 0;
541 ioarcb->ioadl_len = 0;
542 ioarcb->read_ioadl_len = 0;
544 if (ipr_cmd->ioa_cfg->sis64)
545 ioarcb->u.sis64_addr_data.data_ioadl_addr =
546 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
548 ioarcb->write_ioadl_addr =
549 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
550 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
554 ioasa->residual_data_len = 0;
555 ioasa->u.gata.status = 0;
557 ipr_cmd->scsi_cmd = NULL;
559 ipr_cmd->sense_buffer[0] = 0;
560 ipr_cmd->dma_use_sg = 0;
564 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
565 * @ipr_cmd: ipr command struct
570 static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
572 ipr_reinit_ipr_cmnd(ipr_cmd);
573 ipr_cmd->u.scratch = 0;
574 ipr_cmd->sibling = NULL;
575 init_timer(&ipr_cmd->timer);
579 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
580 * @ioa_cfg: ioa config struct
583 * pointer to ipr command struct
586 struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
588 struct ipr_cmnd *ipr_cmd;
590 ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
591 list_del(&ipr_cmd->queue);
592 ipr_init_ipr_cmnd(ipr_cmd);
598 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
599 * @ioa_cfg: ioa config struct
600 * @clr_ints: interrupts to clear
602 * This function masks all interrupts on the adapter, then clears the
603 * interrupts specified in the mask
608 static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
611 volatile u32 int_reg;
613 /* Stop new interrupts */
614 ioa_cfg->allow_interrupts = 0;
616 /* Set interrupt mask to stop all new interrupts */
618 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
620 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
622 /* Clear any pending interrupts */
624 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
625 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
626 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
630 * ipr_save_pcix_cmd_reg - Save PCI-X command register
631 * @ioa_cfg: ioa config struct
634 * 0 on success / -EIO on failure
636 static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
638 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
640 if (pcix_cmd_reg == 0)
643 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
644 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
645 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
649 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
654 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
655 * @ioa_cfg: ioa config struct
658 * 0 on success / -EIO on failure
660 static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
662 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
665 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
666 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
667 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
676 * ipr_sata_eh_done - done function for aborted SATA commands
677 * @ipr_cmd: ipr command struct
679 * This function is invoked for ops generated to SATA
680 * devices which are being aborted.
685 static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
687 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
688 struct ata_queued_cmd *qc = ipr_cmd->qc;
689 struct ipr_sata_port *sata_port = qc->ap->private_data;
691 qc->err_mask |= AC_ERR_OTHER;
692 sata_port->ioasa.status |= ATA_BUSY;
693 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
698 * ipr_scsi_eh_done - mid-layer done function for aborted ops
699 * @ipr_cmd: ipr command struct
701 * This function is invoked by the interrupt handler for
702 * ops generated by the SCSI mid-layer which are being aborted.
707 static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
709 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
710 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
712 scsi_cmd->result |= (DID_ERROR << 16);
714 scsi_dma_unmap(ipr_cmd->scsi_cmd);
715 scsi_cmd->scsi_done(scsi_cmd);
716 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
720 * ipr_fail_all_ops - Fails all outstanding ops.
721 * @ioa_cfg: ioa config struct
723 * This function fails all outstanding ops.
728 static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
730 struct ipr_cmnd *ipr_cmd, *temp;
733 list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
734 list_del(&ipr_cmd->queue);
736 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
737 ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
739 if (ipr_cmd->scsi_cmd)
740 ipr_cmd->done = ipr_scsi_eh_done;
741 else if (ipr_cmd->qc)
742 ipr_cmd->done = ipr_sata_eh_done;
744 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
745 del_timer(&ipr_cmd->timer);
746 ipr_cmd->done(ipr_cmd);
753 * ipr_send_command - Send driver initiated requests.
754 * @ipr_cmd: ipr command struct
756 * This function sends a command to the adapter using the correct write call.
757 * In the case of sis64, calculate the ioarcb size required. Then or in the
763 static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
765 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
766 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
768 if (ioa_cfg->sis64) {
769 /* The default size is 256 bytes */
770 send_dma_addr |= 0x1;
772 /* If the number of ioadls * size of ioadl > 128 bytes,
773 then use a 512 byte ioarcb */
774 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
775 send_dma_addr |= 0x4;
776 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
778 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
782 * ipr_do_req - Send driver initiated requests.
783 * @ipr_cmd: ipr command struct
784 * @done: done function
785 * @timeout_func: timeout function
786 * @timeout: timeout value
788 * This function sends the specified command to the adapter with the
789 * timeout given. The done function is invoked on command completion.
794 static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
795 void (*done) (struct ipr_cmnd *),
796 void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
798 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
800 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
802 ipr_cmd->done = done;
804 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
805 ipr_cmd->timer.expires = jiffies + timeout;
806 ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
808 add_timer(&ipr_cmd->timer);
810 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
814 ipr_send_command(ipr_cmd);
818 * ipr_internal_cmd_done - Op done function for an internally generated op.
819 * @ipr_cmd: ipr command struct
821 * This function is the op done function for an internally generated,
822 * blocking op. It simply wakes the sleeping thread.
827 static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
829 if (ipr_cmd->sibling)
830 ipr_cmd->sibling = NULL;
832 complete(&ipr_cmd->completion);
836 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
837 * @ipr_cmd: ipr command struct
838 * @dma_addr: dma address
839 * @len: transfer length
840 * @flags: ioadl flag value
842 * This function initializes an ioadl in the case where there is only a single
848 static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
851 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
852 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
854 ipr_cmd->dma_use_sg = 1;
856 if (ipr_cmd->ioa_cfg->sis64) {
857 ioadl64->flags = cpu_to_be32(flags);
858 ioadl64->data_len = cpu_to_be32(len);
859 ioadl64->address = cpu_to_be64(dma_addr);
861 ipr_cmd->ioarcb.ioadl_len =
862 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
863 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
865 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
866 ioadl->address = cpu_to_be32(dma_addr);
868 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
869 ipr_cmd->ioarcb.read_ioadl_len =
870 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
871 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
873 ipr_cmd->ioarcb.ioadl_len =
874 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
875 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
881 * ipr_send_blocking_cmd - Send command and sleep on its completion.
882 * @ipr_cmd: ipr command struct
883 * @timeout_func: function to invoke if command times out
889 static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
890 void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
893 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
895 init_completion(&ipr_cmd->completion);
896 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
898 spin_unlock_irq(ioa_cfg->host->host_lock);
899 wait_for_completion(&ipr_cmd->completion);
900 spin_lock_irq(ioa_cfg->host->host_lock);
904 * ipr_send_hcam - Send an HCAM to the adapter.
905 * @ioa_cfg: ioa config struct
907 * @hostrcb: hostrcb struct
909 * This function will send a Host Controlled Async command to the adapter.
910 * If HCAMs are currently not allowed to be issued to the adapter, it will
911 * place the hostrcb on the free queue.
916 static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
917 struct ipr_hostrcb *hostrcb)
919 struct ipr_cmnd *ipr_cmd;
920 struct ipr_ioarcb *ioarcb;
922 if (ioa_cfg->allow_cmds) {
923 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
924 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
925 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
927 ipr_cmd->u.hostrcb = hostrcb;
928 ioarcb = &ipr_cmd->ioarcb;
930 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
931 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
932 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
933 ioarcb->cmd_pkt.cdb[1] = type;
934 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
935 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
937 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
938 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
940 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
941 ipr_cmd->done = ipr_process_ccn;
943 ipr_cmd->done = ipr_process_error;
945 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
949 ipr_send_command(ipr_cmd);
951 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
956 * ipr_update_ata_class - Update the ata class in the resource entry
957 * @res: resource entry struct
958 * @proto: cfgte device bus protocol value
963 static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
967 case IPR_PROTO_SAS_STP:
968 res->ata_class = ATA_DEV_ATA;
970 case IPR_PROTO_SATA_ATAPI:
971 case IPR_PROTO_SAS_STP_ATAPI:
972 res->ata_class = ATA_DEV_ATAPI;
975 res->ata_class = ATA_DEV_UNKNOWN;
981 * ipr_init_res_entry - Initialize a resource entry struct.
982 * @res: resource entry struct
983 * @cfgtew: config table entry wrapper struct
988 static void ipr_init_res_entry(struct ipr_resource_entry *res,
989 struct ipr_config_table_entry_wrapper *cfgtew)
993 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
994 struct ipr_resource_entry *gscsi_res = NULL;
996 res->needs_sync_complete = 0;
999 res->del_from_ml = 0;
1000 res->resetting_device = 0;
1002 res->sata_port = NULL;
/* SIS64 adapters use the 64-bit config table entry (cfgte64). */
1004 if (ioa_cfg->sis64) {
1005 proto = cfgtew->u.cfgte64->proto;
1006 res->res_flags = cfgtew->u.cfgte64->res_flags;
1007 res->qmodel = IPR_QUEUEING_MODEL64(res);
1008 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1010 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1011 sizeof(res->res_path));
1014 res->lun = scsilun_to_int(&res->dev_lun);
/*
 * Generic SCSI: reuse the target id of an existing resource with the
 * same dev_id, otherwise allocate a fresh id from the bitmap.
 */
1016 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1017 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1018 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1020 res->target = gscsi_res->target;
1025 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1026 ioa_cfg->max_devs_supported);
1027 set_bit(res->target, ioa_cfg->target_ids);
1030 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1031 sizeof(res->dev_lun.scsi_lun));
1032 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1033 res->bus = IPR_IOAFP_VIRTUAL_BUS;
/* Arrays and volume sets live on virtual buses with their own id maps. */
1035 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1036 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1037 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1038 ioa_cfg->max_devs_supported);
1039 set_bit(res->target, ioa_cfg->array_ids);
1040 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1041 res->bus = IPR_VSET_VIRTUAL_BUS;
1042 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1043 ioa_cfg->max_devs_supported);
1044 set_bit(res->target, ioa_cfg->vset_ids);
1046 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1047 ioa_cfg->max_devs_supported);
1048 set_bit(res->target, ioa_cfg->target_ids);
/* Legacy (32-bit) config table entry: address comes straight from res_addr. */
1051 proto = cfgtew->u.cfgte->proto;
1052 res->qmodel = IPR_QUEUEING_MODEL(res);
1053 res->flags = cfgtew->u.cfgte->flags;
1054 if (res->flags & IPR_IS_IOA_RESOURCE)
1055 res->type = IPR_RES_TYPE_IOAFP;
1057 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1059 res->bus = cfgtew->u.cfgte->res_addr.bus;
1060 res->target = cfgtew->u.cfgte->res_addr.target;
1061 res->lun = cfgtew->u.cfgte->res_addr.lun;
1064 ipr_update_ata_class(res, proto);
1068 * ipr_is_same_device - Determine if two devices are the same.
1069 * @res: resource entry struct
1070 * @cfgtew: config table entry wrapper struct
1073 * 1 if the devices are the same / 0 otherwise
1075 static int ipr_is_same_device(struct ipr_resource_entry *res,
1076 struct ipr_config_table_entry_wrapper *cfgtew)
/* SIS64: identity is (dev_id, lun); legacy: identity is (bus, target, lun).
 * NOTE(review): the return statements are elided in this view. */
1078 if (res->ioa_cfg->sis64) {
1079 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1080 sizeof(cfgtew->u.cfgte64->dev_id)) &&
1081 !memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1082 sizeof(cfgtew->u.cfgte64->lun))) {
1086 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1087 res->target == cfgtew->u.cfgte->res_addr.target &&
1088 res->lun == cfgtew->u.cfgte->res_addr.lun)
1096 * ipr_format_resource_path - Format the resource path for printing.
1097 * @res_path: resource path
1103 static char *ipr_format_resource_path(u8 *res_path, char *buffer)
1107 sprintf(buffer, "%02X", res_path[0]);
1108 for (i=1; res_path[i] != 0xff; i++)
1109 sprintf(buffer, "%s-%02X", buffer, res_path[i]);
1115 * ipr_update_res_entry - Update the resource entry.
1116 * @res: resource entry struct
1117 * @cfgtew: config table entry wrapper struct
1122 static void ipr_update_res_entry(struct ipr_resource_entry *res,
1123 struct ipr_config_table_entry_wrapper *cfgtew)
1125 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* Refresh a resource from a (possibly changed) config table entry. */
1129 if (res->ioa_cfg->sis64) {
1130 res->flags = cfgtew->u.cfgte64->flags;
1131 res->res_flags = cfgtew->u.cfgte64->res_flags;
1132 res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1134 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1135 sizeof(struct ipr_std_inq_data));
1137 res->qmodel = IPR_QUEUEING_MODEL64(res);
1138 proto = cfgtew->u.cfgte64->proto;
1139 res->res_handle = cfgtew->u.cfgte64->res_handle;
1140 res->dev_id = cfgtew->u.cfgte64->dev_id;
1142 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1143 sizeof(res->dev_lun.scsi_lun));
/* Detect a moved device: copy the new path and remember it changed
 * (the new_path assignment line is elided in this view). */
1145 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1146 sizeof(res->res_path))) {
1147 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1148 sizeof(res->res_path));
/* Only log the new path if the device is exposed to the midlayer. */
1152 if (res->sdev && new_path)
1153 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1154 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
1156 res->flags = cfgtew->u.cfgte->flags;
1157 if (res->flags & IPR_IS_IOA_RESOURCE)
1158 res->type = IPR_RES_TYPE_IOAFP;
1160 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1162 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1163 sizeof(struct ipr_std_inq_data));
1165 res->qmodel = IPR_QUEUEING_MODEL(res);
1166 proto = cfgtew->u.cfgte->proto;
1167 res->res_handle = cfgtew->u.cfgte->res_handle;
1170 ipr_update_ata_class(res, proto);
1174 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1176 * @res: resource entry struct
1177 * @cfgtew: config table entry wrapper struct
1182 static void ipr_clear_res_target(struct ipr_resource_entry *res)
1184 struct ipr_resource_entry *gscsi_res = NULL;
1185 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
/* Target-id bitmaps only exist on SIS64 adapters; nothing to do otherwise. */
1187 if (!ioa_cfg->sis64)
/* Pick the bitmap matching the virtual bus the resource lives on. */
1190 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1191 clear_bit(res->target, ioa_cfg->array_ids);
1192 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1193 clear_bit(res->target, ioa_cfg->vset_ids);
/* Generic SCSI targets may share an id across resources with the same
 * dev_id; only free the bit when no other resource still uses it. */
1194 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1195 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1196 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1198 clear_bit(res->target, ioa_cfg->target_ids);
1200 } else if (res->bus == 0)
1201 clear_bit(res->target, ioa_cfg->target_ids);
1205 * ipr_handle_config_change - Handle a config change from the adapter
1206 * @ioa_cfg: ioa config struct
1212 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1213 struct ipr_hostrcb *hostrcb)
1215 struct ipr_resource_entry *res = NULL;
1216 struct ipr_config_table_entry_wrapper cfgtew;
1217 __be32 cc_res_handle;
/* Pull the affected resource handle out of the CCN payload. */
1221 if (ioa_cfg->sis64) {
1222 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1223 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1225 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1226 cc_res_handle = cfgtew.u.cfgte->res_handle;
/* Find the existing resource entry, if we already know this device. */
1229 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1230 if (res->res_handle == cc_res_handle) {
/* Unknown device and no free entries: re-arm the HCAM and give up. */
1237 if (list_empty(&ioa_cfg->free_res_q)) {
1238 ipr_send_hcam(ioa_cfg,
1239 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
/* New device: take a free entry and initialize it. */
1244 res = list_entry(ioa_cfg->free_res_q.next,
1245 struct ipr_resource_entry, queue);
1247 list_del(&res->queue);
1248 ipr_init_res_entry(res, &cfgtew);
1249 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1252 ipr_update_res_entry(res, &cfgtew);
/* Removal notification: schedule midlayer delete or free immediately. */
1254 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1256 res->del_from_ml = 1;
1257 res->res_handle = IPR_INVALID_RES_HANDLE;
1258 if (ioa_cfg->allow_ml_add_del)
1259 schedule_work(&ioa_cfg->work_q)
1261 ipr_clear_res_target(res);
1262 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1264 } else if (!res->sdev) {
1266 if (ioa_cfg->allow_ml_add_del)
1267 schedule_work(&ioa_cfg->work_q);
/* Re-arm: send another CCN HCAM so we keep receiving notifications. */
1270 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1274 * ipr_process_ccn - Op done function for a CCN.
1275 * @ipr_cmd: ipr command struct
1277 * This function is the op done function for a configuration
1278 * change notification host controlled async from the adapter.
1283 static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1285 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1286 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1287 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1289 list_del(&hostrcb->queue);
1290 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* On failure (other than an expected adapter reset), log it and re-arm
 * the HCAM rather than processing a bogus notification. */
1293 if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1294 dev_err(&ioa_cfg->pdev->dev,
1295 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1297 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1299 ipr_handle_config_change(ioa_cfg, hostrcb);
1304 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1305 * @i: index into buffer
1306 * @buf: string to modify
1308 * This function will strip all trailing whitespace, pad the end
1309 * of the string with a single space, and NULL terminate the string.
1312 * new length of string
/* NOTE(review): only the scan loop is visible here; the padding,
 * termination, and return statements are elided in this view. */
1314 static int strip_and_pad_whitespace(int i, char *buf)
1316 while (i && buf[i] == ' ')
1324 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1325 * @prefix: string to print at start of printk
1326 * @hostrcb: hostrcb pointer
1327 * @vpd: vendor/product id/sn struct
1332 static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1333 struct ipr_vpd *vpd)
/* +3: room for two pad spaces and the terminating NUL. */
1335 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3]
/* Concatenate vendor, product, serial, trimming trailing blanks between
 * each field so the whole VPD fits on one log line. */
1338 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1339 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1341 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1342 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1344 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1345 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1347 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1351 * ipr_log_vpd - Log the passed VPD to the error log.
1352 * @vpd: vendor/product id/sn struct
1357 static void ipr_log_vpd(struct ipr_vpd *vpd)
1359 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1360 + IPR_SERIAL_NUM_LEN];
/* Fixed-width VPD fields are not NUL-terminated; copy then terminate. */
1362 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1363 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1365 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1366 ipr_err("Vendor/Product ID: %s\n", buffer);
/* Reuse the buffer for the serial number. */
1368 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1369 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1370 ipr_err("    Serial Number: %s\n", buffer);
1374 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1375 * @prefix: string to print at start of printk
1376 * @hostrcb: hostrcb pointer
1377 * @vpd: vendor/product id/sn/wwn struct
1382 static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1383 struct ipr_ext_vpd *vpd)
/* Same as ipr_log_vpd_compact, plus the world-wide name. */
1385 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1386 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1387 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1391 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1392 * @vpd: vendor/product id/sn/wwn struct
1397 static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
/* Same as ipr_log_vpd, plus the world-wide name. */
1399 ipr_log_vpd(&vpd->vpd);
1400 ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1401 be32_to_cpu(vpd->wwid[1]));
1405 * ipr_log_enhanced_cache_error - Log a cache error.
1406 * @ioa_cfg: ioa config struct
1407 * @hostrcb: hostrcb struct
1412 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1413 struct ipr_hostrcb *hostrcb)
1415 struct ipr_hostrcb_type_12_error *error;
/* The type-12 payload lives at a different offset on SIS64 adapters. */
1418 error = &hostrcb->hcam.u.error64.u.type_12_error;
1420 error = &hostrcb->hcam.u.error.u.type_12_error;
1422 ipr_err("-----Current Configuration-----\n");
1423 ipr_err("Cache Directory Card Information:\n");
1424 ipr_log_ext_vpd(&error->ioa_vpd);
1425 ipr_err("Adapter Card Information:\n");
1426 ipr_log_ext_vpd(&error->cfc_vpd);
1428 ipr_err("-----Expected Configuration-----\n");
1429 ipr_err("Cache Directory Card Information:\n");
1430 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1431 ipr_err("Adapter Card Information:\n");
1432 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1434 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1435 be32_to_cpu(error->ioa_data[0]),
1436 be32_to_cpu(error->ioa_data[1]),
1437 be32_to_cpu(error->ioa_data[2]));
1441 * ipr_log_cache_error - Log a cache error.
1442 * @ioa_cfg: ioa config struct
1443 * @hostrcb: hostrcb struct
/* Legacy (non-enhanced) variant: type-02 payload, plain VPD, no WWNs. */
1448 static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1449 struct ipr_hostrcb *hostrcb)
1451 struct ipr_hostrcb_type_02_error *error =
1452 &hostrcb->hcam.u.error.u.type_02_error;
1454 ipr_err("-----Current Configuration-----\n");
1455 ipr_err("Cache Directory Card Information:\n");
1456 ipr_log_vpd(&error->ioa_vpd);
1457 ipr_err("Adapter Card Information:\n");
1458 ipr_log_vpd(&error->cfc_vpd);
1460 ipr_err("-----Expected Configuration-----\n");
1461 ipr_err("Cache Directory Card Information:\n");
1462 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1463 ipr_err("Adapter Card Information:\n");
1464 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1466 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1467 be32_to_cpu(error->ioa_data[0]),
1468 be32_to_cpu(error->ioa_data[1]),
1469 be32_to_cpu(error->ioa_data[2]));
1473 * ipr_log_enhanced_config_error - Log a configuration error.
1474 * @ioa_cfg: ioa config struct
1475 * @hostrcb: hostrcb struct
1480 static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1481 struct ipr_hostrcb *hostrcb)
1483 int errors_logged, i;
1484 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1485 struct ipr_hostrcb_type_13_error *error;
1487 error = &hostrcb->hcam.u.error.u.type_13_error;
1488 errors_logged = be32_to_cpu(error->errors_logged);
1490 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1491 be32_to_cpu(error->errors_detected), errors_logged);
1493 dev_entry = error->dev;
/* Dump one extended-VPD record per logged device error. */
1495 for (i = 0; i < errors_logged; i++, dev_entry++) {
1498 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1499 ipr_log_ext_vpd(&dev_entry->vpd);
1501 ipr_err("-----New Device Information-----\n");
1502 ipr_log_ext_vpd(&dev_entry->new_vpd);
1504 ipr_err("Cache Directory Card Information:\n");
1505 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1507 ipr_err("Adapter Card Information:\n");
1508 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1513 * ipr_log_sis64_config_error - Log a device error.
1514 * @ioa_cfg: ioa config struct
1515 * @hostrcb: hostrcb struct
/* SIS64 variant: type-23 payload; devices are identified by resource
 * path rather than bus/target/lun. */
1520 static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1521 struct ipr_hostrcb *hostrcb)
1523 int errors_logged, i;
1524 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1525 struct ipr_hostrcb_type_23_error *error;
1526 char buffer[IPR_MAX_RES_PATH_LENGTH];
1528 error = &hostrcb->hcam.u.error64.u.type_23_error;
1529 errors_logged = be32_to_cpu(error->errors_logged);
1531 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1532 be32_to_cpu(error->errors_detected), errors_logged);
1534 dev_entry = error->dev;
1536 for (i = 0; i < errors_logged; i++, dev_entry++) {
1539 ipr_err("Device %d : %s", i + 1,
1540 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
1541 ipr_log_ext_vpd(&dev_entry->vpd);
1543 ipr_err("-----New Device Information-----\n");
1544 ipr_log_ext_vpd(&dev_entry->new_vpd);
1546 ipr_err("Cache Directory Card Information:\n");
1547 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1549 ipr_err("Adapter Card Information:\n");
1550 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1555 * ipr_log_config_error - Log a configuration error.
1556 * @ioa_cfg: ioa config struct
1557 * @hostrcb: hostrcb struct
/* Legacy (non-enhanced) variant: type-03 payload with plain VPD plus
 * five raw IOA data words per device. */
1562 static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1563 struct ipr_hostrcb *hostrcb)
1565 int errors_logged, i;
1566 struct ipr_hostrcb_device_data_entry *dev_entry;
1567 struct ipr_hostrcb_type_03_error *error;
1569 error = &hostrcb->hcam.u.error.u.type_03_error;
1570 errors_logged = be32_to_cpu(error->errors_logged);
1572 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1573 be32_to_cpu(error->errors_detected), errors_logged);
1575 dev_entry = error->dev;
1577 for (i = 0; i < errors_logged; i++, dev_entry++) {
1580 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1581 ipr_log_vpd(&dev_entry->vpd);
1583 ipr_err("-----New Device Information-----\n");
1584 ipr_log_vpd(&dev_entry->new_vpd);
1586 ipr_err("Cache Directory Card Information:\n");
1587 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1589 ipr_err("Adapter Card Information:\n");
1590 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1592 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1593 be32_to_cpu(dev_entry->ioa_data[0]),
1594 be32_to_cpu(dev_entry->ioa_data[1]),
1595 be32_to_cpu(dev_entry->ioa_data[2]),
1596 be32_to_cpu(dev_entry->ioa_data[3]),
1597 be32_to_cpu(dev_entry->ioa_data[4]));
1602 * ipr_log_enhanced_array_error - Log an array configuration error.
1603 * @ioa_cfg: ioa config struct
1604 * @hostrcb: hostrcb struct
1609 static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1610 struct ipr_hostrcb *hostrcb)
1613 struct ipr_hostrcb_type_14_error *error;
1614 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
/* All-'0' serial number marks an unused/empty array member slot. */
1615 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1617 error = &hostrcb->hcam.u.error.u.type_14_error;
1621 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1622 error->protection_level,
1623 ioa_cfg->host->host_no,
1624 error->last_func_vset_res_addr.bus,
1625 error->last_func_vset_res_addr.target,
1626 error->last_func_vset_res_addr.lun);
1630 array_entry = error->array_member;
/* NOTE(review): sizeof(error->array_member) is the array's size in
 * BYTES, not its element count (ARRAY_SIZE), so this bound is far too
 * large to actually clamp num_entries — confirm against upstream fix. */
1631 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1632 sizeof(error->array_member));
1634 for (i = 0; i < num_entries; i++, array_entry++) {
1635 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1638 if (be32_to_cpu(error->exposed_mode_adn) == i)
1639 ipr_err("Exposed Array Member %d:\n", i);
1641 ipr_err("Array Member %d:\n", i);
1643 ipr_log_ext_vpd(&array_entry->vpd);
1644 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1645 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1646 "Expected Location");
1653 * ipr_log_array_error - Log an array configuration error.
1654 * @ioa_cfg: ioa config struct
1655 * @hostrcb: hostrcb struct
/* Legacy variant: type-04 payload with a fixed 18-slot member table. */
1660 static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1661 struct ipr_hostrcb *hostrcb)
1664 struct ipr_hostrcb_type_04_error *error;
1665 struct ipr_hostrcb_array_data_entry *array_entry;
/* All-'0' serial number marks an unused/empty array member slot. */
1666 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1668 error = &hostrcb->hcam.u.error.u.type_04_error;
1672 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1673 error->protection_level,
1674 ioa_cfg->host->host_no,
1675 error->last_func_vset_res_addr.bus,
1676 error->last_func_vset_res_addr.target,
1677 error->last_func_vset_res_addr.lun);
1681 array_entry = error->array_member;
1683 for (i = 0; i < 18; i++) {
1684 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1687 if (be32_to_cpu(error->exposed_mode_adn) == i)
1688 ipr_err("Exposed Array Member %d:\n", i);
1690 ipr_err("Array Member %d:\n", i);
1692 ipr_log_vpd(&array_entry->vpd);
1694 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1695 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1696 "Expected Location");
/* Members beyond the first bank continue in array_member2
 * (the iteration over it is elided in this view). */
1701 array_entry = error->array_member2;
1708 * ipr_log_hex_data - Log additional hex IOA error data.
1709 * @ioa_cfg: ioa config struct
1710 * @data: IOA error data
1716 static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
/* At default verbosity, cap the dump so the log is not flooded. */
1723 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1724 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP)
/* len is in bytes; i indexes 32-bit words, four words per output row. */
1726 for (i = 0; i < len / 4; i += 4) {
1727 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1728 be32_to_cpu(data[i]),
1729 be32_to_cpu(data[i+1]),
1730 be32_to_cpu(data[i+2]),
1731 be32_to_cpu(data[i+3]));
1736 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1737 * @ioa_cfg: ioa config struct
1738 * @hostrcb: hostrcb struct
1743 static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1744 struct ipr_hostrcb *hostrcb)
1746 struct ipr_hostrcb_type_17_error *error;
/* The type-17 payload lives at a different offset on SIS64 adapters. */
1749 error = &hostrcb->hcam.u.error64.u.type_17_error;
1751 error = &hostrcb->hcam.u.error.u.type_17_error;
/* Force NUL termination before trimming — the adapter-supplied string
 * is not guaranteed to be terminated. */
1753 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1754 strim(error->failure_reason);
1756 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1757 be32_to_cpu(hostrcb->hcam.u.error.prc));
1758 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
/* Hex-dump whatever trails the fixed part of the type-17 record. */
1759 ipr_log_hex_data(ioa_cfg, error->data,
1760 be32_to_cpu(hostrcb->hcam.length) -
1761 (offsetof(struct ipr_hostrcb_error, u) +
1762 offsetof(struct ipr_hostrcb_type_17_error, data)));
1766 * ipr_log_dual_ioa_error - Log a dual adapter error.
1767 * @ioa_cfg: ioa config struct
1768 * @hostrcb: hostrcb struct
/* Legacy variant of ipr_log_enhanced_dual_ioa_error: type-07 payload. */
1773 static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1774 struct ipr_hostrcb *hostrcb)
1776 struct ipr_hostrcb_type_07_error *error;
1778 error = &hostrcb->hcam.u.error.u.type_07_error;
/* Force NUL termination before trimming the adapter-supplied string. */
1779 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1780 strim(error->failure_reason);
1782 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1783 be32_to_cpu(hostrcb->hcam.u.error.prc));
1784 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1785 ipr_log_hex_data(ioa_cfg, error->data,
1786 be32_to_cpu(hostrcb->hcam.length) -
1787 (offsetof(struct ipr_hostrcb_error, u) +
1788 offsetof(struct ipr_hostrcb_type_07_error, data)));
/* Lookup tables mapping fabric path activity/state bits to log strings. */
1791 static const struct {
1794 } path_active_desc[] = {
1795 { IPR_PATH_NO_INFO, "Path" },
1796 { IPR_PATH_ACTIVE, "Active path" },
1797 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
1800 static const struct {
1803 } path_state_desc[] = {
1804 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1805 { IPR_PATH_HEALTHY, "is healthy" },
1806 { IPR_PATH_DEGRADED, "is degraded" },
1807 { IPR_PATH_FAILED, "is failed" }
1811 * ipr_log_fabric_path - Log a fabric path error
1812 * @hostrcb: hostrcb struct
1813 * @fabric: fabric descriptor
1818 static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1819 struct ipr_hostrcb_fabric_desc *fabric)
1822 u8 path_state = fabric->path_state;
1823 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1824 u8 state = path_state & IPR_PATH_STATE_MASK;
/* Find matching activity and state strings; print whichever address
 * components are valid (0xff means "not applicable"). */
1826 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1827 if (path_active_desc[i].active != active)
1830 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1831 if (path_state_desc[j].state != state)
1834 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1835 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1836 path_active_desc[i].desc, path_state_desc[j].desc,
1838 } else if (fabric->cascaded_expander == 0xff) {
1839 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1840 path_active_desc[i].desc, path_state_desc[j].desc,
1841 fabric->ioa_port, fabric->phy);
1842 } else if (fabric->phy == 0xff) {
1843 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1844 path_active_desc[i].desc, path_state_desc[j].desc,
1845 fabric->ioa_port, fabric->cascaded_expander);
1847 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1848 path_active_desc[i].desc, path_state_desc[j].desc,
1849 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
/* Fallthrough: no table entry matched — dump the raw state byte. */
1855 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1856 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1860 * ipr_log64_fabric_path - Log a fabric path error
1861 * @hostrcb: hostrcb struct
1862 * @fabric: fabric descriptor
/* SIS64 variant: identifies the path by resource path string instead of
 * IOA port / cascade / phy numbers. */
1867 static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1868 struct ipr_hostrcb64_fabric_desc *fabric)
1871 u8 path_state = fabric->path_state;
1872 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1873 u8 state = path_state & IPR_PATH_STATE_MASK;
1874 char buffer[IPR_MAX_RES_PATH_LENGTH];
1876 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1877 if (path_active_desc[i].active != active)
1880 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1881 if (path_state_desc[j].state != state)
1884 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1885 path_active_desc[i].desc, path_state_desc[j].desc,
1886 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
/* Fallthrough: no table entry matched — dump the raw state byte. */
1891 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1892 ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
/* Lookup tables for fabric path element type/status, and SAS link-rate
 * names indexed by the IPR_PHY_LINK_RATE_MASK bits. */
1895 static const struct {
1898 } path_type_desc[] = {
1899 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
1900 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
1901 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1902 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1905 static const struct {
1908 } path_status_desc[] = {
1909 { IPR_PATH_CFG_NO_PROB, "Functional" },
1910 { IPR_PATH_CFG_DEGRADED, "Degraded" },
1911 { IPR_PATH_CFG_FAILED, "Failed" },
1912 { IPR_PATH_CFG_SUSPECT, "Suspect" },
1913 { IPR_PATH_NOT_DETECTED, "Missing" },
1914 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1917 static const char *link_rate[] = {
1920 "phy reset problem",
1937 * ipr_log_path_elem - Log a fabric path element.
1938 * @hostrcb: hostrcb struct
1939 * @cfg: fabric path element struct
1944 static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1945 struct ipr_hostrcb_config_element *cfg)
1948 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1949 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
/* Nonexistent elements carry no useful information — skip silently. */
1951 if (type == IPR_PATH_CFG_NOT_EXIST)
1954 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1955 if (path_type_desc[i].type != type)
1958 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1959 if (path_status_desc[j].status != status)
/* IOA ports always have a valid phy; other element types report
 * whichever of cascade/phy are valid (0xff = not applicable). */
1962 if (type == IPR_PATH_CFG_IOA_PORT) {
1963 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
1964 path_status_desc[j].desc, path_type_desc[i].desc,
1965 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1966 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1968 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
1969 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
1970 path_status_desc[j].desc, path_type_desc[i].desc,
1971 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1972 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1973 } else if (cfg->cascaded_expander == 0xff) {
1974 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
1975 "WWN=%08X%08X\n", path_status_desc[j].desc,
1976 path_type_desc[i].desc, cfg->phy,
1977 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1978 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1979 } else if (cfg->phy == 0xff) {
1980 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
1981 "WWN=%08X%08X\n", path_status_desc[j].desc,
1982 path_type_desc[i].desc, cfg->cascaded_expander,
1983 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1984 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
1986 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
1987 "WWN=%08X%08X\n", path_status_desc[j].desc,
1988 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
1989 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
1990 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/* Fallthrough: unrecognized type/status — dump the raw element. */
1997 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
1998 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
1999 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2000 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2004 * ipr_log64_path_elem - Log a fabric path element.
2005 * @hostrcb: hostrcb struct
2006 * @cfg: fabric path element struct
/* SIS64 variant: elements are located by resource path string. */
2011 static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2012 struct ipr_hostrcb64_config_element *cfg)
2015 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2016 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2017 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2018 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* Skip nonexistent elements and any descriptor that is not SIS64. */
2020 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2023 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2024 if (path_type_desc[i].type != type)
2027 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2028 if (path_status_desc[j].status != status)
2031 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2032 path_status_desc[j].desc, path_type_desc[i].desc,
2033 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2034 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2035 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
/* Fallthrough: unrecognized type/status — dump the raw element. */
2039 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2040 "WWN=%08X%08X\n", cfg->type_status,
2041 ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2042 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2043 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2047 * ipr_log_fabric_error - Log a fabric error.
2048 * @ioa_cfg: ioa config struct
2049 * @hostrcb: hostrcb struct
2054 static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2055 struct ipr_hostrcb *hostrcb)
2057 struct ipr_hostrcb_type_20_error *error;
2058 struct ipr_hostrcb_fabric_desc *fabric;
2059 struct ipr_hostrcb_config_element *cfg;
2062 error = &hostrcb->hcam.u.error.u.type_20_error;
/* Force NUL termination of the adapter-supplied reason string. */
2063 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2064 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
/* add_len = bytes remaining after the fixed part of the type-20 record. */
2066 add_len = be32_to_cpu(hostrcb->hcam.length) -
2067 (offsetof(struct ipr_hostrcb_error, u) +
2068 offsetof(struct ipr_hostrcb_type_20_error, desc));
/* Walk each variable-length fabric descriptor and its path elements. */
2070 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2071 ipr_log_fabric_path(hostrcb, fabric);
2072 for_each_fabric_cfg(fabric, cfg)
2073 ipr_log_path_elem(hostrcb, cfg);
2075 add_len -= be16_to_cpu(fabric->length);
2076 fabric = (struct ipr_hostrcb_fabric_desc *)
2077 ((unsigned long)fabric + be16_to_cpu(fabric->length));
/* Hex-dump whatever trails the last descriptor. */
2080 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2084 * ipr_log_sis64_array_error - Log a sis64 array error.
2085 * @ioa_cfg: ioa config struct
2086 * @hostrcb: hostrcb struct
2091 static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2092 struct ipr_hostrcb *hostrcb)
2095 struct ipr_hostrcb_type_24_error *error;
2096 struct ipr_hostrcb64_array_data_entry *array_entry;
2097 char buffer[IPR_MAX_RES_PATH_LENGTH];
/* All-'0' serial number marks an unused/empty array member slot. */
2098 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2100 error = &hostrcb->hcam.u.error64.u.type_24_error;
2104 ipr_err("RAID %s Array Configuration: %s\n",
2105 error->protection_level,
2106 ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2110 array_entry = error->array_member;
/* NOTE(review): same sizeof-vs-element-count concern as
 * ipr_log_enhanced_array_error — confirm against upstream. */
2111 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2112 sizeof(error->array_member));
2114 for (i = 0; i < num_entries; i++, array_entry++) {
2116 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2119 if (error->exposed_mode_adn == i)
2120 ipr_err("Exposed Array Member %d:\n", i);
2122 ipr_err("Array Member %d:\n", i);
/* NOTE(review): duplicate "Array Member %d" print — likely an elided
 * else-branch in this view; verify against the full source. */
2124 ipr_err("Array Member %d:\n", i);
2125 ipr_log_ext_vpd(&array_entry->vpd);
2126 ipr_err("Current Location: %s",
2127 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2128 ipr_err("Expected Location: %s",
2129 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2136 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2137 * @ioa_cfg: ioa config struct
2138 * @hostrcb: hostrcb struct
/* SIS64 counterpart of ipr_log_fabric_error: type-30 payload with
 * 64-bit fabric descriptors and config elements. */
2143 static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2144 struct ipr_hostrcb *hostrcb)
2146 struct ipr_hostrcb_type_30_error *error;
2147 struct ipr_hostrcb64_fabric_desc *fabric;
2148 struct ipr_hostrcb64_config_element *cfg;
2151 error = &hostrcb->hcam.u.error64.u.type_30_error;
/* Force NUL termination of the adapter-supplied reason string. */
2153 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2154 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2156 add_len = be32_to_cpu(hostrcb->hcam.length) -
2157 (offsetof(struct ipr_hostrcb64_error, u) +
2158 offsetof(struct ipr_hostrcb_type_30_error, desc));
2160 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2161 ipr_log64_fabric_path(hostrcb, fabric);
2162 for_each_fabric_cfg(fabric, cfg)
2163 ipr_log64_path_elem(hostrcb, cfg);
2165 add_len -= be16_to_cpu(fabric->length);
2166 fabric = (struct ipr_hostrcb64_fabric_desc *)
2167 ((unsigned long)fabric + be16_to_cpu(fabric->length));
/* Hex-dump whatever trails the last descriptor. */
2170 ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2174 * ipr_log_generic_error - Log an adapter error.
2175 * @ioa_cfg: ioa config struct
2176 * @hostrcb: hostrcb struct
/* Fallback logger: raw hex dump of the entire HCAM payload. */
2181 static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2182 struct ipr_hostrcb *hostrcb)
2184 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2185 be32_to_cpu(hostrcb->hcam.length));
2189 * ipr_get_error - Find the specfied IOASC in the ipr_error_table.
2192 * This function will return the index of into the ipr_error_table
2193 * for the specified IOASC. If the IOASC is not in the table,
2194 * 0 will be returned, which points to the entry used for unknown errors.
2197 * index into the ipr_error_table
/* Linear scan of ipr_error_table for the masked IOASC; per the kernel-doc
 * above, index 0 (the "unknown error" entry) is returned when no match is
 * found (the return statements are elided from this listing). */
2199 static u32 ipr_get_error(u32 ioasc)
2203 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2204 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2211 * ipr_handle_log_data - Log an adapter error.
2212 * @ioa_cfg: ioa config struct
2213 * @hostrcb: hostrcb struct
2215 * This function logs an adapter error to the system.
/* Main HCAM error-log dispatcher: validates the notification, looks the
 * IOASC up in ipr_error_table, and routes the hostrcb to the formatter for
 * its overlay ID.  NOTE(review): this listing elides the return/break
 * statements and the sis64 if/else around the fd_ioasc reads. */
2220 static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2221 struct ipr_hostrcb *hostrcb)
/* Only error-log notifications are handled here. */
2226 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2229 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2230 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
/* sis64 adapters carry the failing-device IOASC in the 64-bit error layout. */
2233 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2235 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2237 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2238 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2239 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2240 scsi_report_bus_reset(ioa_cfg->host,
2241 hostrcb->hcam.u.error.fd_res_addr.bus);
2244 error_index = ipr_get_error(ioasc);
/* log_hcam == 0 means "never log this IOASC". */
2246 if (!ipr_error_table[error_index].log_hcam)
2249 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2251 /* Set indication we have logged an error */
2252 ioa_cfg->errors_logged++;
/* Respect the per-adapter verbosity threshold. */
2254 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
/* Clamp a bogus adapter-reported length so formatters cannot run past the
 * raw buffer. */
2256 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2257 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
/* Dispatch on overlay ID; break statements are elided from this listing. */
2259 switch (hostrcb->hcam.overlay_id) {
2260 case IPR_HOST_RCB_OVERLAY_ID_2:
2261 ipr_log_cache_error(ioa_cfg, hostrcb);
2263 case IPR_HOST_RCB_OVERLAY_ID_3:
2264 ipr_log_config_error(ioa_cfg, hostrcb);
2266 case IPR_HOST_RCB_OVERLAY_ID_4:
2267 case IPR_HOST_RCB_OVERLAY_ID_6:
2268 ipr_log_array_error(ioa_cfg, hostrcb);
2270 case IPR_HOST_RCB_OVERLAY_ID_7:
2271 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2273 case IPR_HOST_RCB_OVERLAY_ID_12:
2274 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2276 case IPR_HOST_RCB_OVERLAY_ID_13:
2277 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2279 case IPR_HOST_RCB_OVERLAY_ID_14:
2280 case IPR_HOST_RCB_OVERLAY_ID_16:
2281 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2283 case IPR_HOST_RCB_OVERLAY_ID_17:
2284 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2286 case IPR_HOST_RCB_OVERLAY_ID_20:
2287 ipr_log_fabric_error(ioa_cfg, hostrcb);
2289 case IPR_HOST_RCB_OVERLAY_ID_23:
2290 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2292 case IPR_HOST_RCB_OVERLAY_ID_24:
2293 case IPR_HOST_RCB_OVERLAY_ID_26:
2294 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2296 case IPR_HOST_RCB_OVERLAY_ID_30:
2297 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2299 case IPR_HOST_RCB_OVERLAY_ID_1:
2300 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
/* Unknown / overlay 1 / default: raw hex dump. */
2302 ipr_log_generic_error(ioa_cfg, hostrcb);
2308 * ipr_process_error - Op done function for an adapter error log.
2309 * @ipr_cmd: ipr command struct
2311 * This function is the op done function for an error log host
2312 * controlled async from the adapter. It will log the error and
2313 * send the HCAM back to the adapter.
/* Completion handler for an error-log HCAM: logs the data (or the failure),
 * requeues the command, and re-sends the hostrcb to the adapter so error
 * notifications keep flowing.  The sis64 if/else around the fd_ioasc reads
 * and the success test before 2334 are elided from this listing. */
2318 static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2320 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2321 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2322 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
2326 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2328 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
/* Detach the hostrcb and return the command block to the free pool. */
2330 list_del(&hostrcb->queue);
2331 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2334 ipr_handle_log_data(ioa_cfg, hostrcb);
/* Adapter asked for a reset via the failing-device IOASC. */
2335 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2336 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
/* IOA_WAS_RESET is expected during reset/reload — don't complain about it. */
2337 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2338 dev_err(&ioa_cfg->pdev->dev,
2339 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
/* Hand the hostrcb back to the adapter for the next notification. */
2342 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2346 * ipr_timeout - An internally generated op has timed out.
2347 * @ipr_cmd: ipr command struct
2349 * This function blocks host requests and initiates an
/* Timeout handler for internally generated ops: logs the event, arms a dump
 * if one was pending, and kicks off an adapter reset (unless a reset driven
 * by a different command is already in flight). Runs under host_lock. */
2355 static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2357 unsigned long lock_flags = 0;
2358 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2361 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2363 ioa_cfg->errors_logged++;
2364 dev_err(&ioa_cfg->pdev->dev,
2365 "Adapter being reset due to command timeout.\n");
/* Promote a pending dump request so the reset path collects it. */
2367 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2368 ioa_cfg->sdt_state = GET_DUMP;
/* Reset only if no reset is in progress, or if we ARE the reset command. */
2370 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2371 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2373 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2378 * ipr_oper_timeout - Adapter timed out transitioning to operational
2379 * @ipr_cmd: ipr command struct
2381 * This function blocks host requests and initiates an
/* Like ipr_timeout(), but for the transition-to-operational step: in
 * addition it burns the remaining reset retries so the reset state machine
 * gives up instead of looping on an adapter that won't come up. */
2387 static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2389 unsigned long lock_flags = 0;
2390 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2393 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2395 ioa_cfg->errors_logged++;
2396 dev_err(&ioa_cfg->pdev->dev,
2397 "Adapter timed out transitioning to operational.\n");
2399 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2400 ioa_cfg->sdt_state = GET_DUMP;
2402 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
/* Exhaust retries: a conditional (elided here) gates this on ipr_fastfail. */
2404 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2405 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2408 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2413 * ipr_reset_reload - Reset/Reload the IOA
2414 * @ioa_cfg: ioa config struct
2415 * @shutdown_type: shutdown type
2417 * This function resets the adapter and re-initializes it.
2418 * This function assumes that all new host commands have been stopped.
/* Synchronous reset/reload: starts a reset (if one isn't already running),
 * drops host_lock while sleeping until it completes, then re-checks adapter
 * health.  Caller must hold host_lock; the tail (return paths after the
 * ioa_is_dead check) is elided from this listing. */
2422 static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2423 enum ipr_shutdown_type shutdown_type)
2425 if (!ioa_cfg->in_reset_reload)
2426 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
/* Sleep (lock dropped) until the reset state machine finishes. */
2428 spin_unlock_irq(ioa_cfg->host->host_lock);
2429 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2430 spin_lock_irq(ioa_cfg->host->host_lock);
2432 /* If we got hit with a host reset while we were already resetting
2433 the adapter for some reason, and the reset failed. */
2434 if (ioa_cfg->ioa_is_dead) {
2443 * ipr_find_ses_entry - Find matching SES in SES table
2444 * @res: resource entry struct of SES
2447 * pointer to SES table entry / NULL on failure
/* Match an SES resource against ipr_ses_table by product ID, treating 'X'
 * in compare_product_id_byte[] as a wildcard position that is compared via
 * the device's inquiry VPD product id.  Returns the matching entry, or NULL
 * (return statements elided from this listing). */
2449 static const struct ipr_ses_table_entry *
2450 ipr_find_ses_entry(struct ipr_resource_entry *res)
2453 struct ipr_std_inq_vpids *vpids;
2454 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2456 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2457 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2458 if (ste->compare_product_id_byte[j] == 'X') {
2459 vpids = &res->std_inq_data.vpids;
2460 if (vpids->product_id[j] == ste->product_id[j])
/* All IPR_PROD_ID_LEN positions matched -> this table entry applies. */
2468 if (matches == IPR_PROD_ID_LEN)
2476 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2477 * @ioa_cfg: ioa config struct
2479 * @bus_width: bus width
2482 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2483 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2484 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2485 * max 160MHz = max 320MB/sec).
/* Compute the max transfer rate (units of 100KHz per the kernel-doc above)
 * for a bus: start from the theoretical max for the width, then clamp to
 * the slowest speed limit imposed by any SES device found on that bus. */
2487 static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2489 struct ipr_resource_entry *res;
2490 const struct ipr_ses_table_entry *ste;
2491 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2493 /* Loop through each config table entry in the config table buffer */
2494 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
/* Only SES (enclosure) devices constrain the bus speed here. */
2495 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2498 if (bus != res->bus)
2501 if (!(ste = ipr_find_ses_entry(res)))
/* Table limit is in MB/s per byte of width; scale to 100KHz units. */
2504 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2507 return max_xfer_rate;
2511 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2512 * @ioa_cfg: ioa config struct
2513 * @max_delay: max delay in micro-seconds to wait
2515 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2518 * 0 on success / other on failure
/* Busy-wait for the IO Debug Acknowledge bit in the sense interrupt
 * register, up to max_delay microseconds.  The delay-increment and return
 * statements are elided from this listing; per the kernel-doc, returns 0
 * on success and non-zero on timeout. */
2520 static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2522 volatile u32 pcii_reg;
2525 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2526 while (delay < max_delay) {
2527 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2529 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2532 /* udelay cannot be used if delay is more than a few milliseconds */
2533 if ((delay / 1000) > MAX_UDELAY_MS)
2534 mdelay(delay / 1000);
2544 * ipr_get_sis64_dump_data_section - Dump IOA memory
2545 * @ioa_cfg: ioa config struct
2546 * @start_addr: adapter address to dump
2547 * @dest: destination kernel buffer
2548 * @length_in_words: length to dump in 4 byte words
/* sis64 dump path: for each word, write the adapter address to the dump
 * address register and read the value back through the dump data register.
 * Destination-pointer increment and the return are elided from this listing. */
2553 static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2555 __be32 *dest, u32 length_in_words)
2559 for (i = 0; i < length_in_words; i++) {
2560 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
/* Dump data is stored big-endian in the kernel buffer. */
2561 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2569 * ipr_get_ldump_data_section - Dump IOA memory
2570 * @ioa_cfg: ioa config struct
2571 * @start_addr: adapter address to dump
2572 * @dest: destination kernel buffer
2573 * @length_in_words: length to dump in 4 byte words
2576 * 0 on success / -EIO on failure
/* LDUMP protocol: pull a section of IOA memory word-by-word through the
 * mailbox using the IO-debug handshake (set reset alert + debug alert, wait
 * for ack, feed address, read words, then unwind).  sis64 adapters bypass
 * all of this via the dedicated dump registers.  Several error-return and
 * delay-accounting lines are elided from this listing. */
2578 static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2580 __be32 *dest, u32 length_in_words)
2582 volatile u32 temp_pcii_reg;
/* sis64: simple register-based dump, no LDUMP handshake needed. */
2586 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2587 dest, length_in_words);
2589 /* Write IOA interrupt reg starting LDUMP state */
2590 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2591 ioa_cfg->regs.set_uproc_interrupt_reg32);
2593 /* Wait for IO debug acknowledge */
2594 if (ipr_wait_iodbg_ack(ioa_cfg,
2595 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2596 dev_err(&ioa_cfg->pdev->dev,
2597 "IOA dump long data transfer timeout\n");
2601 /* Signal LDUMP interlocked - clear IO debug ack */
2602 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2603 ioa_cfg->regs.clr_interrupt_reg);
2605 /* Write Mailbox with starting address */
2606 writel(start_addr, ioa_cfg->ioa_mailbox);
2608 /* Signal address valid - clear IOA Reset alert */
2609 writel(IPR_UPROCI_RESET_ALERT,
2610 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2612 for (i = 0; i < length_in_words; i++) {
2613 /* Wait for IO debug acknowledge */
2614 if (ipr_wait_iodbg_ack(ioa_cfg,
2615 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2616 dev_err(&ioa_cfg->pdev->dev,
2617 "IOA dump short data transfer timeout\n");
2621 /* Read data from mailbox and increment destination pointer */
2622 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2625 /* For all but the last word of data, signal data received */
2626 if (i < (length_in_words - 1)) {
2627 /* Signal dump data received - Clear IO debug Ack */
2628 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2629 ioa_cfg->regs.clr_interrupt_reg);
2633 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2634 writel(IPR_UPROCI_RESET_ALERT,
2635 ioa_cfg->regs.set_uproc_interrupt_reg32);
2637 writel(IPR_UPROCI_IO_DEBUG_ALERT,
2638 ioa_cfg->regs.clr_uproc_interrupt_reg32);
2640 /* Signal dump data received - Clear IO debug Ack */
2641 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2642 ioa_cfg->regs.clr_interrupt_reg);
2644 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2645 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2647 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2649 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2659 #ifdef CONFIG_SCSI_IPR_DUMP
2661 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2662 * @ioa_cfg: ioa config struct
2663 * @pci_address: adapter address
2664 * @length: length of data to copy
2666 * Copy data from PCI adapter to kernel buffer.
2667 * Note: length MUST be a 4 byte multiple
2669 * 0 on success / other on failure
/* Copy `length` bytes of adapter memory (starting at pci_address) into the
 * dump's page list, allocating GFP_ATOMIC pages as needed and stopping at
 * IPR_MAX_IOA_DUMP_SIZE.  host_lock is taken only around each section
 * fetch so an ABORT_DUMP request can interrupt the copy.  Some error-exit
 * and loop-termination lines are elided from this listing. */
2671 static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2672 unsigned long pci_address, u32 length)
2674 int bytes_copied = 0;
2675 int cur_len, rc, rem_len, rem_page_len;
2677 unsigned long lock_flags = 0;
2678 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2680 while (bytes_copied < length &&
2681 (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
/* Current page exhausted (or first pass): grab a fresh page. */
2682 if (ioa_dump->page_offset >= PAGE_SIZE ||
2683 ioa_dump->page_offset == 0) {
2684 page = (__be32 *)__get_free_page(GFP_ATOMIC);
/* Allocation failure: return what we managed to copy so far. */
2688 return bytes_copied;
2691 ioa_dump->page_offset = 0;
2692 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2693 ioa_dump->next_page_index++;
/* Otherwise keep filling the most recently allocated page. */
2695 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
/* Copy the smaller of: bytes left overall, bytes left in this page. */
2697 rem_len = length - bytes_copied;
2698 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2699 cur_len = min(rem_len, rem_page_len);
2701 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2702 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2705 rc = ipr_get_ldump_data_section(ioa_cfg,
2706 pci_address + bytes_copied,
2707 &page[ioa_dump->page_offset / 4],
2708 (cur_len / sizeof(u32)));
2710 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2713 ioa_dump->page_offset += cur_len;
2714 bytes_copied += cur_len;
2722 return bytes_copied;
2726 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2727 * @hdr: dump entry header struct
/* Initialize the common fields of a dump entry header (eye catcher, data
 * offset, success status).  One field assignment between 2734 and 2736 is
 * elided from this listing. */
2732 static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2734 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2736 hdr->offset = sizeof(*hdr);
2737 hdr->status = IPR_DUMP_STATUS_SUCCESS;
2741 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2742 * @ioa_cfg: ioa config struct
2743 * @driver_dump: driver dump struct
/* Add the adapter-type entry to the driver dump: adapter type plus a
 * packed 32-bit firmware version built from the page-3 inquiry VPD. */
2748 static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2749 struct ipr_driver_dump *driver_dump)
2751 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2753 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
/* Entry length excludes the header itself. */
2754 driver_dump->ioa_type_entry.hdr.len =
2755 sizeof(struct ipr_dump_ioa_type_entry) -
2756 sizeof(struct ipr_dump_entry_header);
2757 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2758 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2759 driver_dump->ioa_type_entry.type = ioa_cfg->type;
/* fw_version layout: major | card type | minor[0] | minor[1], MSB first. */
2760 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2761 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2762 ucode_vpd->minor_release[1];
2763 driver_dump->hdr.num_entries++;
2767 * ipr_dump_version_data - Fill in the driver version in the dump.
2768 * @ioa_cfg: ioa config struct
2769 * @driver_dump: driver dump struct
/* Add the driver-version entry (ASCII IPR_DRIVER_VERSION string) to the
 * driver dump and bump the entry count. */
2774 static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2775 struct ipr_driver_dump *driver_dump)
2777 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2778 driver_dump->version_entry.hdr.len =
2779 sizeof(struct ipr_dump_version_entry) -
2780 sizeof(struct ipr_dump_entry_header);
2781 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2782 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
/* IPR_DRIVER_VERSION is a compile-time literal, so strcpy is bounded. */
2783 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2784 driver_dump->hdr.num_entries++;
2788 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2789 * @ioa_cfg: ioa config struct
2790 * @driver_dump: driver dump struct
/* Add the driver trace buffer (IPR_TRACE_SIZE bytes of binary data) to the
 * driver dump and bump the entry count. */
2795 static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2796 struct ipr_driver_dump *driver_dump)
2798 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2799 driver_dump->trace_entry.hdr.len =
2800 sizeof(struct ipr_dump_trace_entry) -
2801 sizeof(struct ipr_dump_entry_header);
2802 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2803 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2804 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2805 driver_dump->hdr.num_entries++;
2809 * ipr_dump_location_data - Fill in the IOA location in the dump.
2810 * @ioa_cfg: ioa config struct
2811 * @driver_dump: driver dump struct
/* Add the adapter's PCI location (its device name, ASCII) to the driver
 * dump and bump the entry count. */
2816 static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2817 struct ipr_driver_dump *driver_dump)
2819 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2820 driver_dump->location_entry.hdr.len =
2821 sizeof(struct ipr_dump_location_entry) -
2822 sizeof(struct ipr_dump_entry_header);
2823 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2824 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
/* dev_name() of a PCI device is short (e.g. "0001:d0:01.0"); fits entry. */
2825 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2826 driver_dump->hdr.num_entries++;
2830 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2831 * @ioa_cfg: ioa config struct
2832 * @dump: dump struct
/* Collect a full adapter dump: build the driver-side headers/entries, read
 * the Smart Dump Table (SDT) address from the mailbox, validate the table,
 * then walk its entries copying each region of IOA memory via
 * ipr_sdt_copy().  host_lock is held while talking to the hardware and
 * dropped for the bulk copy.  Some declarations and return statements are
 * elided from this listing. */
2837 static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2839 unsigned long start_addr, sdt_word;
2840 unsigned long lock_flags = 0;
2841 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2842 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2843 u32 num_entries, start_off, end_off;
2844 u32 bytes_to_copy, bytes_copied, rc;
2845 struct ipr_sdt *sdt;
2851 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Someone else already grabbed or aborted the dump — nothing to do. */
2853 if (ioa_cfg->sdt_state != GET_DUMP) {
2854 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Mailbox holds the SDT address the adapter published for us. */
2858 start_addr = readl(ioa_cfg->ioa_mailbox);
2860 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2861 dev_err(&ioa_cfg->pdev->dev,
2862 "Invalid dump table format: %lx\n", start_addr);
2863 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2867 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2869 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2871 /* Initialize the overall dump header */
2872 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2873 driver_dump->hdr.num_entries = 1;
2874 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2875 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2876 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2877 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
/* Driver-side entries: version, location, adapter type, trace buffer. */
2879 ipr_dump_version_data(ioa_cfg, driver_dump);
2880 ipr_dump_location_data(ioa_cfg, driver_dump);
2881 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2882 ipr_dump_trace_data(ioa_cfg, driver_dump);
2884 /* Update dump_header */
2885 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2887 /* IOA Dump entry */
2888 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2889 ioa_dump->hdr.len = 0;
2890 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2891 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2893 /* First entries in sdt are actually a list of dump addresses and
2894 lengths to gather the real dump data. sdt represents the pointer
2895 to the ioa generated dump table. Dump data will be extracted based
2896 on entries in this table */
2897 sdt = &ioa_dump->sdt;
2899 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2900 sizeof(struct ipr_sdt) / sizeof(__be32));
2902 /* Smart Dump table is ready to use and the first entry is valid */
2903 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2904 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2905 dev_err(&ioa_cfg->pdev->dev,
2906 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
2907 rc, be32_to_cpu(sdt->hdr.state));
2908 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2909 ioa_cfg->sdt_state = DUMP_OBTAINED;
2910 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2914 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
/* Never trust the adapter's count beyond our table size. */
2916 if (num_entries > IPR_NUM_SDT_ENTRIES)
2917 num_entries = IPR_NUM_SDT_ENTRIES;
2919 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2921 for (i = 0; i < num_entries; i++) {
/* Hit the overall cap: mark the dump as partial and stop. */
2922 if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2923 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2927 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2928 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
/* sis64 path (elided if/else): end_token is the byte count directly. */
2930 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2932 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2933 end_off = be32_to_cpu(sdt->entry[i].end_token);
2935 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2936 bytes_to_copy = end_off - start_off;
/* Oversized region: drop the entry rather than blow the dump cap. */
2941 if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2942 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2946 /* Copy data from adapter to driver buffers */
2947 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2950 ioa_dump->hdr.len += bytes_copied;
/* Short copy means allocation failure or abort — record partial status. */
2952 if (bytes_copied != bytes_to_copy) {
2953 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2960 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2962 /* Update dump_header */
2963 driver_dump->hdr.len += ioa_dump->hdr.len;
2965 ioa_cfg->sdt_state = DUMP_OBTAINED;
2970 #define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2974 * ipr_release_dump - Free adapter dump memory
2975 * @kref: kref struct
/* kref release callback: detach the dump from the adapter (under
 * host_lock), reset sdt_state to INACTIVE, then free every captured page.
 * The final kfree of the dump struct itself is elided from this listing. */
2980 static void ipr_release_dump(struct kref *kref)
2982 struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2983 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2984 unsigned long lock_flags = 0;
2988 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2989 ioa_cfg->dump = NULL;
2990 ioa_cfg->sdt_state = INACTIVE;
2991 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2993 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2994 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3001 * ipr_worker_thread - Worker thread
3002 * @work: ioa config struct
3004 * Called at task level from a work thread. This function takes care
3005 * of adding and removing device from the mid-layer as configuration
3006 * changes are detected by the adapter.
/* Workqueue handler: (1) if a dump was requested, collect it and trigger
 * the follow-up reset; (2) otherwise reconcile the mid-layer with the
 * adapter's config table — remove devices flagged del_from_ml, add devices
 * flagged add_to_ml — and emit a KOBJ_CHANGE uevent.  scsi_remove_device/
 * scsi_add_device sleep, so host_lock is dropped around them and the list
 * walk restarts afterwards (restart gotos elided from this listing). */
3011 static void ipr_worker_thread(struct work_struct *work)
3013 unsigned long lock_flags;
3014 struct ipr_resource_entry *res;
3015 struct scsi_device *sdev;
3016 struct ipr_dump *dump;
3017 struct ipr_ioa_cfg *ioa_cfg =
3018 container_of(work, struct ipr_ioa_cfg, work_q);
3019 u8 bus, target, lun;
3023 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3025 if (ioa_cfg->sdt_state == GET_DUMP) {
3026 dump = ioa_cfg->dump;
3028 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Hold a ref across the (lock-dropped) dump collection. */
3031 kref_get(&dump->kref);
3032 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3033 ipr_get_ioa_dump(ioa_cfg, dump);
3034 kref_put(&dump->kref, ipr_release_dump);
3036 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3037 if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3038 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3039 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Device add/remove only when commands are allowed and mid-layer
 * add/delete is permitted (i.e. not mid-reset). */
3046 if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3051 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3052 if (res->del_from_ml && res->sdev) {
3055 if (!scsi_device_get(sdev)) {
3056 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3057 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3058 scsi_remove_device(sdev);
3059 scsi_device_put(sdev);
3060 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3067 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3068 if (res->add_to_ml) {
/* Snapshot the address before dropping the lock. */
3070 target = res->target;
3073 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3074 scsi_add_device(ioa_cfg->host, bus, target, lun);
3075 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3080 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Let udev/userspace know the device set changed. */
3081 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3085 #ifdef CONFIG_SCSI_IPR_TRACE
3087 * ipr_read_trace - Dump the adapter trace
3088 * @kobj: kobject struct
3089 * @bin_attr: bin_attribute struct
3092 * @count: buffer size
3095 * number of bytes printed to buffer
/* sysfs bin_attribute read handler: copy a window of the driver trace
 * buffer to userspace via memory_read_from_buffer(), under host_lock. */
3097 static ssize_t ipr_read_trace(struct kobject *kobj,
3098 struct bin_attribute *bin_attr,
3099 char *buf, loff_t off, size_t count)
3101 struct device *dev = container_of(kobj, struct device, kobj);
3102 struct Scsi_Host *shost = class_to_shost(dev);
3103 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3104 unsigned long lock_flags = 0;
3107 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3108 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3110 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Binary sysfs attribute exposing the driver trace buffer ("trace"). */
3115 static struct bin_attribute ipr_trace_attr = {
3121 .read = ipr_read_trace,
3126 * ipr_show_fw_version - Show the firmware version
3127 * @dev: class device struct
3131 * number of bytes printed to buffer
/* sysfs show handler for "fw_version": format the 4 bytes of page-3
 * inquiry VPD (major, card type, minor[0], minor[1]) as 8 hex digits. */
3133 static ssize_t ipr_show_fw_version(struct device *dev,
3134 struct device_attribute *attr, char *buf)
3136 struct Scsi_Host *shost = class_to_shost(dev);
3137 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3138 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3139 unsigned long lock_flags = 0;
3142 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3143 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3144 ucode_vpd->major_release, ucode_vpd->card_type,
3145 ucode_vpd->minor_release[0],
3146 ucode_vpd->minor_release[1]);
3147 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read-only sysfs attribute "fw_version". */
3151 static struct device_attribute ipr_fw_version_attr = {
3153 .name = "fw_version",
3156 .show = ipr_show_fw_version,
3160 * ipr_show_log_level - Show the adapter's error logging level
3161 * @dev: class device struct
3165 * number of bytes printed to buffer
/* sysfs show handler for "log_level": print the adapter's current error
 * logging verbosity as a decimal integer. */
3167 static ssize_t ipr_show_log_level(struct device *dev,
3168 struct device_attribute *attr, char *buf)
3170 struct Scsi_Host *shost = class_to_shost(dev);
3171 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3172 unsigned long lock_flags = 0;
3175 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3176 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3177 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3182 * ipr_store_log_level - Change the adapter's error logging level
3183 * @dev: class device struct
3187 * number of bytes printed to buffer
/* sysfs store handler for "log_level": parse a decimal value and update
 * the adapter's logging verbosity under host_lock.  NOTE(review):
 * simple_strtoul silently ignores trailing junk — acceptable for a sysfs
 * knob of this era, but kstrtoul would be stricter. */
3189 static ssize_t ipr_store_log_level(struct device *dev,
3190 struct device_attribute *attr,
3191 const char *buf, size_t count)
3193 struct Scsi_Host *shost = class_to_shost(dev);
3194 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3195 unsigned long lock_flags = 0;
3197 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3198 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3199 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read/write sysfs attribute "log_level" (user-writable by root). */
3203 static struct device_attribute ipr_log_level_attr = {
3205 .name = "log_level",
3206 .mode = S_IRUGO | S_IWUSR,
3208 .show = ipr_show_log_level,
3209 .store = ipr_store_log_level
3213 * ipr_store_diagnostics - IOA Diagnostics interface
3214 * @dev: device struct
3216 * @count: buffer size
3218 * This function will reset the adapter and wait a reasonable
3219 * amount of time for any errors that the adapter might log.
3222 * count on success / other on failure
/* sysfs store handler for "run_diagnostics": waits out any in-flight
 * reset, performs a NORMAL-shutdown reset, waits a second for the adapter
 * to log errors, then reports failure if any errors were logged.  The
 * ssleep and return statements are elided from this listing. */
3224 static ssize_t ipr_store_diagnostics(struct device *dev,
3225 struct device_attribute *attr,
3226 const char *buf, size_t count)
3228 struct Scsi_Host *shost = class_to_shost(dev);
3229 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3230 unsigned long lock_flags = 0;
/* Privileged operation: resets the adapter. */
3233 if (!capable(CAP_SYS_ADMIN))
/* Drain any reset already in progress before starting ours. */
3236 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3237 while(ioa_cfg->in_reset_reload) {
3238 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3239 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3240 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3243 ioa_cfg->errors_logged = 0;
3244 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3246 if (ioa_cfg->in_reset_reload) {
3247 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3248 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3250 /* Wait for a second for any errors to be logged */
3253 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3257 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Errors during/after the diagnostic reset => report failure (-EIO path,
 * elided here). */
3258 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Write-only sysfs attribute "run_diagnostics". */
3265 static struct device_attribute ipr_diagnostics_attr = {
3267 .name = "run_diagnostics",
3270 .store = ipr_store_diagnostics
3274 * ipr_show_adapter_state - Show the adapter's state
3275 * @class_dev: device struct
3279 * number of bytes printed to buffer
/* sysfs show handler for the adapter state: "offline" when the adapter is
 * marked dead, "online" otherwise. */
3281 static ssize_t ipr_show_adapter_state(struct device *dev,
3282 struct device_attribute *attr, char *buf)
3284 struct Scsi_Host *shost = class_to_shost(dev);
3285 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3286 unsigned long lock_flags = 0;
3289 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3290 if (ioa_cfg->ioa_is_dead)
3291 len = snprintf(buf, PAGE_SIZE, "offline\n");
3293 len = snprintf(buf, PAGE_SIZE, "online\n");
3294 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3299 * ipr_store_adapter_state - Change adapter state
3300 * @dev: device struct
3302 * @count: buffer size
3304 * This function will change the adapter's state.
3307 * count on success / other on failure
/* sysfs store handler for the adapter state: writing "online" to a dead
 * adapter clears the dead/bringdown flags, restarts the reset state
 * machine, and waits for the reset to finish.  The return of count is
 * elided from this listing. */
3309 static ssize_t ipr_store_adapter_state(struct device *dev,
3310 struct device_attribute *attr,
3311 const char *buf, size_t count)
3313 struct Scsi_Host *shost = class_to_shost(dev);
3314 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3315 unsigned long lock_flags;
3318 if (!capable(CAP_SYS_ADMIN))
3321 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only the dead->online transition is supported here. */
3322 if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3323 ioa_cfg->ioa_is_dead = 0;
3324 ioa_cfg->reset_retries = 0;
3325 ioa_cfg->in_ioa_bringdown = 0;
3326 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3329 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* Read/write sysfs attribute "online_state" (show state / bring online). */
3334 static struct device_attribute ipr_ioa_state_attr = {
3336 .name = "online_state",
3337 .mode = S_IRUGO | S_IWUSR,
3339 .show = ipr_show_adapter_state,
3340 .store = ipr_store_adapter_state
3344 * ipr_store_reset_adapter - Reset the adapter
3345 * @dev: device struct
3347 * @count: buffer size
3349 * This function will reset the adapter.
3352 * count on success / other on failure
/* sysfs store handler for "reset_host": starts a NORMAL-shutdown reset
 * (unless one is already running) and waits for it to complete.  The
 * return of count is elided from this listing. */
3354 static ssize_t ipr_store_reset_adapter(struct device *dev,
3355 struct device_attribute *attr,
3356 const char *buf, size_t count)
3358 struct Scsi_Host *shost = class_to_shost(dev);
3359 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3360 unsigned long lock_flags;
3363 if (!capable(CAP_SYS_ADMIN))
3366 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3367 if (!ioa_cfg->in_reset_reload)
3368 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3369 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3370 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* Write-only sysfs attribute "reset_host". */
3375 static struct device_attribute ipr_ioa_reset_attr = {
3377 .name = "reset_host",
3380 .store = ipr_store_reset_adapter
3384 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3385 * @buf_len: buffer length
3387 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3388 * list to use for microcode download
3391 * pointer to sglist / NULL on failure
/* Build a scatter/gather list big enough for a buf_len-byte microcode
 * image: size elements so the whole image fits in at most IPR_MAX_SGLIST
 * entries, allocate same-order page groups for each, and unwind fully on
 * partial failure.  The final returns and the kfree on the unwind path are
 * elided from this listing. */
3393 static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3395 int sg_size, order, bsize_elem, num_elem, i, j;
3396 struct ipr_sglist *sglist;
3397 struct scatterlist *scatterlist;
3400 /* Get the minimum size per scatter/gather element */
3401 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3403 /* Get the actual size per element */
3404 order = get_order(sg_size);
3406 /* Determine the actual number of bytes per element */
3407 bsize_elem = PAGE_SIZE * (1 << order);
3409 /* Determine the actual number of sg entries needed */
/* Round up so a partial last element is still covered. */
3410 if (buf_len % bsize_elem)
3411 num_elem = (buf_len / bsize_elem) + 1;
3413 num_elem = buf_len / bsize_elem;
3415 /* Allocate a scatter/gather list for the DMA */
/* struct ipr_sglist embeds one scatterlist, hence (num_elem - 1) extras. */
3416 sglist = kzalloc(sizeof(struct ipr_sglist) +
3417 (sizeof(struct scatterlist) * (num_elem - 1)),
3420 if (sglist == NULL) {
3425 scatterlist = sglist->scatterlist;
3426 sg_init_table(scatterlist, num_elem);
3428 sglist->order = order;
3429 sglist->num_sg = num_elem;
3431 /* Allocate a bunch of sg elements */
3432 for (i = 0; i < num_elem; i++) {
3433 page = alloc_pages(GFP_KERNEL, order);
3437 /* Free up what we already allocated */
3438 for (j = i - 1; j >= 0; j--)
3439 __free_pages(sg_page(&scatterlist[j]), order);
/* Lengths/offsets are filled in later by the copy routine. */
3444 sg_set_page(&scatterlist[i], page, 0, 0);
3451 * ipr_free_ucode_buffer - Frees a microcode download buffer
3452 * @sglist: scatter/gather list pointer
3454 * Free a DMA'able ucode download buffer previously allocated with
3455 * ipr_alloc_ucode_buffer
/* Release the pages backing a ucode download buffer allocated by
 * ipr_alloc_ucode_buffer. Presumably the sglist container itself is
 * freed in the elided tail of this function — confirm against full source. */
3460 static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3464 for (i = 0; i < sglist->num_sg; i++)
3465 __free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3471 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3472 * @sglist: scatter/gather list pointer
3473 * @buffer: buffer pointer
3474 * @len: buffer length
3476 * Copy a microcode image from a user buffer into a buffer allocated by
3477 * ipr_alloc_ucode_buffer
3480 * 0 on success / other on failure
/* Copy a microcode image into the page-backed sglist: full bsize_elem-sized
 * chunks first, then the remainder into one final partial element. Each
 * entry's length is set to the number of bytes actually copied. */
3482 static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3483 u8 *buffer, u32 len)
3485 int bsize_elem, i, result = 0;
3486 struct scatterlist *scatterlist;
3489 /* Determine the actual number of bytes per element */
3490 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3492 scatterlist = sglist->scatterlist;
/* Copy whole elements. */
3494 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3495 struct page *page = sg_page(&scatterlist[i]);
3498 memcpy(kaddr, buffer, bsize_elem);
3501 scatterlist[i].length = bsize_elem;
/* Copy the trailing partial element, if any. */
3509 if (len % bsize_elem) {
3510 struct page *page = sg_page(&scatterlist[i]);
3513 memcpy(kaddr, buffer, len % bsize_elem);
3516 scatterlist[i].length = len % bsize_elem;
/* Record total payload length for the IOADL builders. */
3519 sglist->buffer_len = len;
3524 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3525 * @ipr_cmd: ipr command struct
3526 * @sglist: scatter/gather list
3528 * Builds a microcode download IOA data list (IOADL).
/* Build the 64-bit IOA data list for a microcode download: one WRITE
 * descriptor per DMA-mapped sg entry, with the last entry flagged LAST. */
3531 static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3532 struct ipr_sglist *sglist)
3534 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3535 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3536 struct scatterlist *scatterlist = sglist->scatterlist;
3539 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3540 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3541 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3544 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3545 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3546 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3547 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3548 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
/* Mark the final descriptor so the IOA knows where the list ends. */
3551 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3555 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3556 * @ipr_cmd: ipr command struct
3557 * @sglist: scatter/gather list
3559 * Builds a microcode download IOA data list (IOADL).
/* 32-bit variant of ipr_build_ucode_ioadl64: flags and length share one
 * big-endian word per descriptor; last entry is flagged LAST. */
3562 static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3563 struct ipr_sglist *sglist)
3565 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3566 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3567 struct scatterlist *scatterlist = sglist->scatterlist;
3570 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3571 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3572 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3575 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3577 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3578 ioadl[i].flags_and_data_len =
3579 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3581 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3584 ioadl[i-1].flags_and_data_len |=
3585 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3589 * ipr_update_ioa_ucode - Update IOA's microcode
3590 * @ioa_cfg: ioa config struct
3591 * @sglist: scatter/gather list
3593 * Initiate an adapter reset to update the IOA's microcode
3596 * 0 on success / -EIO on failure
/* Push new microcode to the IOA: wait out any in-flight reset, refuse a
 * concurrent download, DMA-map the image, then trigger an adapter reset
 * (the reset path performs the actual download via ucode_sglist) and wait
 * for it to finish before unhooking the sglist. */
3598 static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3599 struct ipr_sglist *sglist)
3601 unsigned long lock_flags;
/* Drain any reset already in progress; drop the lock while sleeping. */
3603 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3604 while(ioa_cfg->in_reset_reload) {
3605 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3606 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3607 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only one download may be outstanding at a time. */
3610 if (ioa_cfg->ucode_sglist) {
3611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3612 dev_err(&ioa_cfg->pdev->dev,
3613 "Microcode download already in progress\n");
/* NOTE(review): passes enum dma_data_direction DMA_TO_DEVICE to
 * pci_map_sg(), whose parameter is a PCI_DMA_* direction. The numeric
 * values coincide, but confirm against the DMA-mapping API. */
3617 sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3618 sglist->num_sg, DMA_TO_DEVICE);
3620 if (!sglist->num_dma_sg) {
3621 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3622 dev_err(&ioa_cfg->pdev->dev,
3623 "Failed to map microcode download buffer!\n");
/* Hand the mapped image to the reset path and kick off the reset. */
3627 ioa_cfg->ucode_sglist = sglist;
3628 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3629 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3630 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3632 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3633 ioa_cfg->ucode_sglist = NULL;
3634 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3639 * ipr_store_update_fw - Update the firmware on the adapter
3640 * @dev: device struct
3642 * @count: buffer size
3644 * This function will update the firmware on the adapter.
3647 * count on success / other on failure
/* sysfs store handler: load a firmware image by name (via request_firmware),
 * validate its header, stage it into a DMA sg buffer, and drive the update
 * through ipr_update_ioa_ucode. Root-only. */
3649 static ssize_t ipr_store_update_fw(struct device *dev,
3650 struct device_attribute *attr,
3651 const char *buf, size_t count)
3653 struct Scsi_Host *shost = class_to_shost(dev);
3654 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3655 struct ipr_ucode_image_header *image_hdr;
3656 const struct firmware *fw_entry;
3657 struct ipr_sglist *sglist;
3660 int len, result, dnld_size;
3662 if (!capable(CAP_SYS_ADMIN))
/* Strip the last character (the sysfs-supplied '\n') from the filename.
 * NOTE(review): snprintf returns the would-be length, so if buf exceeds
 * the fname buffer, len can exceed it too and fname[len-1] writes out of
 * bounds; also, input without a trailing newline loses its last char. */
3665 len = snprintf(fname, 99, "%s", buf);
3666 fname[len-1] = '\0';
3668 if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3669 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3673 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
/* Reject images whose header overruns the file or whose card type does
 * not match this adapter's VPD (when the VPD reports one). */
3675 if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3676 (ioa_cfg->vpd_cbs->page3_data.card_type &&
3677 ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3678 dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3679 release_firmware(fw_entry);
/* Payload starts right after the image header. */
3683 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3684 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3685 sglist = ipr_alloc_ucode_buffer(dnld_size);
3688 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3689 release_firmware(fw_entry);
3693 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3696 dev_err(&ioa_cfg->pdev->dev,
3697 "Microcode buffer copy to DMA buffer failed\n");
3701 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
/* Common cleanup for success and failure paths. */
3706 ipr_free_ucode_buffer(sglist);
3707 release_firmware(fw_entry);
/* sysfs "update_fw" attribute: write a firmware filename to start an update. */
3711 static struct device_attribute ipr_update_fw_attr = {
3713 .name = "update_fw",
3716 .store = ipr_store_update_fw
/* Host-level sysfs attributes registered for each adapter. */
3719 static struct device_attribute *ipr_ioa_attrs[] = {
3720 &ipr_fw_version_attr,
3721 &ipr_log_level_attr,
3722 &ipr_diagnostics_attr,
3723 &ipr_ioa_state_attr,
3724 &ipr_ioa_reset_attr,
3725 &ipr_update_fw_attr,
3729 #ifdef CONFIG_SCSI_IPR_DUMP
3731 * ipr_read_dump - Dump the adapter
3732 * @kobj: kobject struct
3733 * @bin_attr: bin_attribute struct
3736 * @count: buffer size
3739 * number of bytes printed to buffer
/* sysfs binary read handler for the adapter dump. The dump image is exposed
 * as three concatenated regions: the driver_dump header, the ioa_dump header
 * (up to its ioa_data member), then the paged ioa_data. A kref pins the dump
 * while the copy runs so a concurrent free cannot pull it out from under us. */
3741 static ssize_t ipr_read_dump(struct kobject *kobj,
3742 struct bin_attribute *bin_attr,
3743 char *buf, loff_t off, size_t count)
3745 struct device *cdev = container_of(kobj, struct device, kobj);
3746 struct Scsi_Host *shost = class_to_shost(cdev);
3747 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3748 struct ipr_dump *dump;
3749 unsigned long lock_flags = 0;
3754 if (!capable(CAP_SYS_ADMIN))
3757 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3758 dump = ioa_cfg->dump;
/* Nothing to read unless a dump has actually been obtained. */
3760 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3761 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Pin the dump before dropping the lock. */
3764 kref_get(&dump->kref);
3765 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3767 if (off > dump->driver_dump.hdr.len) {
3768 kref_put(&dump->kref, ipr_release_dump);
/* Clamp the read to the end of the dump. */
3772 if (off + count > dump->driver_dump.hdr.len) {
3773 count = dump->driver_dump.hdr.len - off;
/* Region 1: driver_dump header. */
3777 if (count && off < sizeof(dump->driver_dump)) {
3778 if (off + count > sizeof(dump->driver_dump))
3779 len = sizeof(dump->driver_dump) - off;
3782 src = (u8 *)&dump->driver_dump + off;
3783 memcpy(buf, src, len);
/* Rebase the offset into the next region. */
3789 off -= sizeof(dump->driver_dump);
/* Region 2: ioa_dump header up to its ioa_data member. */
3791 if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3792 if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3793 len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3796 src = (u8 *)&dump->ioa_dump + off;
3797 memcpy(buf, src, len);
3803 off -= offsetof(struct ipr_ioa_dump, ioa_data);
/* Region 3: ioa_data is stored as an array of pages; copy at most to the
 * next page boundary per iteration. */
3806 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3807 len = PAGE_ALIGN(off) - off;
3810 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3811 src += off & ~PAGE_MASK;
3812 memcpy(buf, src, len);
/* Drop our pin; frees the dump if we were the last holder. */
3818 kref_put(&dump->kref, ipr_release_dump);
3823 * ipr_alloc_dump - Prepare for adapter dump
3824 * @ioa_cfg: ioa config struct
3827 * 0 on success / other on failure
/* Allocate and install a dump structure, moving sdt_state to WAIT_FOR_DUMP.
 * If the IOA is already dead and no dump was taken, schedule the work queue
 * to collect one immediately. */
3829 static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3831 struct ipr_dump *dump;
3832 unsigned long lock_flags = 0;
3834 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3837 ipr_err("Dump memory allocation failed\n");
3841 kref_init(&dump->kref);
3842 dump->ioa_cfg = ioa_cfg;
3844 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only install if no dump activity is pending. */
3846 if (INACTIVE != ioa_cfg->sdt_state) {
3847 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3852 ioa_cfg->dump = dump;
3853 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3854 if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3855 ioa_cfg->dump_taken = 1;
3856 schedule_work(&ioa_cfg->work_q);
3858 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3864 * ipr_free_dump - Free adapter dump memory
3865 * @ioa_cfg: ioa config struct
3868 * 0 on success / other on failure
/* Detach the adapter's dump under the host lock and drop the kref; the dump
 * memory is released once the last reader's kref is gone. */
3870 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3872 struct ipr_dump *dump;
3873 unsigned long lock_flags = 0;
3877 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3878 dump = ioa_cfg->dump;
3880 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3884 ioa_cfg->dump = NULL;
3885 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3887 kref_put(&dump->kref, ipr_release_dump);
3894 * ipr_write_dump - Setup dump state of adapter
3895 * @kobj: kobject struct
3896 * @bin_attr: bin_attribute struct
3899 * @count: buffer size
3902 * number of bytes printed to buffer
/* sysfs binary write handler: '1' arms dump collection, '0' frees the
 * current dump. Root-only. */
3904 static ssize_t ipr_write_dump(struct kobject *kobj,
3905 struct bin_attribute *bin_attr,
3906 char *buf, loff_t off, size_t count)
3908 struct device *cdev = container_of(kobj, struct device, kobj);
3909 struct Scsi_Host *shost = class_to_shost(cdev);
3910 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3913 if (!capable(CAP_SYS_ADMIN))
3917 rc = ipr_alloc_dump(ioa_cfg);
3918 else if (buf[0] == '0')
3919 rc = ipr_free_dump(ioa_cfg);
/* Binary sysfs attribute exposing the adapter dump (root read/write). */
3929 static struct bin_attribute ipr_dump_attr = {
3932 .mode = S_IRUSR | S_IWUSR,
3935 .read = ipr_read_dump,
3936 .write = ipr_write_dump
/* No-op stub used when CONFIG_SCSI_IPR_DUMP is disabled; always succeeds.
 * Dropped the stray ';' after the body: it forms an empty file-scope
 * declaration, which strict ISO C rejects (-pedantic warns). */
3939 static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3943 * ipr_change_queue_depth - Change the device's queue depth
3944 * @sdev: scsi device struct
3945 * @qdepth: depth to set
3946 * @reason: calling context
/* scsi_host_template change_queue_depth hook: clamp SATA (gata) devices to
 * IPR_MAX_CMD_PER_ATA_LUN, apply the depth, and report the resulting depth. */
3951 static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3954 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3955 struct ipr_resource_entry *res;
3956 unsigned long lock_flags = 0;
/* Only the default reason is supported. */
3958 if (reason != SCSI_QDEPTH_DEFAULT)
3961 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3962 res = (struct ipr_resource_entry *)sdev->hostdata;
3964 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
3965 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
3966 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3968 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3969 return sdev->queue_depth;
3973 * ipr_change_queue_type - Change the device's queue type
3974 * @sdev: scsi device struct
3975 * @tag_type: type of tags to use
3978 * actual queue type set
/* scsi_host_template change_queue_type hook: enable/disable TCQ for generic
 * SCSI devices that support tagging; returns the tag type actually in effect. */
3980 static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3982 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3983 struct ipr_resource_entry *res;
3984 unsigned long lock_flags = 0;
3986 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3987 res = (struct ipr_resource_entry *)sdev->hostdata;
3990 if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3992 * We don't bother quiescing the device here since the
3993 * adapter firmware does it for us.
3995 scsi_set_tag_type(sdev, tag_type);
3998 scsi_activate_tcq(sdev, sdev->queue_depth);
4000 scsi_deactivate_tcq(sdev, sdev->queue_depth);
4006 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4011 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4012 * @dev: device struct
4016 * number of bytes printed to buffer
/* Per-device sysfs show: print the IOA resource handle for this sdev, or
 * return -ENXIO if the device has no resource entry. */
4018 static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4020 struct scsi_device *sdev = to_scsi_device(dev);
4021 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4022 struct ipr_resource_entry *res;
4023 unsigned long lock_flags = 0;
4024 ssize_t len = -ENXIO;
4026 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4027 res = (struct ipr_resource_entry *)sdev->hostdata;
/* NOTE(review): res_handle is printed raw with %08X; if it is stored
 * big-endian (__be32) this shows wire order — confirm against ipr.h. */
4029 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4030 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Per-device sysfs "adapter_handle" attribute. */
4034 static struct device_attribute ipr_adapter_handle_attr = {
4036 .name = "adapter_handle",
4039 .show = ipr_show_adapter_handle
4043 * ipr_show_resource_path - Show the resource path for this device.
4044 * @dev: device struct
4048 * number of bytes printed to buffer
/* Per-device sysfs show: format this device's resource path into buf,
 * or return -ENXIO when the device has no resource entry. */
4050 static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4052 struct scsi_device *sdev = to_scsi_device(dev);
4053 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4054 struct ipr_resource_entry *res;
4055 unsigned long lock_flags = 0;
4056 ssize_t len = -ENXIO;
4057 char buffer[IPR_MAX_RES_PATH_LENGTH];
4059 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4060 res = (struct ipr_resource_entry *)sdev->hostdata;
4062 len = snprintf(buf, PAGE_SIZE, "%s\n",
4063 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4064 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Per-device sysfs "resource_path" attribute. */
4068 static struct device_attribute ipr_resource_path_attr = {
4070 .name = "resource_path",
4073 .show = ipr_show_resource_path
/* Per-device sysfs attributes registered for each SCSI device. */
4076 static struct device_attribute *ipr_dev_attrs[] = {
4077 &ipr_adapter_handle_attr,
4078 &ipr_resource_path_attr,
4083 * ipr_biosparam - Return the HSC mapping
4084 * @sdev: scsi device struct
4085 * @block_device: block device pointer
4086 * @capacity: capacity of the device
4087 * @parm: Array containing returned HSC values.
4089 * This function generates the HSC parms that fdisk uses.
4090 * We want to make sure we return something that places partitions
4091 * on 4k boundaries for best performance with the IOA.
/* Report a fixed 128-head/32-sector geometry (see comment above) so fdisk
 * places partitions on 4k boundaries; cylinders = capacity / (128*32). */
4096 static int ipr_biosparam(struct scsi_device *sdev,
4097 struct block_device *block_device,
4098 sector_t capacity, int *parm)
4106 cylinders = capacity;
4107 sector_div(cylinders, (128 * 32));
4112 parm[2] = cylinders;
4118 * ipr_find_starget - Find target based on bus/target.
4119 * @starget: scsi target struct
4122 * resource entry pointer if found / NULL if not found
/* Linear search of the used-resource list for the entry matching this
 * target's channel/id; returns NULL if not found. Caller presumably holds
 * the host lock — confirm against callers. */
4124 static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4126 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4127 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4128 struct ipr_resource_entry *res;
4130 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4131 if ((res->bus == starget->channel) &&
4132 (res->target == starget->id) &&
/* Forward declaration; the libata port info is defined later in the file. */
4141 static struct ata_port_info sata_port_info;
4144 * ipr_target_alloc - Prepare for commands to a SCSI target
4145 * @starget: scsi target struct
4147 * If the device is a SATA device, this function allocates an
4148 * ATA port with libata, else it does nothing.
4151 * 0 on success / non-0 on failure
/* scsi target_alloc hook: for SATA (gata) resources, allocate an
 * ipr_sata_port and a libata SAS port and cross-link them; for anything
 * else just leave starget->hostdata NULL. */
4153 static int ipr_target_alloc(struct scsi_target *starget)
4155 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4156 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4157 struct ipr_sata_port *sata_port;
4158 struct ata_port *ap;
4159 struct ipr_resource_entry *res;
4160 unsigned long lock_flags;
4162 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4163 res = ipr_find_starget(starget);
4164 starget->hostdata = NULL;
4166 if (res && ipr_is_gata(res)) {
/* Drop the lock for the sleeping allocations below. */
4167 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4168 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4172 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4174 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4175 sata_port->ioa_cfg = ioa_cfg;
4177 sata_port->res = res;
/* Link resource <-> sata_port <-> ata_port <-> starget. */
4179 res->sata_port = sata_port;
4180 ap->private_data = sata_port;
4181 starget->hostdata = sata_port;
4187 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4193 * ipr_target_destroy - Destroy a SCSI target
4194 * @starget: scsi target struct
4196 * If the device was a SATA device, this function frees the libata
4197 * ATA port, else it does nothing.
/* scsi target_destroy hook: on SIS64 adapters, release the virtual-bus
 * target id bit; if a SATA port was allocated for this target, tear down
 * its libata port. */
4200 static void ipr_target_destroy(struct scsi_target *starget)
4202 struct ipr_sata_port *sata_port = starget->hostdata;
4203 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4204 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4206 if (ioa_cfg->sis64) {
4207 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4208 clear_bit(starget->id, ioa_cfg->array_ids);
4209 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4210 clear_bit(starget->id, ioa_cfg->vset_ids);
4211 else if (starget->channel == 0)
4212 clear_bit(starget->id, ioa_cfg->target_ids);
4216 starget->hostdata = NULL;
4217 ata_sas_port_destroy(sata_port->ap);
4223 * ipr_find_sdev - Find device based on bus/target/lun.
4224 * @sdev: scsi device struct
4227 * resource entry pointer if found / NULL if not found
/* Linear search of the used-resource list for the entry matching this
 * sdev's channel/id/lun; returns NULL if not found. */
4229 static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4231 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4232 struct ipr_resource_entry *res;
4234 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4235 if ((res->bus == sdev->channel) &&
4236 (res->target == sdev->id) &&
4237 (res->lun == sdev->lun))
4245 * ipr_slave_destroy - Unconfigure a SCSI device
4246 * @sdev: scsi device struct
/* scsi slave_destroy hook: unhook the resource entry from the sdev and,
 * for SATA resources, disable the ata port and clear the back-pointer. */
4251 static void ipr_slave_destroy(struct scsi_device *sdev)
4253 struct ipr_resource_entry *res;
4254 struct ipr_ioa_cfg *ioa_cfg;
4255 unsigned long lock_flags = 0;
4257 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4259 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4260 res = (struct ipr_resource_entry *) sdev->hostdata;
4263 ata_port_disable(res->sata_port->ap);
4264 sdev->hostdata = NULL;
4266 res->sata_port = NULL;
4268 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4272 * ipr_slave_configure - Configure a SCSI device
4273 * @sdev: scsi device struct
4275 * This function configures the specified scsi device.
/* scsi slave_configure hook: apply per-resource-type settings (RAID type,
 * scsi_level, timeouts, max sectors, restart, queue depth) and, for SATA
 * devices, complete libata slave configuration outside the host lock. */
4280 static int ipr_slave_configure(struct scsi_device *sdev)
4282 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4283 struct ipr_resource_entry *res;
4284 struct ata_port *ap = NULL;
4285 unsigned long lock_flags = 0;
4286 char buffer[IPR_MAX_RES_PATH_LENGTH];
4288 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4289 res = sdev->hostdata;
/* Advanced-function DASD appears as a RAID member; hide it from ULDs. */
4291 if (ipr_is_af_dasd_device(res))
4292 sdev->type = TYPE_RAID;
4293 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4294 sdev->scsi_level = 4;
4295 sdev->no_uld_attach = 1;
/* Volume sets get a longer request timeout and a capped transfer size. */
4297 if (ipr_is_vset_device(res)) {
4298 blk_queue_rq_timeout(sdev->request_queue,
4299 IPR_VSET_RW_TIMEOUT);
4300 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4302 if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
4303 sdev->allow_restart = 1;
4304 if (ipr_is_gata(res) && res->sata_port)
4305 ap = res->sata_port->ap;
/* Drop the lock before queue-depth and libata calls. */
4306 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4309 scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4310 ata_sas_slave_configure(sdev, ap);
4312 scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4314 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4315 ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4318 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4323 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4324 * @sdev: scsi device struct
4326 * This function initializes an ATA port so that future commands
4327 * sent through queuecommand will work.
/* Initialize the libata port hanging off this sdev's target so subsequent
 * queuecommand calls work; on init failure tear the sdev back down.
 * NOTE(review): the NULL check on sata_port before use is presumably in
 * the elided lines — confirm against full source. */
4332 static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4334 struct ipr_sata_port *sata_port = NULL;
4338 if (sdev->sdev_target)
4339 sata_port = sdev->sdev_target->hostdata;
4341 rc = ata_sas_port_init(sata_port->ap);
4343 ipr_slave_destroy(sdev);
4350 * ipr_slave_alloc - Prepare for commands to a device.
4351 * @sdev: scsi device struct
4353 * This function saves a pointer to the resource entry
4354 * in the scsi device struct if the device exists. We
4355 * can then use this pointer in ipr_queuecommand when
4356 * handling new commands.
4359 * 0 on success / -ENXIO if device does not exist
/* scsi slave_alloc hook: stash the matching resource entry in
 * sdev->hostdata for fast lookup in queuecommand; SATA resources are
 * handed on to ipr_ata_slave_alloc. Returns -ENXIO if no entry exists
 * (per the kernel-doc above). */
4361 static int ipr_slave_alloc(struct scsi_device *sdev)
4363 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4364 struct ipr_resource_entry *res;
4365 unsigned long lock_flags;
4368 sdev->hostdata = NULL;
4370 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4372 res = ipr_find_sdev(sdev);
4377 sdev->hostdata = res;
/* Non-NACA models need a SYNC COMPLETE after (re)configuration. */
4378 if (!ipr_is_naca_model(res))
4379 res->needs_sync_complete = 1;
4381 if (ipr_is_gata(res)) {
4382 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4383 return ipr_ata_slave_alloc(sdev);
4387 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4393 * ipr_eh_host_reset - Reset the host adapter
4394 * @scsi_cmd: scsi command struct
/* Error-handler host reset, called with the host lock held by the wrapper
 * below: flip a pending dump request to GET_DUMP, then reset/reload the
 * adapter with an abbreviated shutdown. */
4399 static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
4401 struct ipr_ioa_cfg *ioa_cfg;
4405 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4407 dev_err(&ioa_cfg->pdev->dev,
4408 "Adapter being reset as a result of error recovery.\n");
4410 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4411 ioa_cfg->sdt_state = GET_DUMP;
4413 rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
/* Locking wrapper for __ipr_eh_host_reset (EH callbacks run lockless). */
4419 static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4423 spin_lock_irq(cmd->device->host->host_lock);
4424 rc = __ipr_eh_host_reset(cmd);
4425 spin_unlock_irq(cmd->device->host->host_lock);
4431 * ipr_device_reset - Reset the device
4432 * @ioa_cfg: ioa config struct
4433 * @res: resource entry struct
4435 * This function issues a device reset to the affected device.
4436 * If the device is a SCSI device, a LUN reset will be sent
4437 * to the device first. If that does not work, a target reset
4438 * will be sent. If the device is a SATA device, a PHY reset will
4442 * 0 on success / non-zero on failure
/* Issue an IPR_RESET_DEVICE IOA command to the given resource; for SATA
 * (gata) resources this becomes a PHY reset with ATA register flags in the
 * add_cmd_parms area. Blocks until the command completes, saves the ATA
 * IOASA for SATA devices, and returns 0 / -EIO based on the sense key. */
4444 static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4445 struct ipr_resource_entry *res)
4447 struct ipr_cmnd *ipr_cmd;
4448 struct ipr_ioarcb *ioarcb;
4449 struct ipr_cmd_pkt *cmd_pkt;
4450 struct ipr_ioarcb_ata_regs *regs;
4454 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4455 ioarcb = &ipr_cmd->ioarcb;
4456 cmd_pkt = &ioarcb->cmd_pkt;
/* SIS64 keeps the ATA regs in the command's ioadl area; older adapters
 * embed them in the IOARCB itself. */
4458 if (ipr_cmd->ioa_cfg->sis64) {
4459 regs = &ipr_cmd->i.ata_ioadl.regs;
4460 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4462 regs = &ioarcb->u.add_data.u.regs;
4464 ioarcb->res_handle = res->res_handle;
4465 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4466 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4467 if (ipr_is_gata(res)) {
4468 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4469 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4470 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4473 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4474 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4475 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
/* Preserve the ATA status unless the IOA itself was reset underneath us. */
4476 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
4477 memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
4478 sizeof(struct ipr_ioasa_gata));
4481 return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4485 * ipr_sata_reset - Reset the SATA port
4486 * @link: SATA link to reset
4487 * @classes: class of the attached device
4489 * This function issues a SATA phy reset to the affected ATA link.
4492 * 0 on success / non-zero on failure
/* libata hardreset callback: wait out any adapter reset, then issue a
 * device (PHY) reset for the link's resource and report its ATA class. */
4494 static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4495 unsigned long deadline)
4497 struct ipr_sata_port *sata_port = link->ap->private_data;
4498 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4499 struct ipr_resource_entry *res;
4500 unsigned long lock_flags = 0;
4504 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* An adapter reset may be in flight; sleep (unlocked) until it is done. */
4505 while(ioa_cfg->in_reset_reload) {
4506 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4507 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4508 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4511 res = sata_port->res;
4513 rc = ipr_device_reset(ioa_cfg, res);
4514 *classes = res->ata_class;
4517 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4523 * ipr_eh_dev_reset - Reset the device
4524 * @scsi_cmd: scsi command struct
4526 * This function issues a device reset to the affected device.
4527 * A LUN reset will be sent to the device first. If that does
4528 * not work, a target reset will be sent.
/* Error-handler device reset, called with the host lock held by the wrapper
 * below: fail outstanding commands for the resource, then either run the
 * libata error handler (SATA) or issue an IPR device reset (SCSI). */
4533 static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4535 struct ipr_cmnd *ipr_cmd;
4536 struct ipr_ioa_cfg *ioa_cfg;
4537 struct ipr_resource_entry *res;
4538 struct ata_port *ap;
4542 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4543 res = scsi_cmd->device->hostdata;
4549 * If we are currently going through reset/reload, return failed. This will force the
4550 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4553 if (ioa_cfg->in_reset_reload)
4555 if (ioa_cfg->ioa_is_dead)
/* Redirect completion of every pending command on this resource to the
 * EH done functions; force-fail any attached ATA qc. */
4558 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4559 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4560 if (ipr_cmd->scsi_cmd)
4561 ipr_cmd->done = ipr_scsi_eh_done;
4563 ipr_cmd->done = ipr_sata_eh_done;
4564 if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4565 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4566 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4571 res->resetting_device = 1;
4572 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4574 if (ipr_is_gata(res) && res->sata_port) {
4575 ap = res->sata_port->ap;
/* libata EH sleeps; drop the host lock around it. */
4576 spin_unlock_irq(scsi_cmd->device->host->host_lock);
4577 ata_std_error_handler(ap);
4578 spin_lock_irq(scsi_cmd->device->host->host_lock);
/* Re-scan the pending queue after reacquiring the lock. */
4580 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4581 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4587 rc = ipr_device_reset(ioa_cfg, res);
4588 res->resetting_device = 0;
4591 return (rc ? FAILED : SUCCESS);
/* Locking wrapper for __ipr_eh_dev_reset (EH callbacks run lockless). */
4594 static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4598 spin_lock_irq(cmd->device->host->host_lock);
4599 rc = __ipr_eh_dev_reset(cmd);
4600 spin_unlock_irq(cmd->device->host->host_lock);
4606 * ipr_bus_reset_done - Op done function for bus reset.
4607 * @ipr_cmd: ipr command struct
4609 * This function is the op done function for a bus reset
/* Completion handler for the bus reset issued from ipr_abort_timeout:
 * report the bus reset to the midlayer (non-SIS64), then either mark the
 * sibling abort as reset-complete or wake its sleeping EH thread. */
4614 static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4616 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4617 struct ipr_resource_entry *res;
4620 if (!ioa_cfg->sis64)
4621 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4622 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4623 scsi_report_bus_reset(ioa_cfg->host, res->bus);
4629 * If abort has not completed, indicate the reset has, else call the
4630 * abort's done function to wake the sleeping eh thread
4632 if (ipr_cmd->sibling->sibling)
4633 ipr_cmd->sibling->sibling = NULL;
4635 ipr_cmd->sibling->done(ipr_cmd->sibling);
4637 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4642 * ipr_abort_timeout - An abort task has timed out
4643 * @ipr_cmd: ipr command struct
4645 * This function handles when an abort task times out. If this
4646 * happens we issue a bus reset since we have resources tied
4647 * up that must be freed before returning to the midlayer.
/* Timer handler: an abort (cancel-all) command timed out. Escalate to a
 * bus reset for the same resource handle, linking the two commands via
 * ->sibling so ipr_bus_reset_done can coordinate with the waiting abort. */
4652 static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4654 struct ipr_cmnd *reset_cmd;
4655 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4656 struct ipr_cmd_pkt *cmd_pkt;
4657 unsigned long lock_flags = 0;
4660 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* If the abort already completed, or the adapter is resetting anyway,
 * there is nothing to escalate. */
4661 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4662 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4666 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4667 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4668 ipr_cmd->sibling = reset_cmd;
4669 reset_cmd->sibling = ipr_cmd;
4670 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4671 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4672 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4673 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4674 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4676 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4677 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4682 * ipr_cancel_op - Cancel specified op
4683 * @scsi_cmd: scsi command struct
4685 * This function cancels specified op.
/* Abort a single outstanding op: if it is still pending, redirect its
 * completion to the EH done function, then issue a blocking
 * IPR_CANCEL_ALL_REQUESTS for the resource (ipr_abort_timeout escalates
 * to a bus reset if that times out). Returns SUCCESS/FAILED by sense key. */
4690 static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4692 struct ipr_cmnd *ipr_cmd;
4693 struct ipr_ioa_cfg *ioa_cfg;
4694 struct ipr_resource_entry *res;
4695 struct ipr_cmd_pkt *cmd_pkt;
4700 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4701 res = scsi_cmd->device->hostdata;
4703 /* If we are currently going through reset/reload, return failed.
4704 * This will force the mid-layer to call ipr_eh_host_reset,
4705 * which will then go to sleep and wait for the reset to complete
4707 if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
/* Only generic SCSI resources can be cancelled this way. */
4709 if (!res || !ipr_is_gscsi(res))
4712 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4713 if (ipr_cmd->scsi_cmd == scsi_cmd) {
4714 ipr_cmd->done = ipr_scsi_eh_done;
4723 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4724 ipr_cmd->ioarcb.res_handle = res->res_handle;
4725 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4726 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4727 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4728 ipr_cmd->u.sdev = scsi_cmd->device;
4730 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4732 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4733 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4736 * If the abort task timed out and we sent a bus reset, we will get
4737 * one the following responses to the abort
4739 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4744 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4745 if (!ipr_is_naca_model(res))
4746 res->needs_sync_complete = 1;
4749 return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4753 * ipr_eh_abort - Abort a single op
4754 * @scsi_cmd: scsi command struct
/* eh_abort_handler entry point: grabs the host lock and delegates the
 * actual cancel to ipr_cancel_op(). */
4759 static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4761 unsigned long flags;
/* NOTE(review): "int rc;" and the trailing "return rc;" are not visible
 * in this extract -- confirm against the full source. */
4766 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4767 rc = ipr_cancel_op(scsi_cmd);
4768 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4775 * ipr_handle_other_interrupt - Handle "other" interrupts
4776 * @ioa_cfg: ioa config struct
4777 * @int_reg: interrupt register
4780 * IRQ_NONE / IRQ_HANDLED
/* Handle non-HRRQ ("other") interrupt causes: transition-to-operational
 * during a reset, unit checks, and permanent IOA failures. Called from
 * ipr_isr() with the host lock held. */
4782 static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4783 volatile u32 int_reg)
4785 irqreturn_t rc = IRQ_HANDLED;
4787 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4788 /* Mask the interrupt */
4789 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4791 /* Clear the interrupt */
4792 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4793 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* The adapter has become operational: advance the in-flight reset job. */
4795 list_del(&ioa_cfg->reset_cmd->queue);
4796 del_timer(&ioa_cfg->reset_cmd->timer);
4797 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
/* NOTE(review): the "} else {" error branch separating the paths below
 * is not visible in this extract -- confirm against full source. */
4799 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4800 ioa_cfg->ioa_unit_checked = 1;
4802 dev_err(&ioa_cfg->pdev->dev,
4803 "Permanent IOA failure. 0x%08X\n", int_reg);
/* If a dump was requested, have the reset path collect it. */
4805 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4806 ioa_cfg->sdt_state = GET_DUMP;
/* Quiesce all interrupt sources and start an adapter reset. */
4808 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4809 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4816 * ipr_isr_eh - Interrupt service routine error handler
4817 * @ioa_cfg: ioa config struct
4818 * @msg: message to log
4823 static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4825 ioa_cfg->errors_logged++;
4826 dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4828 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4829 ioa_cfg->sdt_state = GET_DUMP;
4831 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4835 * ipr_isr - Interrupt service routine
4837 * @devp: pointer to ioa config struct
4840 * IRQ_NONE / IRQ_HANDLED
/* Primary interrupt service routine: drains completed ops from the Host
 * Request/Response Queue (HRRQ) and dispatches non-HRRQ causes to
 * ipr_handle_other_interrupt(). */
4842 static irqreturn_t ipr_isr(int irq, void *devp)
4844 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4845 unsigned long lock_flags = 0;
4846 volatile u32 int_reg, int_mask_reg;
/* NOTE(review): declarations of cmd_index/ioasc/num_hrrq used below are
 * not visible in this extract -- confirm against full source. */
4850 struct ipr_cmnd *ipr_cmd;
4851 irqreturn_t rc = IRQ_NONE;
4853 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4855 /* If interrupts are disabled, ignore the interrupt */
4856 if (!ioa_cfg->allow_interrupts) {
4857 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Read pending causes, masking out anything we have disabled. */
4861 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4862 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4864 /* If an interrupt on the adapter did not occur, ignore it.
4865 * Or in the case of SIS 64, check for a stage change interrupt.
 */
4867 if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4868 if (ioa_cfg->sis64) {
4869 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4870 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4871 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4873 /* clear stage change */
4874 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4875 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
/* IPL stage advanced: continue the in-flight reset job. */
4876 list_del(&ioa_cfg->reset_cmd->queue);
4877 del_timer(&ioa_cfg->reset_cmd->timer);
4878 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4879 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4884 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Consume HRRQ entries until the toggle bit no longer matches ours,
 * i.e. until we reach entries the adapter has not yet written. */
4891 while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4892 ioa_cfg->toggle_bit) {
4894 cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4895 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
/* A handle outside the command-block table means the adapter is
 * confused -- escalate to the ISR error handler (adapter reset). */
4897 if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4898 ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
4899 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4903 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4905 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4907 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
/* Complete the op: dequeue, cancel its timeout, invoke its done fn. */
4909 list_del(&ipr_cmd->queue);
4910 del_timer(&ipr_cmd->timer);
4911 ipr_cmd->done(ipr_cmd);
/* Advance the HRRQ cursor, flipping the toggle bit on wrap-around. */
4915 if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4916 ioa_cfg->hrrq_curr++;
4918 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4919 ioa_cfg->toggle_bit ^= 1u;
4923 if (ipr_cmd != NULL) {
4924 /* Clear the PCI interrupt */
/* Re-read after clearing; retry a bounded number of times in case the
 * HRRQ-updated cause immediately reasserts. */
4926 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
4927 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4928 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4929 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4931 if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4932 ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4933 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* No HRRQ work claimed -- see if another interrupt cause is pending. */
4941 if (unlikely(rc == IRQ_NONE))
4942 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4944 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4949 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
4950 * @ioa_cfg: ioa config struct
4951 * @ipr_cmd: ipr command struct
4954 * 0 on success / -1 on failure
/* Map the SCSI command's data buffer for DMA and build a 64-bit IOADL
 * (scatter/gather descriptor list) for the adapter (SIS64 format). */
4956 static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
4957 struct ipr_cmnd *ipr_cmd)
4960 struct scatterlist *sg;
/* NOTE(review): declarations of i/nseg/length are not visible in this
 * extract -- confirm against full source. */
4962 u32 ioadl_flags = 0;
4963 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4964 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4965 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
4967 length = scsi_bufflen(scsi_cmd);
4971 nseg = scsi_dma_map(scsi_cmd);
/* Mapping failure path (error return appears truncated in this extract). */
4973 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4977 ipr_cmd->dma_use_sg = nseg;
/* Direction determines the per-descriptor flags and the write hint. */
4979 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4980 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4981 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4982 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
4983 ioadl_flags = IPR_IOADL_FLAGS_READ;
/* One 64-bit descriptor per mapped scatter/gather element. */
4985 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
4986 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
4987 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
4988 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
/* Mark the final descriptor so the adapter knows where the list ends. */
4991 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4996 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
4997 * @ioa_cfg: ioa config struct
4998 * @ipr_cmd: ipr command struct
5001 * 0 on success / -1 on failure
/* Map the SCSI command's data buffer for DMA and build a 32-bit IOADL
 * (legacy format); small lists are inlined into the IOARCB itself. */
5003 static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5004 struct ipr_cmnd *ipr_cmd)
5007 struct scatterlist *sg;
/* NOTE(review): declarations of i/nseg/length are not visible in this
 * extract -- confirm against full source. */
5009 u32 ioadl_flags = 0;
5010 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5011 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5012 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5014 length = scsi_bufflen(scsi_cmd);
5018 nseg = scsi_dma_map(scsi_cmd);
/* Mapping failure path (error return appears truncated in this extract). */
5020 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5024 ipr_cmd->dma_use_sg = nseg;
/* Fill direction-specific length/IOADL-length fields in the IOARCB. */
5026 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5027 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5028 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5029 ioarcb->data_transfer_length = cpu_to_be32(length);
5031 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5032 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5033 ioadl_flags = IPR_IOADL_FLAGS_READ;
5034 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5035 ioarcb->read_ioadl_len =
5036 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
/* If the whole list fits in the IOARCB's inline add_data area, point the
 * adapter at it to save an extra DMA fetch. */
5039 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5040 ioadl = ioarcb->u.add_data.u.ioadl;
5041 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5042 offsetof(struct ipr_ioarcb, u.add_data));
5043 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
/* One descriptor per mapped scatter/gather element. */
5046 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5047 ioadl[i].flags_and_data_len =
5048 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5049 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
/* Mark the final descriptor so the adapter knows where the list ends. */
5052 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5057 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5058 * @scsi_cmd: scsi command struct
/* Translate the mid-layer's SPI queue-tag message into the adapter's
 * task-attribute flags; untagged is the default. */
5063 static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5066 u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
/* NOTE(review): the "u8 tag[2];" declaration, the "switch (tag[0])"
 * header, the MSG_HEAD_TAG case label, the break statements and the
 * final "return rc;" are not visible in this extract -- confirm. */
5068 if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5070 case MSG_SIMPLE_TAG:
5071 rc = IPR_FLAGS_LO_SIMPLE_TASK;
5074 rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5076 case MSG_ORDERED_TAG:
5077 rc = IPR_FLAGS_LO_ORDERED_TASK;
5086 * ipr_erp_done - Process completion of ERP for a device
5087 * @ipr_cmd: ipr command struct
5089 * This function copies the sense buffer into the scsi_cmd
5090 * struct and pushes the scsi_done function.
/* Final step of device ERP: copy sense data back to the mid-layer
 * command, release the command block, and complete the scsi_cmnd. */
5095 static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5097 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5098 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5099 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5100 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* Request Sense itself failed -- surface a driver error instead. */
5102 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5103 scsi_cmd->result |= (DID_ERROR << 16);
5104 scmd_printk(KERN_ERR, scsi_cmd,
5105 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
/* NOTE(review): the "} else" joining the two branches is not visible
 * in this extract -- confirm against full source. */
5107 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5108 SCSI_SENSE_BUFFERSIZE);
/* Request a sync-complete on the device's next command. */
5112 if (!ipr_is_naca_model(res))
5113 res->needs_sync_complete = 1;
/* Unmap DMA, recycle the command block, complete to the mid-layer. */
5116 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5117 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5118 scsi_cmd->scsi_done(scsi_cmd);
5122 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5123 * @ipr_cmd: ipr command struct
/* Reset the IOARCB/IOASA of a command block so it can be reused to send
 * an ERP command (request sense / cancel all) for the same op. */
5128 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5130 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5131 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5132 dma_addr_t dma_addr = ipr_cmd->dma_addr;
/* Wipe the command packet and all transfer-length bookkeeping. */
5134 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5135 ioarcb->data_transfer_length = 0;
5136 ioarcb->read_data_transfer_length = 0;
5137 ioarcb->ioadl_len = 0;
5138 ioarcb->read_ioadl_len = 0;
5140 ioasa->residual_data_len = 0;
/* Re-point the IOADL address at this command block's own descriptor
 * area, using the 64- or 32-bit layout as appropriate.
 * NOTE(review): the "else {" between the two branches is not visible in
 * this extract -- confirm against full source. */
5142 if (ipr_cmd->ioa_cfg->sis64)
5143 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5144 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5146 ioarcb->write_ioadl_addr =
5147 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5148 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5153 * ipr_erp_request_sense - Send request sense to a device
5154 * @ipr_cmd: ipr command struct
5156 * This function sends a request sense to a device as a result
5157 * of a check condition.
/* Issue a REQUEST SENSE to the device after a check condition, reusing
 * the original command block; completion continues in ipr_erp_done(). */
5162 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5164 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5165 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* The preceding ERP step itself failed -- finish ERP immediately.
 * (The early "return;" after ipr_erp_done() is not visible here.) */
5167 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5168 ipr_erp_done(ipr_cmd);
5172 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Build the REQUEST SENSE CDB; override sync and length checking. */
5174 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5175 cmd_pkt->cdb[0] = REQUEST_SENSE;
5176 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5177 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5178 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5179 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
/* Target the DMA-able sense buffer owned by this command block. */
5181 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5182 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5184 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5185 IPR_REQUEST_SENSE_TIMEOUT * 2);
5189 * ipr_erp_cancel_all - Send cancel all to a device
5190 * @ipr_cmd: ipr command struct
5192 * This function sends a cancel all to a device to clear the
5193 * queue. If we are running TCQ on the device, QERR is set to 1,
5194 * which means all outstanding ops have been dropped on the floor.
5195 * Cancel all will return them to us.
/* First ERP step for tagged devices: send Cancel All Requests to flush
 * ops dropped by QERR, then chain into ipr_erp_request_sense(). */
5200 static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5202 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5203 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5204 struct ipr_cmd_pkt *cmd_pkt;
5208 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
/* Untagged devices have nothing queued to cancel -- go straight to
 * request sense. (The "return;" after it is not visible here.) */
5210 if (!scsi_get_tag_type(scsi_cmd->device)) {
5211 ipr_erp_request_sense(ipr_cmd);
5215 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5216 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5217 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5219 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5220 IPR_CANCEL_ALL_TIMEOUT);
5224 * ipr_dump_ioasa - Dump contents of IOASA
5225 * @ioa_cfg: ioa config struct
5226 * @ipr_cmd: ipr command struct
5227 * @res: resource entry struct
5229 * This function is invoked by the interrupt handler when ops
5230 * fail. It will log the IOASA if appropriate. Only called
/* Log a failed op's IOASA when the configured log level and the error
 * table say it is worth reporting; dumps the raw IOASA at high levels. */
5236 static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5237 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
/* NOTE(review): declarations of i/data_len/error_index are not visible
 * in this extract -- confirm against full source. */
5241 u32 ioasc, fd_ioasc;
5242 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5243 __be32 *ioasa_data = (__be32 *)ioasa;
5246 ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
5247 fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
/* Below the default level nothing is logged at all. */
5252 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
/* Prefer the failing-device IOASC when the op died in a bus reset. */
5255 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5256 error_index = ipr_get_error(fd_ioasc);
5258 error_index = ipr_get_error(ioasc);
/* At intermediate levels, apply the suppression filters (the early
 * returns on these branches appear truncated in this extract). */
5260 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5261 /* Don't log an error if the IOA already logged one */
5262 if (ioasa->ilid != 0)
5265 if (!ipr_is_gscsi(res))
5268 if (ipr_error_table[error_index].log_ioasa == 0)
5272 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
/* Never dump more than the IOASA struct, nor more than was returned. */
5274 if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
5275 data_len = sizeof(struct ipr_ioasa);
5277 data_len = be16_to_cpu(ioasa->ret_stat_len);
5279 ipr_err("IOASA Dump:\n");
/* Hex dump, four 32-bit words per line. */
5281 for (i = 0; i < data_len / 4; i += 4) {
5282 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5283 be32_to_cpu(ioasa_data[i]),
5284 be32_to_cpu(ioasa_data[i+1]),
5285 be32_to_cpu(ioasa_data[i+2]),
5286 be32_to_cpu(ioasa_data[i+3]));
5291 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5293 * @sense_buf: sense data buffer
/* Synthesize SCSI sense data in the scsi_cmnd's sense buffer from the
 * IOASA, using descriptor format (0x72) for large vset LBAs and fixed
 * format (0x70) otherwise. */
5298 static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
/* NOTE(review): the "u32 failing_lba;" declaration is not visible in
 * this extract -- confirm against full source. */
5301 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5302 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5303 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5304 u32 ioasc = be32_to_cpu(ioasa->ioasc);
5306 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
/* Driver-generated IOASCs carry no device sense to translate. */
5308 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5311 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
/* Descriptor-format sense: needed when the failing LBA of a volume set
 * exceeds 32 bits (information descriptor holds a 64-bit LBA). */
5313 if (ipr_is_vset_device(res) &&
5314 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5315 ioasa->u.vset.failing_lba_hi != 0) {
5316 sense_buf[0] = 0x72;
5317 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5318 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5319 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
/* Information descriptor: length 0x0A, VALID bit set. */
5323 sense_buf[9] = 0x0A;
5324 sense_buf[10] = 0x80;
5326 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5328 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5329 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5330 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5331 sense_buf[15] = failing_lba & 0x000000ff;
5333 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5335 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5336 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5337 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5338 sense_buf[19] = failing_lba & 0x000000ff;
/* Fixed-format sense for everything else. */
5340 sense_buf[0] = 0x70;
5341 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5342 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5343 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5345 /* Illegal request */
5346 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5347 (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID) ) {
5348 sense_buf[7] = 10; /* additional length */
5350 /* IOARCB was in error */
5351 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5352 sense_buf[15] = 0xC0;
5353 else /* Parameter data was invalid */
5354 sense_buf[15] = 0x80;
/* Sense-key-specific field pointer (big-endian, two bytes). */
5357 ((IPR_FIELD_POINTER_MASK &
5358 be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
5360 (IPR_FIELD_POINTER_MASK &
5361 be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
/* Medium error: report the failing LBA in the information field. */
5363 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5364 if (ipr_is_vset_device(res))
5365 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5367 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5369 sense_buf[0] |= 0x80; /* Or in the Valid bit */
5370 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5371 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5372 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5373 sense_buf[6] = failing_lba & 0x000000ff;
5376 sense_buf[7] = 6; /* additional length */
5382 * ipr_get_autosense - Copy autosense data to sense buffer
5383 * @ipr_cmd: ipr command struct
5385 * This function copies the autosense buffer to the buffer
5386 * in the scsi_cmd, if there is autosense available.
5389 * 1 if autosense was available / 0 if not
/* Copy adapter-provided autosense into the scsi_cmnd's sense buffer,
 * if the IOASA says autosense is valid. (The "return 0;" / "return 1;"
 * lines are not visible in this extract -- confirm.) */
5391 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5393 struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
5395 if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
/* Clamp to the mid-layer's sense buffer size. */
5398 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5399 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5400 SCSI_SENSE_BUFFERSIZE));
5405 * ipr_erp_start - Process an error response for a SCSI op
5406 * @ioa_cfg: ioa config struct
5407 * @ipr_cmd: ipr command struct
5409 * This function determines whether or not to initiate ERP
5410 * on the affected device.
/* Entry point for error recovery on a failed SCSI op: translate the
 * masked IOASC into a mid-layer result and, for check conditions
 * without autosense, start the ERP state machine. */
5415 static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5416 struct ipr_cmnd *ipr_cmd)
5418 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5419 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5420 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5421 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
/* No resource: complete through the EH done path (the guarding "if"
 * and its "return;" appear truncated in this extract). */
5424 ipr_scsi_eh_done(ipr_cmd);
/* Non-GSCSI devices get synthesized sense data instead of ERP. */
5428 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5429 ipr_gen_sense(ipr_cmd);
5431 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
/* NOTE(review): the "break;" statements between the cases below are not
 * visible in this extract -- confirm against full source. */
5433 switch (masked_ioasc) {
5434 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5435 if (ipr_is_naca_model(res))
5436 scsi_cmd->result |= (DID_ABORT << 16);
5438 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5440 case IPR_IOASC_IR_RESOURCE_HANDLE:
5441 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5442 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5444 case IPR_IOASC_HW_SEL_TIMEOUT:
5445 scsi_cmd->result |= (DID_NO_CONNECT << 16);
5446 if (!ipr_is_naca_model(res))
5447 res->needs_sync_complete = 1;
5449 case IPR_IOASC_SYNC_REQUIRED:
5451 res->needs_sync_complete = 1;
5452 scsi_cmd->result |= (DID_IMM_RETRY << 16);
5454 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5455 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5456 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5458 case IPR_IOASC_BUS_WAS_RESET:
5459 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
/*
5461 * Report the bus reset and ask for a retry. The device
5462 * will give CC/UA the next command.
 */
5464 if (!res->resetting_device)
5465 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5466 scsi_cmd->result |= (DID_ERROR << 16);
5467 if (!ipr_is_naca_model(res))
5468 res->needs_sync_complete = 1;
5470 case IPR_IOASC_HW_DEV_BUS_STATUS:
/* Surface the device's own status; on a check condition try autosense
 * first, then fall back to the cancel-all / request-sense ERP chain. */
5471 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5472 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5473 if (!ipr_get_autosense(ipr_cmd)) {
5474 if (!ipr_is_naca_model(res)) {
5475 ipr_erp_cancel_all(ipr_cmd);
5480 if (!ipr_is_naca_model(res))
5481 res->needs_sync_complete = 1;
5483 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
/* Default: anything worse than a recovered error is a hard error. */
5486 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5487 scsi_cmd->result |= (DID_ERROR << 16);
5488 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5489 res->needs_sync_complete = 1;
/* Unmap, recycle the command block, and complete to the mid-layer. */
5493 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5494 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5495 scsi_cmd->scsi_done(scsi_cmd);
5499 * ipr_scsi_done - mid-layer done function
5500 * @ipr_cmd: ipr command struct
5502 * This function is invoked by the interrupt handler for
5503 * ops generated by the SCSI mid-layer
/* Completion handler for mid-layer ops: fast-path complete on success,
 * otherwise hand off to ipr_erp_start() for error recovery. */
5508 static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5510 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5511 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5512 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
/* Propagate the residual byte count reported by the adapter. */
5514 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
5516 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5517 scsi_dma_unmap(ipr_cmd->scsi_cmd);
5518 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5519 scsi_cmd->scsi_done(scsi_cmd);
/* NOTE(review): the "} else" joining the branches is not visible in
 * this extract -- confirm against full source. */
5521 ipr_erp_start(ioa_cfg, ipr_cmd);
5525 * ipr_queuecommand - Queue a mid-layer request
5526 * @scsi_cmd: scsi command struct
5527 * @done: done function
5529 * This function queues a request generated by the mid-layer.
5533 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5534 * SCSI_MLQUEUE_HOST_BUSY if host is busy
/* queuecommand entry point: validate adapter/resource state, build the
 * IOARCB and IOADL for the op, and hand it to the adapter. */
5536 static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5537 void (*done) (struct scsi_cmnd *))
5539 struct ipr_ioa_cfg *ioa_cfg;
5540 struct ipr_resource_entry *res;
5541 struct ipr_ioarcb *ioarcb;
5542 struct ipr_cmnd *ipr_cmd;
/* NOTE(review): the "int rc = 0;" declaration used below is not visible
 * in this extract -- confirm against full source. */
5545 scsi_cmd->scsi_done = done;
5546 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5547 res = scsi_cmd->device->hostdata;
5548 scsi_cmd->result = (DID_OK << 16);
/*
5551 * We are currently blocking all devices due to a host reset
5552 * We have told the host to stop giving us new requests, but
5553 * ERP ops don't count. FIXME
 */
5555 if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5556 return SCSI_MLQUEUE_HOST_BUSY;
/*
5559 * FIXME - Create scsi_set_host_offline interface
5560 * and the ioa_is_dead check can be removed
 */
5562 if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
/* Dead adapter or unknown resource: fail the op immediately. */
5563 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5564 scsi_cmd->result = (DID_NO_CONNECT << 16);
5565 scsi_cmd->scsi_done(scsi_cmd);
/* SATA devices are routed through libata's SAS bridge. */
5569 if (ipr_is_gata(res) && res->sata_port)
5570 return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5572 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5573 ioarcb = &ipr_cmd->ioarcb;
5574 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5576 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5577 ipr_cmd->scsi_cmd = scsi_cmd;
5578 ioarcb->res_handle = res->res_handle;
5579 ipr_cmd->done = ipr_scsi_done;
5580 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
/* Generic SCSI and vset resources get the full flag treatment:
 * underflow handling, sync-complete, link descriptors, task attrs. */
5582 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5583 if (scsi_cmd->underflow == 0)
5584 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5586 if (res->needs_sync_complete) {
5587 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5588 res->needs_sync_complete = 0;
5591 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5592 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5593 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5594 ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
/* Vendor-specific CDBs (>= 0xC0) are sent as IOA commands. */
5597 if (scsi_cmd->cmnd[0] >= 0xC0 &&
5598 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5599 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* Build the scatter/gather list in the format the adapter expects. */
5601 if (likely(rc == 0)) {
5603 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5605 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5608 if (likely(rc == 0)) {
5610 ipr_send_command(ipr_cmd);
/* IOADL build failed: recycle the command block and push back. */
5612 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5613 return SCSI_MLQUEUE_HOST_BUSY;
5620 * ipr_ioctl - IOCTL handler
5621 * @sdev: scsi device struct
5626 * 0 on success / other on failure
/* Device ioctl handler: route SATA (GATA) resources to libata's SAS
 * ioctl translation. (The identity-ioctl rejection and the final error
 * return for non-GATA resources are not visible in this extract.) */
5628 static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5630 struct ipr_resource_entry *res;
5632 res = (struct ipr_resource_entry *)sdev->hostdata;
5633 if (res && ipr_is_gata(res)) {
5634 if (cmd == HDIO_GET_IDENTITY)
5636 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5643 * ipr_info - Get information about the card/driver
5644 * @scsi_host: scsi host struct
5647 * pointer to buffer with description string
/* Return a human-readable adapter description for the mid-layer.
 * Uses a static buffer; formatting is serialized by the host lock.
 * (The trailing "return buffer;" is not visible in this extract.) */
5649 static const char * ipr_ioa_info(struct Scsi_Host *host)
5651 static char buffer[512];
5652 struct ipr_ioa_cfg *ioa_cfg;
5653 unsigned long lock_flags = 0;
5655 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5657 spin_lock_irqsave(host->host_lock, lock_flags);
5658 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5659 spin_unlock_irqrestore(host->host_lock, lock_flags);
/* SCSI host template wiring the mid-layer entry points to this driver. */
5664 static struct scsi_host_template driver_template = {
5665 .module = THIS_MODULE,
5667 .info = ipr_ioa_info,
5669 .queuecommand = ipr_queuecommand,
/* Error-recovery escalation chain: abort -> device reset -> host reset. */
5670 .eh_abort_handler = ipr_eh_abort,
5671 .eh_device_reset_handler = ipr_eh_dev_reset,
5672 .eh_host_reset_handler = ipr_eh_host_reset,
/* Device/target lifecycle hooks (hot plug support). */
5673 .slave_alloc = ipr_slave_alloc,
5674 .slave_configure = ipr_slave_configure,
5675 .slave_destroy = ipr_slave_destroy,
5676 .target_alloc = ipr_target_alloc,
5677 .target_destroy = ipr_target_destroy,
5678 .change_queue_depth = ipr_change_queue_depth,
5679 .change_queue_type = ipr_change_queue_type,
5680 .bios_param = ipr_biosparam,
/* Queueing and transfer limits. */
5681 .can_queue = IPR_MAX_COMMANDS,
5683 .sg_tablesize = IPR_MAX_SGLIST,
5684 .max_sectors = IPR_IOA_MAX_SECTORS,
5685 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5686 .use_clustering = ENABLE_CLUSTERING,
/* sysfs attribute groups for the host and its devices. */
5687 .shost_attrs = ipr_ioa_attrs,
5688 .sdev_attrs = ipr_dev_attrs,
5689 .proc_name = IPR_NAME
5693 * ipr_ata_phy_reset - libata phy_reset handler
5694 * @ap: ata port to reset
/* libata phy_reset handler: wait out any adapter reset, reset the
 * device, and (re)classify the ATA device on link 0. */
5697 static void ipr_ata_phy_reset(struct ata_port *ap)
5699 unsigned long flags;
5700 struct ipr_sata_port *sata_port = ap->private_data;
5701 struct ipr_resource_entry *res = sata_port->res;
5702 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
/* NOTE(review): the "int rc;" declaration is not visible here. */
5706 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Drop the lock while sleeping for any in-flight adapter reset. */
5707 while(ioa_cfg->in_reset_reload) {
5708 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5709 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5710 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Adapter not accepting commands: bail out (the goto target appears
 * truncated in this extract). */
5713 if (!ioa_cfg->allow_cmds)
5716 rc = ipr_device_reset(ioa_cfg, res);
/* Reset failure path: take the port offline. */
5719 ata_port_disable(ap);
/* Adopt the classification reported by the adapter; unknown class
 * means no usable device behind the port. */
5723 ap->link.device[0].class = res->ata_class;
5724 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5725 ata_port_disable(ap);
5728 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5733 * ipr_ata_post_internal - Cleanup after an internal command
5734 * @qc: ATA queued command
/* libata post_internal_cmd hook: after an internal command, if the qc
 * is still pending on the adapter, reset the device to flush it. */
5739 static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5741 struct ipr_sata_port *sata_port = qc->ap->private_data;
5742 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5743 struct ipr_cmnd *ipr_cmd;
5744 unsigned long flags;
5746 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Drop the lock while sleeping for any in-flight adapter reset. */
5747 while(ioa_cfg->in_reset_reload) {
5748 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5749 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5750 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
/* Scan the pending queue for the qc; reset the device if found. */
5753 list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5754 if (ipr_cmd->qc == qc) {
5755 ipr_device_reset(ioa_cfg, sata_port->res);
5759 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5763 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5764 * @regs: destination
5765 * @tf: source ATA taskfile
5770 static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5771 struct ata_taskfile *tf)
5773 regs->feature = tf->feature;
5774 regs->nsect = tf->nsect;
5775 regs->lbal = tf->lbal;
5776 regs->lbam = tf->lbam;
5777 regs->lbah = tf->lbah;
5778 regs->device = tf->device;
5779 regs->command = tf->command;
5780 regs->hob_feature = tf->hob_feature;
5781 regs->hob_nsect = tf->hob_nsect;
5782 regs->hob_lbal = tf->hob_lbal;
5783 regs->hob_lbam = tf->hob_lbam;
5784 regs->hob_lbah = tf->hob_lbah;
5785 regs->ctl = tf->ctl;
5789 * ipr_sata_done - done function for SATA commands
5790 * @ipr_cmd: ipr command struct
5792 * This function is invoked by the interrupt handler for
5793 * ops generated by the SCSI mid-layer to SATA devices
/* Completion handler for SATA ops: cache the ATA register image for
 * qc_fill_rtf, derive the error mask, and complete the qc. */
5798 static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5800 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5801 struct ata_queued_cmd *qc = ipr_cmd->qc;
5802 struct ipr_sata_port *sata_port = qc->ap->private_data;
5803 struct ipr_resource_entry *res = sata_port->res;
5804 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasa.ioasc);
5822 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5823 * @ipr_cmd: ipr command struct
5824 * @qc: ATA queued command
/* Build a 64-bit IOADL for an ATA qc (SIS64 adapters): one descriptor
 * per scatter/gather element, last one flagged as list terminator. */
5827 static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5828 struct ata_queued_cmd *qc)
5830 u32 ioadl_flags = 0;
5831 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5832 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5833 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5834 int len = qc->nbytes;
5835 struct scatterlist *sg;
/* NOTE(review): the "unsigned int si;" declaration and the zero-length
 * early return are not visible in this extract -- confirm. */
5837 dma_addr_t dma_addr = ipr_cmd->dma_addr;
/* Direction selects the descriptor flags and the write hint. */
5842 if (qc->dma_dir == DMA_TO_DEVICE) {
5843 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5844 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5845 } else if (qc->dma_dir == DMA_FROM_DEVICE)
5846 ioadl_flags = IPR_IOADL_FLAGS_READ;
5848 ioarcb->data_transfer_length = cpu_to_be32(len);
5850 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
/* Point the IOARCB at the ATA IOADL embedded in this command block. */
5851 ioarcb->u.sis64_addr_data.data_ioadl_addr =
5852 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5854 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5855 ioadl64->flags = cpu_to_be32(ioadl_flags);
5856 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5857 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
/* Remember the last descriptor written so it can be terminated. */
5859 last_ioadl64 = ioadl64;
5863 if (likely(last_ioadl64))
5864 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5868 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5869 * @ipr_cmd: ipr command struct
5870 * @qc: ATA queued command
/* Build a 32-bit IOADL for an ATA qc (legacy adapters): one descriptor
 * per scatter/gather element, last one flagged as list terminator. */
5873 static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5874 struct ata_queued_cmd *qc)
5876 u32 ioadl_flags = 0;
5877 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5878 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5879 struct ipr_ioadl_desc *last_ioadl = NULL;
5880 int len = qc->nbytes;
5881 struct scatterlist *sg;
/* NOTE(review): the "unsigned int si;" declaration and the zero-length
 * early return are not visible in this extract -- confirm. */
/* Fill direction-specific length/IOADL-length fields in the IOARCB. */
5887 if (qc->dma_dir == DMA_TO_DEVICE) {
5888 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5889 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5890 ioarcb->data_transfer_length = cpu_to_be32(len);
5892 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5893 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
5894 ioadl_flags = IPR_IOADL_FLAGS_READ;
5895 ioarcb->read_data_transfer_length = cpu_to_be32(len);
5896 ioarcb->read_ioadl_len =
5897 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5900 for_each_sg(qc->sg, sg, qc->n_elem, si) {
5901 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5902 ioadl->address = cpu_to_be32(sg_dma_address(sg));
/* NOTE(review): the "last_ioadl = ioadl; ioadl++;" advance inside the
 * loop is not visible in this extract -- confirm against full source. */
5908 if (likely(last_ioadl))
5909 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5913 * ipr_qc_issue - Issue a SATA qc to a device
5914 * @qc: queued command
/* libata qc_issue handler: wrap the qc in an IPR ATA-passthrough
 * IOARCB, build its IOADL, and hand it to the adapter. */
5919 static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5921 struct ata_port *ap = qc->ap;
5922 struct ipr_sata_port *sata_port = ap->private_data;
5923 struct ipr_resource_entry *res = sata_port->res;
5924 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5925 struct ipr_cmnd *ipr_cmd;
5926 struct ipr_ioarcb *ioarcb;
5927 struct ipr_ioarcb_ata_regs *regs;
/* Reject new commands while the adapter is resetting or dead. */
5929 if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5930 return AC_ERR_SYSTEM;
5932 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5933 ioarcb = &ipr_cmd->ioarcb;
/* The ATA register block lives in a different spot on SIS64 vs legacy
 * layouts (the "} else {" between the branches is not visible here). */
5935 if (ioa_cfg->sis64) {
5936 regs = &ipr_cmd->i.ata_ioadl.regs;
5937 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5939 regs = &ioarcb->u.add_data.u.regs;
5941 memset(regs, 0, sizeof(*regs));
5942 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
5944 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5946 ipr_cmd->done = ipr_sata_done;
5947 ipr_cmd->ioarcb.res_handle = res->res_handle;
5948 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
5949 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5950 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5951 ipr_cmd->dma_use_sg = qc->n_elem;
/* Build the scatter/gather list in the matching format. */
5954 ipr_build_ata_ioadl64(ipr_cmd, qc);
5956 ipr_build_ata_ioadl(ipr_cmd, qc);
5958 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5959 ipr_copy_sata_tf(regs, &qc->tf);
5960 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
5961 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
/* Map the ATA/ATAPI protocol onto adapter transfer-type flags.
 * NOTE(review): several case labels and break statements are not
 * visible in this extract -- confirm against full source. */
5963 switch (qc->tf.protocol) {
5964 case ATA_PROT_NODATA:
5969 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
5972 case ATAPI_PROT_PIO:
5973 case ATAPI_PROT_NODATA:
5974 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5977 case ATAPI_PROT_DMA:
5978 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
5979 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
/* Unsupported protocol: refuse the qc. */
5984 return AC_ERR_INVALID;
5989 ipr_send_command(ipr_cmd);
5995 * ipr_qc_fill_rtf - Read result TF
5996 * @qc: ATA queued command
6001 static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6003 struct ipr_sata_port *sata_port = qc->ap->private_data;
6004 struct ipr_ioasa_gata *g = &sata_port->ioasa;
6005 struct ata_taskfile *tf = &qc->result_tf;
/* Copy the ATA status registers returned in the IOASA back into the
 * libata result taskfile so the midlayer can inspect completion state. */
6007 tf->feature = g->error;
6008 tf->nsect = g->nsect;
6012 tf->device = g->device;
6013 tf->command = g->status;
6014 tf->hob_nsect = g->hob_nsect;
6015 tf->hob_lbal = g->hob_lbal;
6016 tf->hob_lbam = g->hob_lbam;
6017 tf->hob_lbah = g->hob_lbah;
6018 tf->ctl = g->alt_status;
/* libata port operations: SATA devices behind the IOA are driven through
 * these callbacks, with command issue funneled through ipr_qc_issue. */
6023 static struct ata_port_operations ipr_sata_ops = {
6024 .phy_reset = ipr_ata_phy_reset,
6025 .hardreset = ipr_sata_reset,
6026 .post_internal_cmd = ipr_ata_post_internal,
6027 .qc_prep = ata_noop_qc_prep,
6028 .qc_issue = ipr_qc_issue,
6029 .qc_fill_rtf = ipr_qc_fill_rtf,
6030 .port_start = ata_sas_port_start,
6031 .port_stop = ata_sas_port_stop
/* Port capabilities advertised to libata for SATA devices on this IOA. */
6034 static struct ata_port_info sata_port_info = {
6035 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6036 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6037 .pio_mask = 0x10, /* pio4 */
6039 .udma_mask = 0x7f, /* udma0-6 */
6040 .port_ops = &ipr_sata_ops
6043 #ifdef CONFIG_PPC_PSERIES
6044 static const u16 ipr_blocked_processors[] = {
6056 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6057 * @ioa_cfg: ioa cfg struct
6059 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6060 * certain pSeries hardware. This function determines if the given
6061 * adapter is in one of these configurations or not.
6064 * 1 if adapter is not supported / 0 if adapter is supported
6066 static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
/* Only Gemstone (type 0x5702) adapters below PCI revision 4 are affected. */
6070 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6071 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
6072 if (__is_processor(ipr_blocked_processors[i]))
/* Non-pSeries builds: every adapter/processor combination is allowed. */
6079 #define ipr_invalid_adapter(ioa_cfg) 0
6083 * ipr_ioa_bringdown_done - IOA bring down completion.
6084 * @ipr_cmd: ipr command struct
6086 * This function processes the completion of an adapter bring down.
6087 * It wakes any reset sleepers.
6092 static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6094 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6097 ioa_cfg->in_reset_reload = 0;
6098 ioa_cfg->reset_retries = 0;
6099 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6100 wake_up_all(&ioa_cfg->reset_wait_q);
/* scsi_unblock_requests() may re-enter the driver, so drop the host
 * lock around the call and retake it afterwards. */
6102 spin_unlock_irq(ioa_cfg->host->host_lock);
6103 scsi_unblock_requests(ioa_cfg->host);
6104 spin_lock_irq(ioa_cfg->host->host_lock);
6107 return IPR_RC_JOB_RETURN;
6111 * ipr_ioa_reset_done - IOA reset completion.
6112 * @ipr_cmd: ipr command struct
6114 * This function processes the completion of an adapter reset.
6115 * It schedules any necessary mid-layer add/removes and
6116 * wakes any reset sleepers.
6121 static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6123 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6124 struct ipr_resource_entry *res;
6125 struct ipr_hostrcb *hostrcb, *temp;
6129 ioa_cfg->in_reset_reload = 0;
6130 ioa_cfg->allow_cmds = 1;
6131 ioa_cfg->reset_cmd = NULL;
6132 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
/* Kick the worker thread for any devices that appeared or vanished
 * across the reset. */
6134 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6135 if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6140 schedule_work(&ioa_cfg->work_q);
/* Re-arm free HCAM buffers: log-data HCAMs first (up to
 * IPR_NUM_LOG_HCAMS), remainder as config-change HCAMs. */
6142 list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6143 list_del(&hostrcb->queue);
6144 if (i++ < IPR_NUM_LOG_HCAMS)
6145 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6147 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6150 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6151 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6153 ioa_cfg->reset_retries = 0;
6154 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6155 wake_up_all(&ioa_cfg->reset_wait_q);
/* Drop the host lock while unblocking since the midlayer may recurse. */
6157 spin_unlock(ioa_cfg->host->host_lock);
6158 scsi_unblock_requests(ioa_cfg->host);
6159 spin_lock(ioa_cfg->host->host_lock);
6161 if (!ioa_cfg->allow_cmds)
6162 scsi_block_requests(ioa_cfg->host);
6165 return IPR_RC_JOB_RETURN;
6169 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6170 * @supported_dev: supported device struct
6171 * @vpids: vendor product id struct
6176 static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6177 struct ipr_std_inq_vpids *vpids)
6179 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6180 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6181 supported_dev->num_records = 1;
6182 supported_dev->data_length =
6183 cpu_to_be16(sizeof(struct ipr_supported_device));
6184 supported_dev->reserved = 0;
6188 * ipr_set_supported_devs - Send Set Supported Devices for a device
6189 * @ipr_cmd: ipr command struct
6191 * This function sends a Set Supported Devices to the adapter
6194 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6196 static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6198 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6199 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6200 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6201 struct ipr_resource_entry *res = ipr_cmd->u.res;
/* Default next step; overridden below while disks remain to report. */
6203 ipr_cmd->job_step = ipr_ioa_reset_done;
/* Resume the walk of used resources where the previous invocation left
 * off (ipr_cmd->u.res is the cursor); only SCSI disks are reported. */
6205 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6206 if (!ipr_is_scsi_disk(res))
6209 ipr_cmd->u.res = res;
6210 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6212 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6213 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6214 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6216 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6217 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
/* CDB bytes 7/8 carry the transfer length, big-endian. */
6218 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6219 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6221 ipr_init_ioadl(ipr_cmd,
6222 ioa_cfg->vpd_cbs_dma +
6223 offsetof(struct ipr_misc_cbs, supp_dev),
6224 sizeof(struct ipr_supported_device),
6225 IPR_IOADL_FLAGS_WRITE_LAST);
6227 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6228 IPR_SET_SUP_DEVICE_TIMEOUT);
6230 if (!ioa_cfg->sis64)
6231 ipr_cmd->job_step = ipr_set_supported_devs;
6232 return IPR_RC_JOB_RETURN;
6235 return IPR_RC_JOB_CONTINUE;
6239 * ipr_get_mode_page - Locate specified mode page
6240 * @mode_pages: mode page buffer
6241 * @page_code: page code to find
6242 * @len: minimum required length for mode page
6245 * pointer to mode page / NULL on failure
6247 static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6248 u32 page_code, u32 len)
6250 struct ipr_mode_page_hdr *mode_hdr;
6254 if (!mode_pages || (mode_pages->hdr.length == 0))
/* Page data length = total sense length - 4-byte header - block descs. */
6257 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6258 mode_hdr = (struct ipr_mode_page_hdr *)
6259 (mode_pages->data + mode_pages->hdr.block_desc_len);
/* Walk the pages until we find the requested code with at least the
 * minimum length the caller requires. */
6262 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6263 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
/* Advance past this page's header plus its reported data length. */
6267 page_length = (sizeof(struct ipr_mode_page_hdr) +
6268 mode_hdr->page_length);
6269 length -= page_length;
6270 mode_hdr = (struct ipr_mode_page_hdr *)
6271 ((unsigned long)mode_hdr + page_length);
6278 * ipr_check_term_power - Check for term power errors
6279 * @ioa_cfg: ioa config struct
6280 * @mode_pages: IOAFP mode pages buffer
6282 * Check the IOAFP's mode page 28 for term power errors
6287 static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6288 struct ipr_mode_pages *mode_pages)
6292 struct ipr_dev_bus_entry *bus;
6293 struct ipr_mode_page28 *mode_page;
6295 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6296 sizeof(struct ipr_mode_page28));
6298 entry_length = mode_page->entry_length;
6300 bus = mode_page->bus;
/* Scan each bus entry; log any bus reporting missing termination power. */
6302 for (i = 0; i < mode_page->num_entries; i++) {
6303 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6304 dev_err(&ioa_cfg->pdev->dev,
6305 "Term power is absent on scsi bus %d\n",
/* Bus entries are variable length; step by the adapter-reported size. */
6309 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6314 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6315 * @ioa_cfg: ioa config struct
6317 * Looks through the config table checking for SES devices. If
6318 * the SES device is in the SES table indicating a maximum SCSI
6319 * bus speed, the speed is limited for the bus.
6324 static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6329 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6330 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6331 ioa_cfg->bus_attr[i].bus_width);
/* Only ever lower the configured transfer rate, never raise it. */
6333 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6334 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6339 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6340 * @ioa_cfg: ioa config struct
6341 * @mode_pages: mode page 28 buffer
6343 * Updates mode page 28 based on driver configuration
6348 static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6349 struct ipr_mode_pages *mode_pages)
6351 int i, entry_length;
6352 struct ipr_dev_bus_entry *bus;
6353 struct ipr_bus_attributes *bus_attr;
6354 struct ipr_mode_page28 *mode_page;
6356 mode_page = ipr_get_mode_page(mode_pages, 0x28,
6357 sizeof(struct ipr_mode_page28));
6359 entry_length = mode_page->entry_length;
6361 /* Loop for each device bus entry */
6362 for (i = 0, bus = mode_page->bus;
6363 i < mode_page->num_entries;
6364 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6365 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6366 dev_err(&ioa_cfg->pdev->dev,
6367 "Invalid resource address reported: 0x%08X\n",
6368 IPR_GET_PHYS_LOC(bus->res_addr));
/* Push the driver's per-bus settings (width, rate, QAS) into the page. */
6372 bus_attr = &ioa_cfg->bus_attr[i];
6373 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6374 bus->bus_width = bus_attr->bus_width;
6375 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6376 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6377 if (bus_attr->qas_enabled)
6378 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6380 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6385 * ipr_build_mode_select - Build a mode select command
6386 * @ipr_cmd: ipr command struct
6387 * @res_handle: resource handle to send command to
6388 * @parm: Byte 2 of Mode Sense command
6389 * @dma_addr: DMA buffer address
6390 * @xfer_len: data transfer length
6395 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6396 __be32 res_handle, u8 parm,
6397 dma_addr_t dma_addr, u8 xfer_len)
6399 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6401 ioarcb->res_handle = res_handle;
6402 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6403 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6404 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6405 ioarcb->cmd_pkt.cdb[1] = parm;
6406 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6408 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6412 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6413 * @ipr_cmd: ipr command struct
6415 * This function sets up the SCSI bus attributes and sends
6416 * a Mode Select for Page 28 to activate them.
6421 static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6423 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6424 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6428 ipr_scsi_bus_speed_limit(ioa_cfg);
6429 ipr_check_term_power(ioa_cfg, mode_pages);
6430 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
/* The header length byte is reserved (zero) on the MODE SELECT wire. */
6431 length = mode_pages->hdr.length + 1;
6432 mode_pages->hdr.length = 0;
6434 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6435 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
/* After page 28 is applied, report supported devices to the adapter. */
6438 ipr_cmd->job_step = ipr_set_supported_devs;
6439 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6440 struct ipr_resource_entry, queue);
6441 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6444 return IPR_RC_JOB_RETURN;
6448 * ipr_build_mode_sense - Builds a mode sense command
6449 * @ipr_cmd: ipr command struct
6450 * @res: resource entry struct
6451 * @parm: Byte 2 of mode sense command
6452 * @dma_addr: DMA address of mode sense buffer
6453 * @xfer_len: Size of DMA buffer
6458 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6460 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6462 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6464 ioarcb->res_handle = res_handle;
6465 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6466 ioarcb->cmd_pkt.cdb[2] = parm;
6467 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6468 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
/* MODE SENSE reads from the device, so the IOADL is built for a read. */
6470 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6474 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6475 * @ipr_cmd: ipr command struct
6477 * This function handles the failure of an IOA bringup command.
6482 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6484 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6485 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6487 dev_err(&ioa_cfg->pdev->dev,
6488 "0x%02X failed with IOASC: 0x%08X\n",
6489 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6491 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6492 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6493 return IPR_RC_JOB_RETURN;
6497 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6498 * @ipr_cmd: ipr command struct
6500 * This function handles the failure of a Mode Sense to the IOAFP.
6501 * Some adapters do not handle all mode pages.
6504 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6506 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6508 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6509 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6511 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6512 ipr_cmd->job_step = ipr_set_supported_devs;
6513 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6514 struct ipr_resource_entry, queue);
6515 return IPR_RC_JOB_CONTINUE;
6518 return ipr_reset_cmd_failed(ipr_cmd);
6522 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6523 * @ipr_cmd: ipr command struct
6525 * This function send a Page 28 mode sense to the IOA to
6526 * retrieve SCSI bus attributes.
6531 static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6533 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6536 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6537 0x28, ioa_cfg->vpd_cbs_dma +
6538 offsetof(struct ipr_misc_cbs, mode_pages),
6539 sizeof(struct ipr_mode_pages));
/* On success, program the page back via mode select; adapters that
 * reject page 28 are handled by the dedicated failure step. */
6541 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6542 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6544 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6547 return IPR_RC_JOB_RETURN;
6551 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6552 * @ipr_cmd: ipr command struct
6554 * This function enables dual IOA RAID support if possible.
6559 static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6561 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6562 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6563 struct ipr_mode_page24 *mode_page;
6567 mode_page = ipr_get_mode_page(mode_pages, 0x24,
6568 sizeof(struct ipr_mode_page24));
/* Request dual-adapter Advanced Function support in page 24. */
6571 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
/* The header length byte is reserved (zero) on the MODE SELECT wire. */
6573 length = mode_pages->hdr.length + 1;
6574 mode_pages->hdr.length = 0;
6576 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6577 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6580 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6581 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6584 return IPR_RC_JOB_RETURN;
6588 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6589 * @ipr_cmd: ipr command struct
6591 * This function handles the failure of a Mode Sense to the IOAFP.
6592 * Some adapters do not handle all mode pages.
6595 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6597 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6599 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
6601 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6602 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6603 return IPR_RC_JOB_CONTINUE;
6606 return ipr_reset_cmd_failed(ipr_cmd);
6610 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6611 * @ipr_cmd: ipr command struct
6613 * This function send a mode sense to the IOA to retrieve
6614 * the IOA Advanced Function Control mode page.
6619 static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6624 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6625 0x24, ioa_cfg->vpd_cbs_dma +
6626 offsetof(struct ipr_misc_cbs, mode_pages),
6627 sizeof(struct ipr_mode_pages));
/* Page 24 is optional; the failure handler falls back to page 28. */
6629 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6630 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6632 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6635 return IPR_RC_JOB_RETURN;
6639 * ipr_init_res_table - Initialize the resource table
6640 * @ipr_cmd: ipr command struct
6642 * This function looks through the existing resource table, comparing
6643 * it with the config table. This function will take care of old/new
6644 * devices and schedule adding/removing them from the mid-layer
6648 * IPR_RC_JOB_CONTINUE
6650 static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6652 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6653 struct ipr_resource_entry *res, *temp;
6654 struct ipr_config_table_entry_wrapper cfgtew;
6655 int entries, found, flag, i;
/* SIS-64 adapters use a different config table layout. */
6660 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6662 flag = ioa_cfg->u.cfg_table->hdr.flags;
6664 if (flag & IPR_UCODE_DOWNLOAD_REQ)
6665 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
/* Park all currently-known resources on old_res, then re-match them
 * against the freshly fetched config table. */
6667 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6668 list_move_tail(&res->queue, &old_res);
6671 entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
6673 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6675 for (i = 0; i < entries; i++) {
6677 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6679 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6682 list_for_each_entry_safe(res, temp, &old_res, queue) {
6683 if (ipr_is_same_device(res, &cfgtew)) {
6684 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
/* New device: claim a free resource entry if any remain. */
6691 if (list_empty(&ioa_cfg->free_res_q)) {
6692 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6697 res = list_entry(ioa_cfg->free_res_q.next,
6698 struct ipr_resource_entry, queue);
6699 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6700 ipr_init_res_entry(res, &cfgtew);
6705 ipr_update_res_entry(res, &cfgtew);
/* Anything still on old_res vanished: schedule mid-layer removal. */
6708 list_for_each_entry_safe(res, temp, &old_res, queue) {
6710 res->del_from_ml = 1;
6711 res->res_handle = IPR_INVALID_RES_HANDLE;
6712 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6716 list_for_each_entry_safe(res, temp, &old_res, queue) {
6717 ipr_clear_res_target(res);
6718 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
/* Dual-IOA RAID capable adapters first fetch AF control page 24. */
6721 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6722 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6724 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6727 return IPR_RC_JOB_CONTINUE;
6731 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6732 * @ipr_cmd: ipr command struct
6734 * This function sends a Query IOA Configuration command
6735 * to the adapter to retrieve the IOA configuration table.
6740 static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6742 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6743 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6744 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6745 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
/* Latch the dual-IOA RAID capability discovered by the 0xD0 inquiry. */
6748 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6749 ioa_cfg->dual_raid = 1;
6750 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6751 ucode_vpd->major_release, ucode_vpd->card_type,
6752 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6753 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6754 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6756 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
/* CDB bytes 7/8 carry the allocation length, big-endian. */
6757 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6758 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6760 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6761 IPR_IOADL_FLAGS_READ_LAST);
6763 ipr_cmd->job_step = ipr_init_res_table;
6765 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6768 return IPR_RC_JOB_RETURN;
6772 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6773 * @ipr_cmd: ipr command struct
6775 * This utility function sends an inquiry to the adapter.
6780 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6781 dma_addr_t dma_addr, u8 xfer_len)
6783 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6786 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6787 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
/* Standard INQUIRY CDB: byte 1 = EVPD flags, byte 2 = page code. */
6789 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6790 ioarcb->cmd_pkt.cdb[1] = flags;
6791 ioarcb->cmd_pkt.cdb[2] = page;
6792 ioarcb->cmd_pkt.cdb[4] = xfer_len;
6794 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6796 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6801 * ipr_inquiry_page_supported - Is the given inquiry page supported
6802 * @page0: inquiry page 0 buffer
6805 * This function determines if the specified inquiry page is supported.
6808 * 1 if page is supported / 0 if not
6810 static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
/* Page 0 lists the supported page codes; bound the scan by both the
 * reported length and the buffer capacity. */
6814 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6815 if (page0->page[i] == page)
6822 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6823 * @ipr_cmd: ipr command struct
6825 * This function sends a Page 0xD0 inquiry to the adapter
6826 * to retrieve adapter capabilities.
6829 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6831 static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6833 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6834 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6835 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6838 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
/* Clear stale capability data in case the page is unsupported. */
6839 memset(cap, 0, sizeof(*cap));
6841 if (ipr_inquiry_page_supported(page0, 0xD0)) {
6842 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6843 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6844 sizeof(struct ipr_inquiry_cap));
6845 return IPR_RC_JOB_RETURN;
6849 return IPR_RC_JOB_CONTINUE;
6853 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6854 * @ipr_cmd: ipr command struct
6856 * This function sends a Page 3 inquiry to the adapter
6857 * to retrieve software VPD information.
6860 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6862 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6864 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6868 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
/* EVPD page 3 holds the firmware/microcode version VPD. */
6870 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6871 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6872 sizeof(struct ipr_inquiry_page3));
6875 return IPR_RC_JOB_RETURN;
6879 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6880 * @ipr_cmd: ipr command struct
6882 * This function sends a Page 0 inquiry to the adapter
6883 * to retrieve supported inquiry pages.
6886 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6888 static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6890 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6895 /* Grab the type out of the VPD and store it away */
6896 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
/* The product ID encodes the adapter type as four hex characters. */
6898 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6900 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6902 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6903 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6904 sizeof(struct ipr_inquiry_page0));
6907 return IPR_RC_JOB_RETURN;
6911 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6912 * @ipr_cmd: ipr command struct
6914 * This function sends a standard inquiry to the adapter.
6919 static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6921 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6924 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
/* flags=0/page=0 requests standard (non-EVPD) inquiry data. */
6926 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6927 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6928 sizeof(struct ipr_ioa_vpd));
6931 return IPR_RC_JOB_RETURN;
6935 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
6936 * @ipr_cmd: ipr command struct
6938 * This function send an Identify Host Request Response Queue
6939 * command to establish the HRRQ with the adapter.
6944 static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
6946 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6947 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6950 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
6952 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
6953 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6955 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
/* CDB bytes 2-5 carry bits 31:0 of the HRRQ DMA address, big-endian;
 * bytes 7-8 carry the queue size in bytes. */
6957 ioarcb->cmd_pkt.cdb[1] = 0x1;
6958 ioarcb->cmd_pkt.cdb[2] =
6959 ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
6960 ioarcb->cmd_pkt.cdb[3] =
6961 ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
6962 ioarcb->cmd_pkt.cdb[4] =
6963 ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
6964 ioarcb->cmd_pkt.cdb[5] =
6965 ((u64) ioa_cfg->host_rrq_dma) & 0xff;
6966 ioarcb->cmd_pkt.cdb[7] =
6967 ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
6968 ioarcb->cmd_pkt.cdb[8] =
6969 (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
/* SIS-64 also supplies the upper 32 address bits in CDB bytes 10-13. */
6971 if (ioa_cfg->sis64) {
6972 ioarcb->cmd_pkt.cdb[10] =
6973 ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
6974 ioarcb->cmd_pkt.cdb[11] =
6975 ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
6976 ioarcb->cmd_pkt.cdb[12] =
6977 ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
6978 ioarcb->cmd_pkt.cdb[13] =
6979 ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
6982 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
6984 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6987 return IPR_RC_JOB_RETURN;
6991 * ipr_reset_timer_done - Adapter reset timer function
6992 * @ipr_cmd: ipr command struct
6994 * Description: This function is used in adapter reset processing
6995 * for timing events. If the reset_cmd pointer in the IOA
6996 * config struct is not this adapter's we are doing nested
6997 * resets and fail_all_ops will take care of freeing the
7003 static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7005 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7006 unsigned long lock_flags = 0;
7008 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Only complete the command if this reset is still the active one. */
7010 if (ioa_cfg->reset_cmd == ipr_cmd) {
7011 list_del(&ipr_cmd->queue);
7012 ipr_cmd->done(ipr_cmd);
7015 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7019 * ipr_reset_start_timer - Start a timer for adapter reset job
7020 * @ipr_cmd: ipr command struct
7021 * @timeout: timeout value
7023 * Description: This function is used in adapter reset processing
7024 * for timing events. If the reset_cmd pointer in the IOA
7025 * config struct is not this adapter's we are doing nested
7026 * resets and fail_all_ops will take care of freeing the
7032 static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7033 unsigned long timeout)
/* Queue the command as pending so completion paths can find it, then
 * arm a one-shot timer that resumes the reset job on expiry. */
7035 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7036 ipr_cmd->done = ipr_reset_ioa_job;
7038 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7039 ipr_cmd->timer.expires = jiffies + timeout;
7040 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7041 add_timer(&ipr_cmd->timer);
7045 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7046 * @ioa_cfg: ioa cfg struct
7051 static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7053 memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7055 /* Initialize Host RRQ pointers */
7056 ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7057 ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7058 ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
/* The toggle bit distinguishes fresh entries after the queue wraps. */
7059 ioa_cfg->toggle_bit = 1;
7061 /* Zero out config table */
7062 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7066 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7067 * @ipr_cmd: ipr command struct
7070 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7072 static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7074 unsigned long stage, stage_time;
7076 volatile u32 int_reg;
7077 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7080 feedback = readl(ioa_cfg->regs.init_feedback_reg);
7081 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7082 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7084 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7086 /* sanity check the stage_time value */
7087 if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7088 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7089 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7090 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7092 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7093 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
/* Read back the mask register to flush the posted MMIO write. */
7094 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7095 stage_time = ioa_cfg->transop_timeout;
7096 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7097 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7098 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7099 maskval = IPR_PCII_IPL_STAGE_CHANGE;
7100 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7101 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7102 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7103 return IPR_RC_JOB_CONTINUE;
/* Stage still in progress: re-arm the stage timer and wait. */
7106 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7107 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7108 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7109 ipr_cmd->done = ipr_reset_ioa_job;
7110 add_timer(&ipr_cmd->timer);
7111 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7113 return IPR_RC_JOB_RETURN;
7117 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7118 * @ipr_cmd: ipr command struct
7120 * This function reinitializes some control blocks and
7121 * enables destructive diagnostics on the adapter.
7126 static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7128 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7129 volatile u32 int_reg;
7132 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7133 ipr_init_ioa_mem(ioa_cfg);
7135 ioa_cfg->allow_interrupts = 1;
7136 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
/* Adapter already operational: just unmask the run-time interrupts. */
7138 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7139 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7140 ioa_cfg->regs.clr_interrupt_mask_reg32);
7141 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7142 return IPR_RC_JOB_CONTINUE;
7145 /* Enable destructive diagnostics on IOA */
7146 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7148 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7150 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
/* Read back the mask register to flush the posted MMIO writes. */
7152 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7154 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
/* SIS-64 walks IPL stages via the feedback register instead of a
 * single transition-to-operational timeout. */
7156 if (ioa_cfg->sis64) {
7157 ipr_cmd->job_step = ipr_reset_next_stage;
7158 return IPR_RC_JOB_CONTINUE;
7161 ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7162 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7163 ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7164 ipr_cmd->done = ipr_reset_ioa_job;
7165 add_timer(&ipr_cmd->timer);
7166 list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7169 return IPR_RC_JOB_RETURN;
7173 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7174 * @ipr_cmd: ipr command struct
7176 * This function is invoked when an adapter dump has run out
7177 * of processing time.
7180 * IPR_RC_JOB_CONTINUE
7182 static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7184 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7186 if (ioa_cfg->sdt_state == GET_DUMP)
7187 ioa_cfg->sdt_state = ABORT_DUMP;
7189 ipr_cmd->job_step = ipr_reset_alert;
7191 return IPR_RC_JOB_CONTINUE;
7195 * ipr_unit_check_no_data - Log a unit check/no data error log
7196 * @ioa_cfg: ioa config struct
7198 * Logs an error indicating the adapter unit checked, but for some
7199 * reason, we were unable to fetch the unit check buffer.
7204 static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7206 ioa_cfg->errors_logged++;
7207 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7211 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7212 * @ioa_cfg: ioa config struct
7214 * Fetches the unit check buffer from the adapter by clocking the data
7215 * through the mailbox register.
7220 static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7222 unsigned long mailbox;
7223 struct ipr_hostrcb *hostrcb;
7224 struct ipr_uc_sdt sdt;
7228 mailbox = readl(ioa_cfg->ioa_mailbox);
7230 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7231 ipr_unit_check_no_data(ioa_cfg);
7235 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7236 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7237 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7239 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7240 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7241 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7242 ipr_unit_check_no_data(ioa_cfg);
7246 /* Find length of the first sdt entry (UC buffer) */
7247 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7248 length = be32_to_cpu(sdt.entry[0].end_token);
7250 length = (be32_to_cpu(sdt.entry[0].end_token) -
7251 be32_to_cpu(sdt.entry[0].start_token)) &
7252 IPR_FMT2_MBX_ADDR_MASK;
7254 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7255 struct ipr_hostrcb, queue);
7256 list_del(&hostrcb->queue);
7257 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7259 rc = ipr_get_ldump_data_section(ioa_cfg,
7260 be32_to_cpu(sdt.entry[0].start_token),
7261 (__be32 *)&hostrcb->hcam,
7262 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7265 ipr_handle_log_data(ioa_cfg, hostrcb);
7266 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7267 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7268 ioa_cfg->sdt_state == GET_DUMP)
7269 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7271 ipr_unit_check_no_data(ioa_cfg);
7273 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7277 * ipr_reset_restore_cfg_space - Restore PCI config space.
7278 * @ipr_cmd: ipr command struct
7280 * Description: This function restores the saved PCI config space of
7281 * the adapter, fails all outstanding ops back to the callers, and
7282 * fetches the dump/unit check if applicable to this reset.
7285 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7287 static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7289 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7293 ioa_cfg->pdev->state_saved = true;
7294 rc = pci_restore_state(ioa_cfg->pdev);
7296 if (rc != PCIBIOS_SUCCESSFUL) {
7297 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7298 return IPR_RC_JOB_CONTINUE;
7301 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7302 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7303 return IPR_RC_JOB_CONTINUE;
7306 ipr_fail_all_ops(ioa_cfg);
7308 if (ioa_cfg->ioa_unit_checked) {
7309 ioa_cfg->ioa_unit_checked = 0;
7310 ipr_get_unit_check_buffer(ioa_cfg);
7311 ipr_cmd->job_step = ipr_reset_alert;
7312 ipr_reset_start_timer(ipr_cmd, 0);
7313 return IPR_RC_JOB_RETURN;
7316 if (ioa_cfg->in_ioa_bringdown) {
7317 ipr_cmd->job_step = ipr_ioa_bringdown_done;
7319 ipr_cmd->job_step = ipr_reset_enable_ioa;
7321 if (GET_DUMP == ioa_cfg->sdt_state) {
7322 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7323 ipr_cmd->job_step = ipr_reset_wait_for_dump;
7324 schedule_work(&ioa_cfg->work_q);
7325 return IPR_RC_JOB_RETURN;
7330 return IPR_RC_JOB_CONTINUE;
7334 * ipr_reset_bist_done - BIST has completed on the adapter.
7335 * @ipr_cmd: ipr command struct
7337 * Description: Unblock config space and resume the reset process.
7340 * IPR_RC_JOB_CONTINUE
7342 static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7345 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7346 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7348 return IPR_RC_JOB_CONTINUE;
7352 * ipr_reset_start_bist - Run BIST on the adapter.
7353 * @ipr_cmd: ipr command struct
7355 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7358 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7360 static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7362 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7366 pci_block_user_cfg_access(ioa_cfg->pdev);
7367 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7369 if (rc != PCIBIOS_SUCCESSFUL) {
7370 pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7371 ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7372 rc = IPR_RC_JOB_CONTINUE;
7374 ipr_cmd->job_step = ipr_reset_bist_done;
7375 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7376 rc = IPR_RC_JOB_RETURN;
7384 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7385 * @ipr_cmd: ipr command struct
7387 * Description: This clears PCI reset to the adapter and delays two seconds.
7392 static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7395 pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7396 ipr_cmd->job_step = ipr_reset_bist_done;
7397 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7399 return IPR_RC_JOB_RETURN;
7403 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7404 * @ipr_cmd: ipr command struct
7406 * Description: This asserts PCI reset to the adapter.
7411 static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7413 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7414 struct pci_dev *pdev = ioa_cfg->pdev;
7417 pci_block_user_cfg_access(pdev);
7418 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7419 ipr_cmd->job_step = ipr_reset_slot_reset_done;
7420 ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7422 return IPR_RC_JOB_RETURN;
7426 * ipr_reset_allowed - Query whether or not IOA can be reset
7427 * @ioa_cfg: ioa config struct
7430 * 0 if reset not allowed / non-zero if reset is allowed
7432 static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7434 volatile u32 temp_reg;
7436 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7437 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7441 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7442 * @ipr_cmd: ipr command struct
7444 * Description: This function waits for adapter permission to run BIST,
7445 * then runs BIST. If the adapter does not give permission after a
7446 * reasonable time, we will reset the adapter anyway. The impact of
7447 * resetting the adapter without warning the adapter is the risk of
7448 * losing the persistent error log on the adapter. If the adapter is
7449 * reset while it is writing to the flash on the adapter, the flash
7450 * segment will have bad ECC and be zeroed.
7453 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7455 static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7457 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7458 int rc = IPR_RC_JOB_RETURN;
7460 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7461 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7462 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7464 ipr_cmd->job_step = ioa_cfg->reset;
7465 rc = IPR_RC_JOB_CONTINUE;
7472 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
7473 * @ipr_cmd: ipr command struct
7475 * Description: This function alerts the adapter that it will be reset.
7476 * If memory space is not currently enabled, proceed directly
7477 * to running BIST on the adapter. The timer must always be started
7478 * so we guarantee we do not run BIST from ipr_isr.
7483 static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7485 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7490 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7492 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7493 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7494 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7495 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7497 ipr_cmd->job_step = ioa_cfg->reset;
7500 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7501 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7504 return IPR_RC_JOB_RETURN;
7508 * ipr_reset_ucode_download_done - Microcode download completion
7509 * @ipr_cmd: ipr command struct
7511 * Description: This function unmaps the microcode download buffer.
7514 * IPR_RC_JOB_CONTINUE
7516 static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7518 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7519 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7521 pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7522 sglist->num_sg, DMA_TO_DEVICE);
7524 ipr_cmd->job_step = ipr_reset_alert;
7525 return IPR_RC_JOB_CONTINUE;
7529 * ipr_reset_ucode_download - Download microcode to the adapter
7530 * @ipr_cmd: ipr command struct
7532 * Description: This function checks to see if it there is microcode
7533 * to download to the adapter. If there is, a download is performed.
7536 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7538 static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7540 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7541 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7544 ipr_cmd->job_step = ipr_reset_alert;
7547 return IPR_RC_JOB_CONTINUE;
7549 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7550 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7551 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7552 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7553 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7554 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7555 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7558 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7560 ipr_build_ucode_ioadl(ipr_cmd, sglist);
7561 ipr_cmd->job_step = ipr_reset_ucode_download_done;
7563 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7564 IPR_WRITE_BUFFER_TIMEOUT);
7567 return IPR_RC_JOB_RETURN;
7571 * ipr_reset_shutdown_ioa - Shutdown the adapter
7572 * @ipr_cmd: ipr command struct
7574 * Description: This function issues an adapter shutdown of the
7575 * specified type to the specified adapter as part of the
7576 * adapter reset job.
7579 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7581 static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7583 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7584 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7585 unsigned long timeout;
7586 int rc = IPR_RC_JOB_CONTINUE;
7589 if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7590 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7591 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7592 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7593 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7595 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7596 timeout = IPR_SHUTDOWN_TIMEOUT;
7597 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7598 timeout = IPR_INTERNAL_TIMEOUT;
7599 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7600 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7602 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7604 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7606 rc = IPR_RC_JOB_RETURN;
7607 ipr_cmd->job_step = ipr_reset_ucode_download;
7609 ipr_cmd->job_step = ipr_reset_alert;
7616 * ipr_reset_ioa_job - Adapter reset job
7617 * @ipr_cmd: ipr command struct
7619 * Description: This function is the job router for the adapter reset job.
7624 static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7627 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7630 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
7632 if (ioa_cfg->reset_cmd != ipr_cmd) {
7634 * We are doing nested adapter resets and this is
7635 * not the current reset job.
7637 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7641 if (IPR_IOASC_SENSE_KEY(ioasc)) {
7642 rc = ipr_cmd->job_step_failed(ipr_cmd);
7643 if (rc == IPR_RC_JOB_RETURN)
7647 ipr_reinit_ipr_cmnd(ipr_cmd);
7648 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
7649 rc = ipr_cmd->job_step(ipr_cmd);
7650 } while(rc == IPR_RC_JOB_CONTINUE);
7654 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7655 * @ioa_cfg: ioa config struct
7656 * @job_step: first job step of reset job
7657 * @shutdown_type: shutdown type
7659 * Description: This function will initiate the reset of the given adapter
7660 * starting at the selected job step.
7661 * If the caller needs to wait on the completion of the reset,
7662 * the caller must sleep on the reset_wait_q.
7667 static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7668 int (*job_step) (struct ipr_cmnd *),
7669 enum ipr_shutdown_type shutdown_type)
7671 struct ipr_cmnd *ipr_cmd;
7673 ioa_cfg->in_reset_reload = 1;
7674 ioa_cfg->allow_cmds = 0;
7675 scsi_block_requests(ioa_cfg->host);
7677 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7678 ioa_cfg->reset_cmd = ipr_cmd;
7679 ipr_cmd->job_step = job_step;
7680 ipr_cmd->u.shutdown_type = shutdown_type;
7682 ipr_reset_ioa_job(ipr_cmd);
7686 * ipr_initiate_ioa_reset - Initiate an adapter reset
7687 * @ioa_cfg: ioa config struct
7688 * @shutdown_type: shutdown type
7690 * Description: This function will initiate the reset of the given adapter.
7691 * If the caller needs to wait on the completion of the reset,
7692 * the caller must sleep on the reset_wait_q.
7697 static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7698 enum ipr_shutdown_type shutdown_type)
7700 if (ioa_cfg->ioa_is_dead)
7703 if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7704 ioa_cfg->sdt_state = ABORT_DUMP;
7706 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7707 dev_err(&ioa_cfg->pdev->dev,
7708 "IOA taken offline - error recovery failed\n");
7710 ioa_cfg->reset_retries = 0;
7711 ioa_cfg->ioa_is_dead = 1;
7713 if (ioa_cfg->in_ioa_bringdown) {
7714 ioa_cfg->reset_cmd = NULL;
7715 ioa_cfg->in_reset_reload = 0;
7716 ipr_fail_all_ops(ioa_cfg);
7717 wake_up_all(&ioa_cfg->reset_wait_q);
7719 spin_unlock_irq(ioa_cfg->host->host_lock);
7720 scsi_unblock_requests(ioa_cfg->host);
7721 spin_lock_irq(ioa_cfg->host->host_lock);
7724 ioa_cfg->in_ioa_bringdown = 1;
7725 shutdown_type = IPR_SHUTDOWN_NONE;
7729 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7734 * ipr_reset_freeze - Hold off all I/O activity
7735 * @ipr_cmd: ipr command struct
7737 * Description: If the PCI slot is frozen, hold off all I/O
7738 * activity; then, as soon as the slot is available again,
7739 * initiate an adapter reset.
7741 static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7743 /* Disallow new interrupts, avoid loop */
7744 ipr_cmd->ioa_cfg->allow_interrupts = 0;
7745 list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7746 ipr_cmd->done = ipr_reset_ioa_job;
7747 return IPR_RC_JOB_RETURN;
7751 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7752 * @pdev: PCI device struct
7754 * Description: This routine is called to tell us that the PCI bus
7755 * is down. Can't do anything here, except put the device driver
7756 * into a holding pattern, waiting for the PCI bus to come back.
7758 static void ipr_pci_frozen(struct pci_dev *pdev)
7760 unsigned long flags = 0;
7761 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7763 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7764 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7765 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7769 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7770 * @pdev: PCI device struct
7772 * Description: This routine is called by the pci error recovery
7773 * code after the PCI slot has been reset, just before we
7774 * should resume normal operations.
7776 static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7778 unsigned long flags = 0;
7779 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7781 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7782 if (ioa_cfg->needs_warm_reset)
7783 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7785 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7787 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7788 return PCI_ERS_RESULT_RECOVERED;
7792 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7793 * @pdev: PCI device struct
7795 * Description: This routine is called when the PCI bus has
7796 * permanently failed.
7798 static void ipr_pci_perm_failure(struct pci_dev *pdev)
7800 unsigned long flags = 0;
7801 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7803 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7804 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7805 ioa_cfg->sdt_state = ABORT_DUMP;
7806 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7807 ioa_cfg->in_ioa_bringdown = 1;
7808 ioa_cfg->allow_cmds = 0;
7809 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7810 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7814 * ipr_pci_error_detected - Called when a PCI error is detected.
7815 * @pdev: PCI device struct
7816 * @state: PCI channel state
7818 * Description: Called when a PCI error is detected.
7821 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7823 static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7824 pci_channel_state_t state)
7827 case pci_channel_io_frozen:
7828 ipr_pci_frozen(pdev);
7829 return PCI_ERS_RESULT_NEED_RESET;
7830 case pci_channel_io_perm_failure:
7831 ipr_pci_perm_failure(pdev);
7832 return PCI_ERS_RESULT_DISCONNECT;
7837 return PCI_ERS_RESULT_NEED_RESET;
7841 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7842 * @ioa_cfg: ioa cfg struct
7844 * Description: This is the second phase of adapter intialization
7845 * This function takes care of initilizing the adapter to the point
7846 * where it can accept new commands.
7849 * 0 on success / -EIO on failure
7851 static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7854 unsigned long host_lock_flags = 0;
7857 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7858 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
7859 if (ioa_cfg->needs_hard_reset) {
7860 ioa_cfg->needs_hard_reset = 0;
7861 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7863 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7866 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7867 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7868 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7870 if (ioa_cfg->ioa_is_dead) {
7872 } else if (ipr_invalid_adapter(ioa_cfg)) {
7876 dev_err(&ioa_cfg->pdev->dev,
7877 "Adapter not supported in this hardware configuration.\n");
7880 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7887 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7888 * @ioa_cfg: ioa config struct
7893 static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7897 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7898 if (ioa_cfg->ipr_cmnd_list[i])
7899 pci_pool_free(ioa_cfg->ipr_cmd_pool,
7900 ioa_cfg->ipr_cmnd_list[i],
7901 ioa_cfg->ipr_cmnd_list_dma[i]);
7903 ioa_cfg->ipr_cmnd_list[i] = NULL;
7906 if (ioa_cfg->ipr_cmd_pool)
7907 pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7909 ioa_cfg->ipr_cmd_pool = NULL;
7913 * ipr_free_mem - Frees memory allocated for an adapter
7914 * @ioa_cfg: ioa cfg struct
7919 static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7923 kfree(ioa_cfg->res_entries);
7924 pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7925 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7926 ipr_free_cmd_blks(ioa_cfg);
7927 pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7928 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7929 pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7930 ioa_cfg->u.cfg_table,
7931 ioa_cfg->cfg_table_dma);
7933 for (i = 0; i < IPR_NUM_HCAMS; i++) {
7934 pci_free_consistent(ioa_cfg->pdev,
7935 sizeof(struct ipr_hostrcb),
7936 ioa_cfg->hostrcb[i],
7937 ioa_cfg->hostrcb_dma[i]);
7940 ipr_free_dump(ioa_cfg);
7941 kfree(ioa_cfg->trace);
7945 * ipr_free_all_resources - Free all allocated resources for an adapter.
7946 * @ipr_cmd: ipr command struct
7948 * This function frees all allocated resources for the
7949 * specified adapter.
7954 static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
7956 struct pci_dev *pdev = ioa_cfg->pdev;
7959 free_irq(pdev->irq, ioa_cfg);
7960 pci_disable_msi(pdev);
7961 iounmap(ioa_cfg->hdw_dma_regs);
7962 pci_release_regions(pdev);
7963 ipr_free_mem(ioa_cfg);
7964 scsi_host_put(ioa_cfg->host);
7965 pci_disable_device(pdev);
7970 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
7971 * @ioa_cfg: ioa config struct
7974 * 0 on success / -ENOMEM on allocation failure
7976 static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7978 struct ipr_cmnd *ipr_cmd;
7979 struct ipr_ioarcb *ioarcb;
7980 dma_addr_t dma_addr;
7983 ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
7984 sizeof(struct ipr_cmnd), 16, 0);
7986 if (!ioa_cfg->ipr_cmd_pool)
7989 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7990 ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
7993 ipr_free_cmd_blks(ioa_cfg);
7997 memset(ipr_cmd, 0, sizeof(*ipr_cmd));
7998 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
7999 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8001 ioarcb = &ipr_cmd->ioarcb;
8002 ipr_cmd->dma_addr = dma_addr;
8004 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8006 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8008 ioarcb->host_response_handle = cpu_to_be32(i << 2);
8009 if (ioa_cfg->sis64) {
8010 ioarcb->u.sis64_addr_data.data_ioadl_addr =
8011 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8012 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8013 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8015 ioarcb->write_ioadl_addr =
8016 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8017 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8018 ioarcb->ioasa_host_pci_addr =
8019 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
8021 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8022 ipr_cmd->cmd_index = i;
8023 ipr_cmd->ioa_cfg = ioa_cfg;
8024 ipr_cmd->sense_buffer_dma = dma_addr +
8025 offsetof(struct ipr_cmnd, sense_buffer);
8027 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8034 * ipr_alloc_mem - Allocate memory for an adapter
8035 * @ioa_cfg: ioa config struct
8038 * 0 on success / non-zero for error
8040 static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8042 struct pci_dev *pdev = ioa_cfg->pdev;
8043 int i, rc = -ENOMEM;
8046 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8047 ioa_cfg->max_devs_supported, GFP_KERNEL);
8049 if (!ioa_cfg->res_entries)
8052 if (ioa_cfg->sis64) {
8053 ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8054 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8055 ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8056 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8057 ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8058 BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8061 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8062 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8063 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8066 ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8067 sizeof(struct ipr_misc_cbs),
8068 &ioa_cfg->vpd_cbs_dma);
8070 if (!ioa_cfg->vpd_cbs)
8071 goto out_free_res_entries;
8073 if (ipr_alloc_cmd_blks(ioa_cfg))
8074 goto out_free_vpd_cbs;
8076 ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8077 sizeof(u32) * IPR_NUM_CMD_BLKS,
8078 &ioa_cfg->host_rrq_dma);
8080 if (!ioa_cfg->host_rrq)
8081 goto out_ipr_free_cmd_blocks;
8083 ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8084 ioa_cfg->cfg_table_size,
8085 &ioa_cfg->cfg_table_dma);
8087 if (!ioa_cfg->u.cfg_table)
8088 goto out_free_host_rrq;
8090 for (i = 0; i < IPR_NUM_HCAMS; i++) {
8091 ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8092 sizeof(struct ipr_hostrcb),
8093 &ioa_cfg->hostrcb_dma[i]);
8095 if (!ioa_cfg->hostrcb[i])
8096 goto out_free_hostrcb_dma;
8098 ioa_cfg->hostrcb[i]->hostrcb_dma =
8099 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8100 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8101 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8104 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8105 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8107 if (!ioa_cfg->trace)
8108 goto out_free_hostrcb_dma;
8115 out_free_hostrcb_dma:
8117 pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8118 ioa_cfg->hostrcb[i],
8119 ioa_cfg->hostrcb_dma[i]);
8121 pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8122 ioa_cfg->u.cfg_table,
8123 ioa_cfg->cfg_table_dma);
8125 pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8126 ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8127 out_ipr_free_cmd_blocks:
8128 ipr_free_cmd_blks(ioa_cfg);
8130 pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8131 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8132 out_free_res_entries:
8133 kfree(ioa_cfg->res_entries);
8138 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8139 * @ioa_cfg: ioa config struct
8144 static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8148 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8149 ioa_cfg->bus_attr[i].bus = i;
8150 ioa_cfg->bus_attr[i].qas_enabled = 0;
8151 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8152 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8153 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8155 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8160 * ipr_init_ioa_cfg - Initialize IOA config struct
8161 * @ioa_cfg: ioa config struct
8162 * @host: scsi host struct
8163 * @pdev: PCI dev struct
8168 static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8169 struct Scsi_Host *host, struct pci_dev *pdev)
8171 const struct ipr_interrupt_offsets *p;
8172 struct ipr_interrupts *t;
8175 ioa_cfg->host = host;
8176 ioa_cfg->pdev = pdev;
8177 ioa_cfg->log_level = ipr_log_level;
8178 ioa_cfg->doorbell = IPR_DOORBELL;
8179 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8180 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8181 sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8182 sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8183 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8184 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8185 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8186 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8188 INIT_LIST_HEAD(&ioa_cfg->free_q);
8189 INIT_LIST_HEAD(&ioa_cfg->pending_q);
8190 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8191 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8192 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8193 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8194 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8195 init_waitqueue_head(&ioa_cfg->reset_wait_q);
8196 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8197 ioa_cfg->sdt_state = INACTIVE;
8199 ipr_initialize_bus_attr(ioa_cfg);
8200 ioa_cfg->max_devs_supported = ipr_max_devs;
8202 if (ioa_cfg->sis64) {
8203 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8204 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8205 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8206 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8208 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8209 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8210 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8211 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8213 host->max_channel = IPR_MAX_BUS_TO_SCAN;
8214 host->unique_id = host->host_no;
8215 host->max_cmd_len = IPR_MAX_CDB_LEN;
8216 pci_set_drvdata(pdev, ioa_cfg);
8218 p = &ioa_cfg->chip_cfg->regs;
8220 base = ioa_cfg->hdw_dma_regs;
8222 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8223 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8224 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8225 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8226 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8227 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8228 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8229 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8230 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8231 t->ioarrin_reg = base + p->ioarrin_reg;
8232 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8233 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8234 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8235 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8236 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8237 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8239 if (ioa_cfg->sis64) {
8240 t->init_feedback_reg = base + p->init_feedback_reg;
8241 t->dump_addr_reg = base + p->dump_addr_reg;
8242 t->dump_data_reg = base + p->dump_data_reg;
8247 * ipr_get_chip_info - Find adapter chip information
8248 * @dev_id: PCI device id struct
8251 * ptr to chip information on success / NULL on failure
8253 static const struct ipr_chip_t * __devinit
8254 ipr_get_chip_info(const struct pci_device_id *dev_id)
8258 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8259 if (ipr_chip[i].vendor == dev_id->vendor &&
8260 ipr_chip[i].device == dev_id->device)
8261 return &ipr_chip[i];
8266 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8267 * @pdev: PCI device struct
8269 * Description: Simply set the msi_received flag to 1 indicating that
8270 * Message Signaled Interrupts are supported.
8273 * 0 on success / non-zero on failure
8275 static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8277 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8278 unsigned long lock_flags = 0;
8279 irqreturn_t rc = IRQ_HANDLED;
8281 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8283 ioa_cfg->msi_received = 1;
8284 wake_up(&ioa_cfg->msi_wait_q);
8286 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8291 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8292 * @pdev: PCI device struct
8294 * Description: The return value from pci_enable_msi() can not always be
8295 * trusted. This routine sets up and initiates a test interrupt to determine
8296 * if the interrupt is received via the ipr_test_intr() service routine.
8297 * If the tests fails, the driver will fall back to LSI.
8300 * 0 on success / non-zero on failure
8302 static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8303 struct pci_dev *pdev)
8306 volatile u32 int_reg;
8307 unsigned long lock_flags = 0;
/* Arm the test under the host lock: clear the received flag, mask all
 * interrupts except adapter-to-operational, then unmask only the IO
 * debug-acknowledge interrupt used to raise the test MSI. */
8311 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8312 init_waitqueue_head(&ioa_cfg->msi_wait_q);
8313 ioa_cfg->msi_received = 0;
8314 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8315 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
/* Read back to flush the posted write before dropping the lock */
8316 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8317 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8319 rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8321 dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8323 } else if (ipr_debug)
8324 dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
/* Trigger the test interrupt, then wait up to one second for
 * ipr_test_intr() to set msi_received. */
8326 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8327 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8328 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8329 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
/* Re-check the flag under the lock; no interrupt means MSI delivery
 * does not work on this setup and the caller falls back to LSI. */
8331 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8332 if (!ioa_cfg->msi_received) {
8333 /* MSI test failed */
8334 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
8336 } else if (ipr_debug)
8337 dev_info(&pdev->dev, "MSI test succeeded.\n");
8339 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
/* Release the temporary test handler; the real ISR is installed later */
8341 free_irq(pdev->irq, ioa_cfg);
8349 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8350 * @pdev: PCI device struct
8351 * @dev_id: PCI device id struct
8354 * 0 on success / non-zero on failure
8356 static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8357 const struct pci_device_id *dev_id)
8359 struct ipr_ioa_cfg *ioa_cfg;
8360 struct Scsi_Host *host;
8361 unsigned long ipr_regs_pci;
8362 void __iomem *ipr_regs;
8363 int rc = PCIBIOS_SUCCESSFUL;
8364 volatile u32 mask, uproc, interrupts;
8368 if ((rc = pci_enable_device(pdev))) {
8369 dev_err(&pdev->dev, "Cannot enable adapter\n");
8373 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
/* The per-adapter ipr_ioa_cfg lives in the Scsi_Host's hostdata area */
8375 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8378 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8383 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8384 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8385 ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8386 sata_port_info.flags, &ipr_sata_ops);
/* Look up chip-specific register layout/config for this PCI id */
8388 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8390 if (!ioa_cfg->ipr_chip) {
8391 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8392 dev_id->vendor, dev_id->device);
8393 goto out_scsi_host_put;
8394 }
8396 /* set SIS 32 or SIS 64 */
8397 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8398 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
/* Module parameter overrides the per-device transition-to-operational
 * timeout; otherwise the PCI table's driver_data selects long/normal. */
8400 if (ipr_transop_timeout)
8401 ioa_cfg->transop_timeout = ipr_transop_timeout;
8402 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8403 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8405 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8407 ioa_cfg->revid = pdev->revision;
8409 ipr_regs_pci = pci_resource_start(pdev, 0);
8411 rc = pci_request_regions(pdev, IPR_NAME);
8414 "Couldn't register memory range of registers\n");
8415 goto out_scsi_host_put;
/* Map BAR 0 — all adapter registers and the mailbox live there */
8418 ipr_regs = pci_ioremap_bar(pdev, 0);
8422 "Couldn't map memory range of registers\n");
8424 goto out_release_regions;
8427 ioa_cfg->hdw_dma_regs = ipr_regs;
8428 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8429 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8431 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8433 pci_set_master(pdev);
/* SIS64 adapters prefer a 64-bit DMA mask but can fall back to 32-bit */
8435 if (ioa_cfg->sis64) {
8436 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8438 dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8439 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8443 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8446 dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8450 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8451 ioa_cfg->chip_cfg->cache_line_size);
8453 if (rc != PCIBIOS_SUCCESSFUL) {
8454 dev_err(&pdev->dev, "Write of cache line size failed\n");
8459 /* Enable MSI style interrupts if they are supported. */
8460 if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
/* pci_enable_msi() success is not trusted; a real test interrupt is
 * fired via ipr_test_msi(). -EOPNOTSUPP means fall back to LSI. */
8461 rc = ipr_test_msi(ioa_cfg, pdev);
8462 if (rc == -EOPNOTSUPP)
8463 pci_disable_msi(pdev);
8465 goto out_msi_disable;
8467 dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8468 } else if (ipr_debug)
8469 dev_info(&pdev->dev, "Cannot enable MSI.\n");
8471 /* Save away PCI config space for use following IOA reset */
8472 rc = pci_save_state(pdev);
8474 if (rc != PCIBIOS_SUCCESSFUL) {
8475 dev_err(&pdev->dev, "Failed to save PCI config space\n");
8480 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8483 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
/* Config table size depends on SIS mode: 64-bit layouts are larger */
8487 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8488 + ((sizeof(struct ipr_config_table_entry64)
8489 * ioa_cfg->max_devs_supported)));
8491 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8492 + ((sizeof(struct ipr_config_table_entry)
8493 * ioa_cfg->max_devs_supported)));
8495 rc = ipr_alloc_mem(ioa_cfg);
8498 "Couldn't allocate enough memory for device driver!\n");
8503 * If HRRQ updated interrupt is not masked, or reset alert is set,
8504 * the card is in an unknown state and needs a hard reset
8506 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8507 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8508 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8509 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8510 ioa_cfg->needs_hard_reset = 1;
8511 if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8512 ioa_cfg->needs_hard_reset = 1;
8513 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8514 ioa_cfg->ioa_unit_checked = 1;
/* Quiesce interrupts, then install the real ISR. Shared IRQ flag is
 * only needed when the MSI test did not succeed (LSI may be shared). */
8516 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8517 rc = request_irq(pdev->irq, ipr_isr,
8518 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8522 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
/* Some hardware (e.g. revision-0 Obsidian-E) requires a PCI warm
 * reset instead of BIST to reset the adapter. */
8527 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8528 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8529 ioa_cfg->needs_warm_reset = 1;
8530 ioa_cfg->reset = ipr_reset_slot_reset;
8532 ioa_cfg->reset = ipr_reset_start_bist;
/* Publish the adapter on the global list used by ipr_halt() et al. */
8534 spin_lock(&ipr_driver_lock);
8535 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8536 spin_unlock(&ipr_driver_lock);
/* Error unwind ladder: each label releases what was acquired above it */
8543 ipr_free_mem(ioa_cfg);
8547 pci_disable_msi(pdev);
8548 out_release_regions:
8549 pci_release_regions(pdev);
8551 scsi_host_put(host);
8553 pci_disable_device(pdev);
8558 * ipr_scan_vsets - Scans for VSET devices
8559 * @ioa_cfg: ioa config struct
8561 * Description: Since the VSET resources do not follow SAM in that we can have
8562 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8567 static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8571 for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8572 for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8573 scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8577 * ipr_initiate_ioa_bringdown - Bring down an adapter
8578 * @ioa_cfg: ioa config struct
8579 * @shutdown_type: shutdown type
8581 * Description: This function will initiate bringing down the adapter.
8582 * This consists of issuing an IOA shutdown to the adapter
8583 * to flush the cache, and running BIST.
8584 * If the caller needs to wait on the completion of the reset,
8585 * the caller must sleep on the reset_wait_q.
8590 static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8591 enum ipr_shutdown_type shutdown_type)
8594 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8595 ioa_cfg->sdt_state = ABORT_DUMP;
8596 ioa_cfg->reset_retries = 0;
8597 ioa_cfg->in_ioa_bringdown = 1;
8598 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8603 * __ipr_remove - Remove a single adapter
8604 * @pdev: pci device struct
8606 * Adapter hot plug remove entry point.
8611 static void __ipr_remove(struct pci_dev *pdev)
8613 unsigned long host_lock_flags = 0;
8614 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Wait out any in-flight reset/reload before starting bringdown; the
 * lock is dropped around the sleep and re-taken to re-check the flag. */
8617 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8618 while(ioa_cfg->in_reset_reload) {
8619 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8620 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8621 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Issue a normal shutdown (flushes write cache), then wait for the
 * resulting reset to finish and drain scheduled work. */
8624 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8626 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8627 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8628 flush_scheduled_work();
8629 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
/* Unpublish the adapter from the global list used by ipr_halt() */
8631 spin_lock(&ipr_driver_lock);
8632 list_del(&ioa_cfg->queue);
8633 spin_unlock(&ipr_driver_lock);
8635 if (ioa_cfg->sdt_state == ABORT_DUMP)
8636 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8637 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
/* Free IRQ, mappings, regions and host memory for this adapter */
8639 ipr_free_all_resources(ioa_cfg);
8645 * ipr_remove - IOA hot plug remove entry point
8646 * @pdev: pci device struct
8648 * Adapter hot plug remove entry point.
8653 static void __devexit ipr_remove(struct pci_dev *pdev)
8655 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
/* Tear down the sysfs trace/dump attributes before removing the host */
8659 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8661 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
8663 scsi_remove_host(ioa_cfg->host);
8671 * ipr_probe - Adapter hot plug add entry point
8674 * 0 on success / non-zero on failure
8676 static int __devinit ipr_probe(struct pci_dev *pdev,
8677 const struct pci_device_id *dev_id)
8679 struct ipr_ioa_cfg *ioa_cfg;
/* Stage 1: allocate host, map registers, set up interrupts */
8682 rc = ipr_probe_ioa(pdev, dev_id);
8687 ioa_cfg = pci_get_drvdata(pdev);
/* Stage 2: bring the adapter to operational state */
8688 rc = ipr_probe_ioa_part2(ioa_cfg);
8695 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
/* Create sysfs trace/dump files; on failure unwind what was added so far */
8702 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
8706 scsi_remove_host(ioa_cfg->host);
8711 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
8715 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8717 scsi_remove_host(ioa_cfg->host);
/* Scan attached devices: normal buses, sparse VSET LUNs, then the IOA
 * resource itself as a device. */
8722 scsi_scan_host(ioa_cfg->host);
8723 ipr_scan_vsets(ioa_cfg);
8724 scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8725 ioa_cfg->allow_ml_add_del = 1;
8726 ioa_cfg->host->max_channel = IPR_VSET_BUS;
8727 schedule_work(&ioa_cfg->work_q);
8732 * ipr_shutdown - Shutdown handler.
8733 * @pdev: pci device struct
8735 * This function is invoked upon system shutdown/reboot. It will issue
8736 * an adapter shutdown to the adapter to flush the write cache.
8741 static void ipr_shutdown(struct pci_dev *pdev)
8743 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8744 unsigned long lock_flags = 0;
/* Let any in-progress reset/reload finish first; drop the lock while
 * sleeping and re-take it to re-check the flag. */
8746 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8747 while(ioa_cfg->in_reset_reload) {
8748 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8749 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8750 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
/* Issue a normal shutdown (flushes write cache) and wait for completion */
8753 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8754 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8755 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
/* PCI ids this driver binds to. Entries are (vendor, device, subvendor,
 * subdevice, class, class_mask, driver_data); driver_data carries per-card
 * quirk flags such as IPR_USE_LONG_TRANSOP_TIMEOUT / IPR_USE_PCI_WARM_RESET. */
8758 static struct pci_device_id ipr_pci_table[] __devinitdata = {
/* Gemstone-based adapters */
8759 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8760 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
8761 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8762 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
8763 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8764 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
8765 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8766 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
/* Citrine-based adapters */
8767 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8768 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
8769 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8770 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
8771 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8772 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
8773 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8774 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
8775 IPR_USE_LONG_TRANSOP_TIMEOUT },
/* Obsidian-based adapters (Adaptec and IBM device ids) */
8776 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8777 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8778 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8779 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8780 IPR_USE_LONG_TRANSOP_TIMEOUT },
8781 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8782 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8783 IPR_USE_LONG_TRANSOP_TIMEOUT },
8784 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8785 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8786 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8787 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8788 IPR_USE_LONG_TRANSOP_TIMEOUT},
8789 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8790 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8791 IPR_USE_LONG_TRANSOP_TIMEOUT },
/* Obsidian-E based adapters */
8792 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8793 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
8794 IPR_USE_LONG_TRANSOP_TIMEOUT },
8795 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8796 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
8797 IPR_USE_LONG_TRANSOP_TIMEOUT },
8798 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8799 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
8800 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8801 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
8802 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
/* Snipe / Scamp based adapters */
8803 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
8804 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
8805 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8806 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
8807 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8808 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
8809 IPR_USE_LONG_TRANSOP_TIMEOUT },
8810 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8811 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
8812 IPR_USE_LONG_TRANSOP_TIMEOUT },
8813 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
8814 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
8815 IPR_USE_LONG_TRANSOP_TIMEOUT },
/* Export the id table so module autoloading can match these devices */
8818 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
/* PCI error-recovery (EEH/AER) callbacks for this driver */
8820 static struct pci_error_handlers ipr_err_handler = {
8821 .error_detected = ipr_pci_error_detected,
8822 .slot_reset = ipr_pci_slot_reset,
/* PCI driver registration: binds ipr_pci_table ids to the probe/remove/
 * shutdown entry points and the error-recovery handlers above. */
8825 static struct pci_driver ipr_driver = {
8827 .id_table = ipr_pci_table,
8829 .remove = __devexit_p(ipr_remove),
8830 .shutdown = ipr_shutdown,
8831 .err_handler = &ipr_err_handler,
8835 * ipr_halt_done - Shutdown prepare completion
8840 static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
8842 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8844 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8848 * ipr_halt - Issue shutdown prepare to all adapters
8851 * NOTIFY_OK on success / NOTIFY_DONE on failure
8853 static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
8855 struct ipr_cmnd *ipr_cmd;
8856 struct ipr_ioa_cfg *ioa_cfg;
8857 unsigned long flags = 0;
8859 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
8862 spin_lock(&ipr_driver_lock);
8864 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
8865 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8866 if (!ioa_cfg->allow_cmds) {
8867 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8871 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8872 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8873 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8874 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8875 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
8877 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
8878 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8880 spin_unlock(&ipr_driver_lock);
8885 static struct notifier_block ipr_notifier = {
8890 * ipr_init - Module entry point
8893 * 0 on success / negative value on failure
8895 static int __init ipr_init(void)
8897 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8898 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8900 register_reboot_notifier(&ipr_notifier);
8901 return pci_register_driver(&ipr_driver);
8905 * ipr_exit - Module unload
8907 * Module unload entry point.
8912 static void __exit ipr_exit(void)
8914 unregister_reboot_notifier(&ipr_notifier);
8915 pci_unregister_driver(&ipr_driver);
/* Wire the module entry/exit points into the kernel module loader */
8918 module_init(ipr_init);
8919 module_exit(ipr_exit);