2 * Disk Array driver for HP Smart Array SAS controllers
3 * Copyright 2014-2015 PMC-Sierra, Inc.
4 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more details.
15 * Questions/Comments/Bugfixes to storagedev@pmcs.com
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/types.h>
22 #include <linux/pci.h>
23 #include <linux/pci-aspm.h>
24 #include <linux/kernel.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/timer.h>
29 #include <linux/init.h>
30 #include <linux/spinlock.h>
31 #include <linux/compat.h>
32 #include <linux/blktrace_api.h>
33 #include <linux/uaccess.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/completion.h>
37 #include <linux/moduleparam.h>
38 #include <scsi/scsi.h>
39 #include <scsi/scsi_cmnd.h>
40 #include <scsi/scsi_device.h>
41 #include <scsi/scsi_host.h>
42 #include <scsi/scsi_tcq.h>
43 #include <scsi/scsi_eh.h>
44 #include <scsi/scsi_dbg.h>
45 #include <linux/cciss_ioctl.h>
46 #include <linux/string.h>
47 #include <linux/bitmap.h>
48 #include <linux/atomic.h>
49 #include <linux/jiffies.h>
50 #include <linux/percpu-defs.h>
51 #include <linux/percpu.h>
52 #include <asm/unaligned.h>
53 #include <asm/div64.h>
57 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
58 #define HPSA_DRIVER_VERSION "3.4.10-0"
59 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
62 /* How long to wait for CISS doorbell communication */
63 #define CLEAR_EVENT_WAIT_INTERVAL 20 /* ms for each msleep() call */
64 #define MODE_CHANGE_WAIT_INTERVAL 10 /* ms for each msleep() call */
65 #define MAX_CLEAR_EVENT_WAIT 30000 /* times 20 ms = 600 s */
66 #define MAX_MODE_CHANGE_WAIT 2000 /* times 10 ms = 20 s */
67 #define MAX_IOCTL_CONFIG_WAIT 1000
69 /* Define how many times we will try a command because of bus resets */
70 #define MAX_CMD_RETRIES 3
72 /* Embedded module documentation macros - see modules.h */
73 MODULE_AUTHOR("Hewlett-Packard Company");
74 MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
76 MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
77 MODULE_VERSION(HPSA_DRIVER_VERSION);
78 MODULE_LICENSE("GPL");
80 static int hpsa_allow_any;
81 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
82 MODULE_PARM_DESC(hpsa_allow_any,
83 "Allow hpsa driver to access unknown HP Smart Array hardware");
84 static int hpsa_simple_mode;
85 module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
86 MODULE_PARM_DESC(hpsa_simple_mode,
87 "Use 'simple mode' rather than 'performant mode'");
89 /* define the PCI info for the cards we can control */
90 static const struct pci_device_id hpsa_pci_device_id[] = {
91 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
92 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
93 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
94 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
95 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
96 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
97 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
98 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
99 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
100 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
101 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
102 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
103 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
104 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
110 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
111 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
112 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
113 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
114 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
115 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
116 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
117 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
118 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
119 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
120 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
121 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
122 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C6},
123 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
124 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
125 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
126 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CA},
127 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CB},
128 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CC},
129 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CD},
130 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21CE},
131 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0580},
132 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0581},
133 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0582},
134 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0583},
135 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0584},
136 {PCI_VENDOR_ID_ADAPTEC2, 0x0290, 0x9005, 0x0585},
137 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
138 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
139 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
140 {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
141 {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
142 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
143 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
147 MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);
149 /* board_id = Subsystem Device ID & Vendor ID
150 * product = Marketing Name for the board
151 * access = Address of the struct of function pointers
153 static struct board_type products[] = {
154 {0x3241103C, "Smart Array P212", &SA5_access},
155 {0x3243103C, "Smart Array P410", &SA5_access},
156 {0x3245103C, "Smart Array P410i", &SA5_access},
157 {0x3247103C, "Smart Array P411", &SA5_access},
158 {0x3249103C, "Smart Array P812", &SA5_access},
159 {0x324A103C, "Smart Array P712m", &SA5_access},
160 {0x324B103C, "Smart Array P711m", &SA5_access},
161 {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
162 {0x3350103C, "Smart Array P222", &SA5_access},
163 {0x3351103C, "Smart Array P420", &SA5_access},
164 {0x3352103C, "Smart Array P421", &SA5_access},
165 {0x3353103C, "Smart Array P822", &SA5_access},
166 {0x3354103C, "Smart Array P420i", &SA5_access},
167 {0x3355103C, "Smart Array P220i", &SA5_access},
168 {0x3356103C, "Smart Array P721m", &SA5_access},
169 {0x1921103C, "Smart Array P830i", &SA5_access},
170 {0x1922103C, "Smart Array P430", &SA5_access},
171 {0x1923103C, "Smart Array P431", &SA5_access},
172 {0x1924103C, "Smart Array P830", &SA5_access},
173 {0x1926103C, "Smart Array P731m", &SA5_access},
174 {0x1928103C, "Smart Array P230i", &SA5_access},
175 {0x1929103C, "Smart Array P530", &SA5_access},
176 {0x21BD103C, "Smart Array P244br", &SA5_access},
177 {0x21BE103C, "Smart Array P741m", &SA5_access},
178 {0x21BF103C, "Smart HBA H240ar", &SA5_access},
179 {0x21C0103C, "Smart Array P440ar", &SA5_access},
180 {0x21C1103C, "Smart Array P840ar", &SA5_access},
181 {0x21C2103C, "Smart Array P440", &SA5_access},
182 {0x21C3103C, "Smart Array P441", &SA5_access},
183 {0x21C4103C, "Smart Array", &SA5_access},
184 {0x21C5103C, "Smart Array P841", &SA5_access},
185 {0x21C6103C, "Smart HBA H244br", &SA5_access},
186 {0x21C7103C, "Smart HBA H240", &SA5_access},
187 {0x21C8103C, "Smart HBA H241", &SA5_access},
188 {0x21C9103C, "Smart Array", &SA5_access},
189 {0x21CA103C, "Smart Array P246br", &SA5_access},
190 {0x21CB103C, "Smart Array P840", &SA5_access},
191 {0x21CC103C, "Smart Array", &SA5_access},
192 {0x21CD103C, "Smart Array", &SA5_access},
193 {0x21CE103C, "Smart HBA", &SA5_access},
194 {0x05809005, "SmartHBA-SA", &SA5_access},
195 {0x05819005, "SmartHBA-SA 8i", &SA5_access},
196 {0x05829005, "SmartHBA-SA 8i8e", &SA5_access},
197 {0x05839005, "SmartHBA-SA 8e", &SA5_access},
198 {0x05849005, "SmartHBA-SA 16i", &SA5_access},
199 {0x05859005, "SmartHBA-SA 4i4e", &SA5_access},
200 {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
201 {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
202 {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
203 {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
204 {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
205 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
208 #define SCSI_CMD_BUSY ((struct scsi_cmnd *)&hpsa_cmd_busy)
209 static const struct scsi_cmnd hpsa_cmd_busy;
210 #define SCSI_CMD_IDLE ((struct scsi_cmnd *)&hpsa_cmd_idle)
211 static const struct scsi_cmnd hpsa_cmd_idle;
212 static int number_of_controllers;
214 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
215 static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
216 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
219 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
223 static void cmd_free(struct ctlr_info *h, struct CommandList *c);
224 static struct CommandList *cmd_alloc(struct ctlr_info *h);
225 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c);
226 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
227 struct scsi_cmnd *scmd);
228 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
229 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
231 static void hpsa_free_cmd_pool(struct ctlr_info *h);
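/*
 * VPD_PAGE is presumably OR'd into fill_cmd()'s page_code argument to
 * request a vital product data (EVPD) inquiry page instead of standard
 * inquiry data (an assumption; fill_cmd()'s INQUIRY handling is not
 * shown in this section).
 */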
232 #define VPD_PAGE (1 << 8)
233 #define HPSA_SIMPLE_ERROR_BITS 0x03
235 static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
236 static void hpsa_scan_start(struct Scsi_Host *);
237 static int hpsa_scan_finished(struct Scsi_Host *sh,
238 unsigned long elapsed_time);
239 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);
241 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
242 static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
243 static int hpsa_slave_alloc(struct scsi_device *sdev);
244 static int hpsa_slave_configure(struct scsi_device *sdev);
245 static void hpsa_slave_destroy(struct scsi_device *sdev);
247 static void hpsa_update_scsi_devices(struct ctlr_info *h);
248 static int check_for_unit_attention(struct ctlr_info *h,
249 struct CommandList *c);
250 static void check_ioctl_unit_attention(struct ctlr_info *h,
251 struct CommandList *c);
252 /* performant mode helper functions */
253 static void calc_bucket_map(int *bucket, int num_buckets,
254 int nsgs, int min_blocks, u32 *bucket_map);
255 static void hpsa_free_performant_mode(struct ctlr_info *h);
256 static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
257 static inline u32 next_command(struct ctlr_info *h, u8 q);
258 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
259 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
261 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
262 unsigned long *memory_bar);
263 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
264 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
266 static inline void finish_cmd(struct CommandList *c);
267 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
268 #define BOARD_NOT_READY 0
269 #define BOARD_READY 1
270 static void hpsa_drain_accel_commands(struct ctlr_info *h);
271 static void hpsa_flush_cache(struct ctlr_info *h);
272 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
273 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
274 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
275 static void hpsa_command_resubmit_worker(struct work_struct *work);
276 static u32 lockup_detected(struct ctlr_info *h);
277 static int detect_controller_lockup(struct ctlr_info *h);
278 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device);
280 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
282 unsigned long *priv = shost_priv(sdev->host);
283 return (struct ctlr_info *) *priv;
286 static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
288 unsigned long *priv = shost_priv(sh);
289 return (struct ctlr_info *) *priv;
292 static inline bool hpsa_is_cmd_idle(struct CommandList *c)
294 return c->scsi_cmd == SCSI_CMD_IDLE;
297 static inline bool hpsa_is_pending_event(struct CommandList *c)
299 return c->abort_pending || c->reset_pending;
302 /* extract sense key, asc, and ascq from sense data. -1 means invalid. */
303 static void decode_sense_data(const u8 *sense_data, int sense_data_len,
304 u8 *sense_key, u8 *asc, u8 *ascq)
306 struct scsi_sense_hdr sshdr;
313 if (sense_data_len < 1)
316 rc = scsi_normalize_sense(sense_data, sense_data_len, &sshdr);
318 *sense_key = sshdr.sense_key;
324 static int check_for_unit_attention(struct ctlr_info *h,
325 struct CommandList *c)
327 u8 sense_key, asc, ascq;
330 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
331 sense_len = sizeof(c->err_info->SenseInfo);
333 sense_len = c->err_info->SenseLen;
335 decode_sense_data(c->err_info->SenseInfo, sense_len,
336 &sense_key, &asc, &ascq);
337 if (sense_key != UNIT_ATTENTION || asc == 0xff)
342 dev_warn(&h->pdev->dev,
343 "%s: a state change detected, command retried\n",
347 dev_warn(&h->pdev->dev,
348 "%s: LUN failure detected\n", h->devname);
350 case REPORT_LUNS_CHANGED:
351 dev_warn(&h->pdev->dev,
352 "%s: report LUN data changed\n", h->devname);
354 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
355 * target (array) devices.
359 dev_warn(&h->pdev->dev,
360 "%s: a power on or device reset detected\n",
363 case UNIT_ATTENTION_CLEARED:
364 dev_warn(&h->pdev->dev,
365 "%s: unit attention cleared by another initiator\n",
369 dev_warn(&h->pdev->dev,
370 "%s: unknown unit attention detected\n",
377 static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
379 if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
380 (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
381 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
383 dev_warn(&h->pdev->dev, HPSA "device busy");
387 static u32 lockup_detected(struct ctlr_info *h);
388 static ssize_t host_show_lockup_detected(struct device *dev,
389 struct device_attribute *attr, char *buf)
393 struct Scsi_Host *shost = class_to_shost(dev);
395 h = shost_to_hba(shost);
396 ld = lockup_detected(h);
398 return sprintf(buf, "ld=%d\n", ld);
401 static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
402 struct device_attribute *attr,
403 const char *buf, size_t count)
407 struct Scsi_Host *shost = class_to_shost(dev);
410 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
412 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
413 strncpy(tmpbuf, buf, len);
415 if (sscanf(tmpbuf, "%d", &status) != 1)
417 h = shost_to_hba(shost);
418 h->acciopath_status = !!status;
419 dev_warn(&h->pdev->dev,
420 "hpsa: HP SSD Smart Path %s via sysfs update.\n",
421 h->acciopath_status ? "enabled" : "disabled");
425 static ssize_t host_store_raid_offload_debug(struct device *dev,
426 struct device_attribute *attr,
427 const char *buf, size_t count)
429 int debug_level, len;
431 struct Scsi_Host *shost = class_to_shost(dev);
434 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
436 len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
437 strncpy(tmpbuf, buf, len);
439 if (sscanf(tmpbuf, "%d", &debug_level) != 1)
443 h = shost_to_hba(shost);
444 h->raid_offload_debug = debug_level;
445 dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
446 h->raid_offload_debug);
450 static ssize_t host_store_rescan(struct device *dev,
451 struct device_attribute *attr,
452 const char *buf, size_t count)
455 struct Scsi_Host *shost = class_to_shost(dev);
456 h = shost_to_hba(shost);
457 hpsa_scan_start(h->scsi_host);
461 static ssize_t host_show_firmware_revision(struct device *dev,
462 struct device_attribute *attr, char *buf)
465 struct Scsi_Host *shost = class_to_shost(dev);
466 unsigned char *fwrev;
468 h = shost_to_hba(shost);
469 if (!h->hba_inquiry_data)
471 fwrev = &h->hba_inquiry_data[32];
472 return snprintf(buf, 20, "%c%c%c%c\n",
473 fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
476 static ssize_t host_show_commands_outstanding(struct device *dev,
477 struct device_attribute *attr, char *buf)
479 struct Scsi_Host *shost = class_to_shost(dev);
480 struct ctlr_info *h = shost_to_hba(shost);
482 return snprintf(buf, 20, "%d\n",
483 atomic_read(&h->commands_outstanding));
486 static ssize_t host_show_transport_mode(struct device *dev,
487 struct device_attribute *attr, char *buf)
490 struct Scsi_Host *shost = class_to_shost(dev);
492 h = shost_to_hba(shost);
493 return snprintf(buf, 20, "%s\n",
494 h->transMethod & CFGTBL_Trans_Performant ?
495 "performant" : "simple");
498 static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
499 struct device_attribute *attr, char *buf)
502 struct Scsi_Host *shost = class_to_shost(dev);
504 h = shost_to_hba(shost);
505 return snprintf(buf, 30, "HP SSD Smart Path %s\n",
506 (h->acciopath_status == 1) ? "enabled" : "disabled");
509 /* List of controllers which cannot be hard reset on kexec with reset_devices */
510 static u32 unresettable_controller[] = {
511 0x324a103C, /* Smart Array P712m */
512 0x324b103C, /* Smart Array P711m */
513 0x3223103C, /* Smart Array P800 */
514 0x3234103C, /* Smart Array P400 */
515 0x3235103C, /* Smart Array P400i */
516 0x3211103C, /* Smart Array E200i */
517 0x3212103C, /* Smart Array E200 */
518 0x3213103C, /* Smart Array E200i */
519 0x3214103C, /* Smart Array E200i */
520 0x3215103C, /* Smart Array E200i */
521 0x3237103C, /* Smart Array E500 */
522 0x323D103C, /* Smart Array P700m */
523 0x40800E11, /* Smart Array 5i */
524 0x409C0E11, /* Smart Array 6400 */
525 0x409D0E11, /* Smart Array 6400 EM */
526 0x40700E11, /* Smart Array 5300 */
527 0x40820E11, /* Smart Array 532 */
528 0x40830E11, /* Smart Array 5312 */
529 0x409A0E11, /* Smart Array 641 */
530 0x409B0E11, /* Smart Array 642 */
531 0x40910E11, /* Smart Array 6i */
534 /* List of controllers which cannot even be soft reset */
535 static u32 soft_unresettable_controller[] = {
536 0x40800E11, /* Smart Array 5i */
537 0x40700E11, /* Smart Array 5300 */
538 0x40820E11, /* Smart Array 532 */
539 0x40830E11, /* Smart Array 5312 */
540 0x409A0E11, /* Smart Array 641 */
541 0x409B0E11, /* Smart Array 642 */
542 0x40910E11, /* Smart Array 6i */
543 /* Exclude 640x boards. These are two pci devices in one slot
544 * which share a battery backed cache module. One controls the
545 * cache, the other accesses the cache through the one that controls
546 * it. If we reset the one controlling the cache, the other will
547 * likely not be happy. Just forbid resetting this conjoined mess.
548 * The 640x isn't really supported by hpsa anyway.
550 0x409C0E11, /* Smart Array 6400 */
551 0x409D0E11, /* Smart Array 6400 EM */
554 static u32 needs_abort_tags_swizzled[] = {
555 0x323D103C, /* Smart Array P700m */
556 0x324a103C, /* Smart Array P712m */
557 0x324b103C, /* SmartArray P711m */
560 static int board_id_in_array(u32 a[], int nelems, u32 board_id)
564 for (i = 0; i < nelems; i++)
565 if (a[i] == board_id)
570 static int ctlr_is_hard_resettable(u32 board_id)
572 return !board_id_in_array(unresettable_controller,
573 ARRAY_SIZE(unresettable_controller), board_id);
576 static int ctlr_is_soft_resettable(u32 board_id)
578 return !board_id_in_array(soft_unresettable_controller,
579 ARRAY_SIZE(soft_unresettable_controller), board_id);
582 static int ctlr_is_resettable(u32 board_id)
584 return ctlr_is_hard_resettable(board_id) ||
585 ctlr_is_soft_resettable(board_id);
588 static int ctlr_needs_abort_tags_swizzled(u32 board_id)
590 return board_id_in_array(needs_abort_tags_swizzled,
591 ARRAY_SIZE(needs_abort_tags_swizzled), board_id);
594 static ssize_t host_show_resettable(struct device *dev,
595 struct device_attribute *attr, char *buf)
598 struct Scsi_Host *shost = class_to_shost(dev);
600 h = shost_to_hba(shost);
601 return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
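/*
 * The top two bits of byte 3 in the 8-byte CISS LUN address appear to
 * encode the addressing mode, with 01b meaning logical-volume
 * addressing; this reading is an assumption based on the 0xC0/0x40
 * check below.
 */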
604 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
606 return (scsi3addr[3] & 0xC0) == 0x40;
609 static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
610 "1(+0)ADM", "UNKNOWN"
612 #define HPSA_RAID_0 0
613 #define HPSA_RAID_4 1
614 #define HPSA_RAID_1 2 /* also used for RAID 10 */
615 #define HPSA_RAID_5 3 /* also used for RAID 50 */
616 #define HPSA_RAID_51 4
617 #define HPSA_RAID_6 5 /* also used for RAID 60 */
618 #define HPSA_RAID_ADM 6 /* also used for RAID 1+0 ADM */
619 #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)
621 static inline bool is_logical_device(struct hpsa_scsi_dev_t *device)
623 return !device->physical_device;
626 static ssize_t raid_level_show(struct device *dev,
627 struct device_attribute *attr, char *buf)
630 unsigned char rlevel;
632 struct scsi_device *sdev;
633 struct hpsa_scsi_dev_t *hdev;
636 sdev = to_scsi_device(dev);
637 h = sdev_to_hba(sdev);
638 spin_lock_irqsave(&h->lock, flags);
639 hdev = sdev->hostdata;
641 spin_unlock_irqrestore(&h->lock, flags);
645 /* Is this even a logical drive? */
646 if (!is_logical_device(hdev)) {
647 spin_unlock_irqrestore(&h->lock, flags);
648 l = snprintf(buf, PAGE_SIZE, "N/A\n");
652 rlevel = hdev->raid_level;
653 spin_unlock_irqrestore(&h->lock, flags);
654 if (rlevel > RAID_UNKNOWN)
655 rlevel = RAID_UNKNOWN;
656 l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
660 static ssize_t lunid_show(struct device *dev,
661 struct device_attribute *attr, char *buf)
664 struct scsi_device *sdev;
665 struct hpsa_scsi_dev_t *hdev;
667 unsigned char lunid[8];
669 sdev = to_scsi_device(dev);
670 h = sdev_to_hba(sdev);
671 spin_lock_irqsave(&h->lock, flags);
672 hdev = sdev->hostdata;
674 spin_unlock_irqrestore(&h->lock, flags);
677 memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
678 spin_unlock_irqrestore(&h->lock, flags);
679 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
680 lunid[0], lunid[1], lunid[2], lunid[3],
681 lunid[4], lunid[5], lunid[6], lunid[7]);
684 static ssize_t unique_id_show(struct device *dev,
685 struct device_attribute *attr, char *buf)
688 struct scsi_device *sdev;
689 struct hpsa_scsi_dev_t *hdev;
691 unsigned char sn[16];
693 sdev = to_scsi_device(dev);
694 h = sdev_to_hba(sdev);
695 spin_lock_irqsave(&h->lock, flags);
696 hdev = sdev->hostdata;
698 spin_unlock_irqrestore(&h->lock, flags);
701 memcpy(sn, hdev->device_id, sizeof(sn));
702 spin_unlock_irqrestore(&h->lock, flags);
703 return snprintf(buf, 16 * 2 + 2,
704 "%02X%02X%02X%02X%02X%02X%02X%02X"
705 "%02X%02X%02X%02X%02X%02X%02X%02X\n",
706 sn[0], sn[1], sn[2], sn[3],
707 sn[4], sn[5], sn[6], sn[7],
708 sn[8], sn[9], sn[10], sn[11],
709 sn[12], sn[13], sn[14], sn[15]);
712 static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
713 struct device_attribute *attr, char *buf)
716 struct scsi_device *sdev;
717 struct hpsa_scsi_dev_t *hdev;
721 sdev = to_scsi_device(dev);
722 h = sdev_to_hba(sdev);
723 spin_lock_irqsave(&h->lock, flags);
724 hdev = sdev->hostdata;
726 spin_unlock_irqrestore(&h->lock, flags);
729 offload_enabled = hdev->offload_enabled;
730 spin_unlock_irqrestore(&h->lock, flags);
731 return snprintf(buf, 20, "%d\n", offload_enabled);
735 #define PATH_STRING_LEN 50
737 static ssize_t path_info_show(struct device *dev,
738 struct device_attribute *attr, char *buf)
741 struct scsi_device *sdev;
742 struct hpsa_scsi_dev_t *hdev;
748 u8 path_map_index = 0;
750 unsigned char phys_connector[2];
751 unsigned char path[MAX_PATHS][PATH_STRING_LEN];
753 memset(path, 0, MAX_PATHS * PATH_STRING_LEN);
754 sdev = to_scsi_device(dev);
755 h = sdev_to_hba(sdev);
756 spin_lock_irqsave(&h->devlock, flags);
757 hdev = sdev->hostdata;
759 spin_unlock_irqrestore(&h->devlock, flags);
764 for (i = 0; i < MAX_PATHS; i++) {
765 path_map_index = 1<<i;
766 if (i == hdev->active_path_index)
768 else if (hdev->path_map & path_map_index)
773 output_len = snprintf(path[i],
774 PATH_STRING_LEN, "[%d:%d:%d:%d] %20.20s ",
775 h->scsi_host->host_no,
776 hdev->bus, hdev->target, hdev->lun,
777 scsi_device_type(hdev->devtype));
779 if (is_ext_target(h, hdev) ||
780 hdev->devtype == TYPE_RAID ||
781 is_logical_device(hdev)) {
782 output_len += snprintf(path[i] + output_len,
783 PATH_STRING_LEN, "%s\n",
789 memcpy(&phys_connector, &hdev->phys_connector[i],
790 sizeof(phys_connector));
791 if (phys_connector[0] < '0')
792 phys_connector[0] = '0';
793 if (phys_connector[1] < '0')
794 phys_connector[1] = '0';
795 if (hdev->phys_connector[i] > 0)
796 output_len += snprintf(path[i] + output_len,
800 if (hdev->devtype == TYPE_DISK && hdev->expose_device) {
801 if (box == 0 || box == 0xFF) {
802 output_len += snprintf(path[i] + output_len,
807 output_len += snprintf(path[i] + output_len,
809 "BOX: %hhu BAY: %hhu %s\n",
812 } else if (box != 0 && box != 0xFF) {
813 output_len += snprintf(path[i] + output_len,
814 PATH_STRING_LEN, "BOX: %hhu %s\n",
817 output_len += snprintf(path[i] + output_len,
818 PATH_STRING_LEN, "%s\n", active);
821 spin_unlock_irqrestore(&h->devlock, flags);
822 return snprintf(buf, output_len+1, "%s%s%s%s%s%s%s%s",
823 path[0], path[1], path[2], path[3],
824 path[4], path[5], path[6], path[7]);
827 static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
828 static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
829 static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
830 static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
831 static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
832 host_show_hp_ssd_smart_path_enabled, NULL);
833 static DEVICE_ATTR(path_info, S_IRUGO, path_info_show, NULL);
834 static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
835 host_show_hp_ssd_smart_path_status,
836 host_store_hp_ssd_smart_path_status);
837 static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
838 host_store_raid_offload_debug);
839 static DEVICE_ATTR(firmware_revision, S_IRUGO,
840 host_show_firmware_revision, NULL);
841 static DEVICE_ATTR(commands_outstanding, S_IRUGO,
842 host_show_commands_outstanding, NULL);
843 static DEVICE_ATTR(transport_mode, S_IRUGO,
844 host_show_transport_mode, NULL);
845 static DEVICE_ATTR(resettable, S_IRUGO,
846 host_show_resettable, NULL);
847 static DEVICE_ATTR(lockup_detected, S_IRUGO,
848 host_show_lockup_detected, NULL);
850 static struct device_attribute *hpsa_sdev_attrs[] = {
851 &dev_attr_raid_level,
854 &dev_attr_hp_ssd_smart_path_enabled,
856 &dev_attr_lockup_detected,
860 static struct device_attribute *hpsa_shost_attrs[] = {
862 &dev_attr_firmware_revision,
863 &dev_attr_commands_outstanding,
864 &dev_attr_transport_mode,
865 &dev_attr_resettable,
866 &dev_attr_hp_ssd_smart_path_status,
867 &dev_attr_raid_offload_debug,
871 #define HPSA_NRESERVED_CMDS (HPSA_CMDS_RESERVED_FOR_ABORTS + \
872 HPSA_CMDS_RESERVED_FOR_DRIVER + HPSA_MAX_CONCURRENT_PASSTHRUS)
874 static struct scsi_host_template hpsa_driver_template = {
875 .module = THIS_MODULE,
878 .queuecommand = hpsa_scsi_queue_command,
879 .scan_start = hpsa_scan_start,
880 .scan_finished = hpsa_scan_finished,
881 .change_queue_depth = hpsa_change_queue_depth,
883 .use_clustering = ENABLE_CLUSTERING,
884 .eh_abort_handler = hpsa_eh_abort_handler,
885 .eh_device_reset_handler = hpsa_eh_device_reset_handler,
887 .slave_alloc = hpsa_slave_alloc,
888 .slave_configure = hpsa_slave_configure,
889 .slave_destroy = hpsa_slave_destroy,
891 .compat_ioctl = hpsa_compat_ioctl,
893 .sdev_attrs = hpsa_sdev_attrs,
894 .shost_attrs = hpsa_shost_attrs,
899 static inline u32 next_command(struct ctlr_info *h, u8 q)
902 struct reply_queue_buffer *rq = &h->reply_queue[q];
904 if (h->transMethod & CFGTBL_Trans_io_accel1)
905 return h->access.command_completed(h, q);
907 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
908 return h->access.command_completed(h, q);
910 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
911 a = rq->head[rq->current_entry];
913 atomic_dec(&h->commands_outstanding);
917 /* Check for wraparound */
918 if (rq->current_entry == h->max_commands) {
919 rq->current_entry = 0;
926 * There are some special bits in the bus address of the
927 * command that we have to set for the controller to know
928 * how to process the command:
930 * Normal performant mode:
931 * bit 0: 1 means performant mode, 0 means simple mode.
932 * bits 1-3 = block fetch table entry
933 * bits 4-6 = command type (== 0)
936 * bit 0 = "performant mode" bit.
937 * bits 1-3 = block fetch table entry
938 * bits 4-6 = command type (== 110)
939 * (command type is needed because ioaccel1 mode
940 * commands are submitted through the same register as normal
941 * mode commands, so this is how the controller knows whether
942 * the command is normal mode or ioaccel1 mode.)
945 * bit 0 = "performant mode" bit.
946 * bits 1-4 = block fetch table entry (note extra bit)
947 * bits 4-6 = not needed, because ioaccel2 mode has
948 * a separate special register for submitting commands.
952 * set_performant_mode: Modify the tag for cciss performant
953 * set bit 0 for pull model, bits 3-1 for block fetch
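/*
 * Illustrative example (values assumed, not taken from the code): in
 * normal performant mode a block fetch table entry of 3 yields
 * (3 << 1) | 1 = 0x07 OR'd into c->busaddr, so bit 0 selects
 * performant mode and bits 1-3 carry the block fetch table entry,
 * matching the layout described above.
 */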
956 #define DEFAULT_REPLY_QUEUE (-1)
957 static void set_performant_mode(struct ctlr_info *h, struct CommandList *c,
960 if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
961 c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
962 if (unlikely(!h->msix_vector))
964 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
965 c->Header.ReplyQueue =
966 raw_smp_processor_id() % h->nreply_queues;
968 c->Header.ReplyQueue = reply_queue % h->nreply_queues;
972 static void set_ioaccel1_performant_mode(struct ctlr_info *h,
973 struct CommandList *c,
976 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
979 * Tell the controller to post the reply to the queue for this
980 * processor. This seems to give the best I/O throughput.
982 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
983 cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
985 cp->ReplyQueue = reply_queue % h->nreply_queues;
987 * Set the bits in the address sent down to include:
988 * - performant mode bit (bit 0)
989 * - pull count (bits 1-3)
990 * - command type (bits 4-6)
992 c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
993 IOACCEL1_BUSADDR_CMDTYPE;
996 static void set_ioaccel2_tmf_performant_mode(struct ctlr_info *h,
997 struct CommandList *c,
1000 struct hpsa_tmf_struct *cp = (struct hpsa_tmf_struct *)
1001 &h->ioaccel2_cmd_pool[c->cmdindex];
1003 /* Tell the controller to post the reply to the queue for this
1004 * processor. This seems to give the best I/O throughput.
1006 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1007 cp->reply_queue = smp_processor_id() % h->nreply_queues;
1009 cp->reply_queue = reply_queue % h->nreply_queues;
1010 /* Set the bits in the address sent down to include:
1011 * - performant mode bit not used in ioaccel mode 2
1012 * - pull count (bits 0-3)
1013 * - command type isn't needed for ioaccel2
1015 c->busaddr |= h->ioaccel2_blockFetchTable[0];
1018 static void set_ioaccel2_performant_mode(struct ctlr_info *h,
1019 struct CommandList *c,
1022 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
1025 * Tell the controller to post the reply to the queue for this
1026 * processor. This seems to give the best I/O throughput.
1028 if (likely(reply_queue == DEFAULT_REPLY_QUEUE))
1029 cp->reply_queue = smp_processor_id() % h->nreply_queues;
1031 cp->reply_queue = reply_queue % h->nreply_queues;
1033 * Set the bits in the address sent down to include:
1034 * - performant mode bit not used in ioaccel mode 2
1035 * - pull count (bits 0-3)
1036 * - command type isn't needed for ioaccel2
1038 c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
1041 static int is_firmware_flash_cmd(u8 *cdb)
1043 return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
1047 * During firmware flash, the heartbeat register may not update as frequently
1048 * as it should. So we dial down lockup detection during firmware flash, and
1049 * dial it back up when firmware flash completes.
1051 #define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
1052 #define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
1053 static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
1054 struct CommandList *c)
1056 if (!is_firmware_flash_cmd(c->Request.CDB))
1058 atomic_inc(&h->firmware_flash_in_progress);
1059 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
1062 static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
1063 struct CommandList *c)
1065 if (is_firmware_flash_cmd(c->Request.CDB) &&
1066 atomic_dec_and_test(&h->firmware_flash_in_progress))
1067 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
1070 static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
1071 struct CommandList *c, int reply_queue)
1073 dial_down_lockup_detection_during_fw_flash(h, c);
1074 atomic_inc(&h->commands_outstanding);
1075 switch (c->cmd_type) {
1077 set_ioaccel1_performant_mode(h, c, reply_queue);
1078 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
1081 set_ioaccel2_performant_mode(h, c, reply_queue);
1082 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1085 set_ioaccel2_tmf_performant_mode(h, c, reply_queue);
1086 writel(c->busaddr, h->vaddr + IOACCEL2_INBOUND_POSTQ_32);
1089 set_performant_mode(h, c, reply_queue);
1090 h->access.submit_command(h, c);
1094 static void enqueue_cmd_and_start_io(struct ctlr_info *h, struct CommandList *c)
1096 if (unlikely(hpsa_is_pending_event(c)))
1097 return finish_cmd(c);
1099 __enqueue_cmd_and_start_io(h, c, DEFAULT_REPLY_QUEUE);
1102 static inline int is_hba_lunid(unsigned char scsi3addr[])
1104 return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
1107 static inline int is_scsi_rev_5(struct ctlr_info *h)
1109 if (!h->hba_inquiry_data)
1111 if ((h->hba_inquiry_data[2] & 0x07) == 5)
1116 static int hpsa_find_target_lun(struct ctlr_info *h,
1117 unsigned char scsi3addr[], int bus, int *target, int *lun)
1119 /* finds an unused bus, target, lun for a new physical device
1120 * assumes h->devlock is held
1123 DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);
1125 bitmap_zero(lun_taken, HPSA_MAX_DEVICES);
1127 for (i = 0; i < h->ndevices; i++) {
1128 if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
1129 __set_bit(h->dev[i]->target, lun_taken);
1132 i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
1133 if (i < HPSA_MAX_DEVICES) {
1142 static void hpsa_show_dev_msg(const char *level, struct ctlr_info *h,
1143 struct hpsa_scsi_dev_t *dev, char *description)
1145 if (h == NULL || h->pdev == NULL || h->scsi_host == NULL)
1148 dev_printk(level, &h->pdev->dev,
1149 "scsi %d:%d:%d:%d: %s %s %.8s %.16s RAID-%s SSDSmartPathCap%c En%c Exp=%d\n",
1150 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
1152 scsi_device_type(dev->devtype),
1155 dev->raid_level > RAID_UNKNOWN ?
1156 "RAID-?" : raid_label[dev->raid_level],
1157 dev->offload_config ? '+' : '-',
1158 dev->offload_enabled ? '+' : '-',
1159 dev->expose_device);
1162 /* Add an entry into h->dev[] array. */
1163 static int hpsa_scsi_add_entry(struct ctlr_info *h,
1164 struct hpsa_scsi_dev_t *device,
1165 struct hpsa_scsi_dev_t *added[], int *nadded)
1167 /* assumes h->devlock is held */
1168 int n = h->ndevices;
1170 unsigned char addr1[8], addr2[8];
1171 struct hpsa_scsi_dev_t *sd;
1173 if (n >= HPSA_MAX_DEVICES) {
1174 dev_err(&h->pdev->dev, "too many devices, some will be "
1179 /* physical devices do not have lun or target assigned until now. */
1180 if (device->lun != -1)
1181 /* Logical device, lun is already assigned. */
1184 /* If this device is a non-zero lun of a multi-lun device,
1185 * byte 4 of the 8-byte LUN addr will contain the logical
1186 * unit number, zero otherwise.
1188 if (device->scsi3addr[4] == 0) {
1189 /* This is not a non-zero lun of a multi-lun device */
1190 if (hpsa_find_target_lun(h, device->scsi3addr,
1191 device->bus, &device->target, &device->lun) != 0)
1196 /* This is a non-zero lun of a multi-lun device.
1197 * Search through our list and find the device which
1198 * has the same 8-byte LUN address, excepting bytes 4 and 5.
1199 * Assign the same bus and target for this new LUN.
1200 * Use the logical unit number from the firmware.
1202 memcpy(addr1, device->scsi3addr, 8);
1205 for (i = 0; i < n; i++) {
1207 memcpy(addr2, sd->scsi3addr, 8);
1210 /* differ only in bytes 4 and 5? */
1211 if (memcmp(addr1, addr2, 8) == 0) {
1212 device->bus = sd->bus;
1213 device->target = sd->target;
1214 device->lun = device->scsi3addr[4];
1218 if (device->lun == -1) {
1219 dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
1220 " suspect firmware bug or unsupported hardware "
1221 "configuration.\n");
1229 added[*nadded] = device;
1231 hpsa_show_dev_msg(KERN_INFO, h, device,
1232 device->expose_device ? "added" : "masked");
1233 device->offload_to_be_enabled = device->offload_enabled;
1234 device->offload_enabled = 0;
1238 /* Update an entry in h->dev[] array. */
1239 static void hpsa_scsi_update_entry(struct ctlr_info *h,
1240 int entry, struct hpsa_scsi_dev_t *new_entry)
1242 int offload_enabled;
1243 /* assumes h->devlock is held */
1244 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1246 /* Raid level changed. */
1247 h->dev[entry]->raid_level = new_entry->raid_level;
1249 /* Raid offload parameters changed. Careful about the ordering. */
1250 if (new_entry->offload_config && new_entry->offload_enabled) {
1252 * if drive is newly offload_enabled, we want to copy the
1253 * raid map data first. If previously offload_enabled and
1254 * offload_config were set, raid map data had better be
1255 * the same as it was before. if raid map data is changed
1256 * then it had better be the case that
1257 * h->dev[entry]->offload_enabled is currently 0.
1259 h->dev[entry]->raid_map = new_entry->raid_map;
1260 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1262 if (new_entry->hba_ioaccel_enabled) {
1263 h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
1264 wmb(); /* set ioaccel_handle *before* hba_ioaccel_enabled */
1266 h->dev[entry]->hba_ioaccel_enabled = new_entry->hba_ioaccel_enabled;
1267 h->dev[entry]->offload_config = new_entry->offload_config;
1268 h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
1269 h->dev[entry]->queue_depth = new_entry->queue_depth;
1272 * We can turn off ioaccel offload now, but we must delay turning
1273 * it back on until h->dev[entry]->phys_disk[] has been updated,
1274 * which can't happen until all the devices have been updated.
1276 h->dev[entry]->offload_to_be_enabled = new_entry->offload_enabled;
1277 if (!new_entry->offload_enabled)
1278 h->dev[entry]->offload_enabled = 0;
1280 offload_enabled = h->dev[entry]->offload_enabled;
1281 h->dev[entry]->offload_enabled = h->dev[entry]->offload_to_be_enabled;
1282 hpsa_show_dev_msg(KERN_INFO, h, h->dev[entry], "updated");
1283 h->dev[entry]->offload_enabled = offload_enabled;
1286 /* Replace an entry in the h->dev[] array. */
1287 static void hpsa_scsi_replace_entry(struct ctlr_info *h,
1288 int entry, struct hpsa_scsi_dev_t *new_entry,
1289 struct hpsa_scsi_dev_t *added[], int *nadded,
1290 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1292 /* assumes h->devlock is held */
1293 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1294 removed[*nremoved] = h->dev[entry];
1298 * New physical devices won't have target/lun assigned yet
1299 * so we need to preserve the values in the slot we are replacing.
1301 if (new_entry->target == -1) {
1302 new_entry->target = h->dev[entry]->target;
1303 new_entry->lun = h->dev[entry]->lun;
1306 h->dev[entry] = new_entry;
1307 added[*nadded] = new_entry;
1309 hpsa_show_dev_msg(KERN_INFO, h, new_entry, "replaced");
1310 new_entry->offload_to_be_enabled = new_entry->offload_enabled;
1311 new_entry->offload_enabled = 0;
1314 /* Remove an entry from h->dev[] array. */
1315 static void hpsa_scsi_remove_entry(struct ctlr_info *h, int entry,
1316 struct hpsa_scsi_dev_t *removed[], int *nremoved)
1318 /* assumes h->devlock is held */
1320 struct hpsa_scsi_dev_t *sd;
1322 BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
1325 removed[*nremoved] = h->dev[entry];
1328 for (i = entry; i < h->ndevices-1; i++)
1329 h->dev[i] = h->dev[i+1];
1331 hpsa_show_dev_msg(KERN_INFO, h, sd, "removed");
1334 #define SCSI3ADDR_EQ(a, b) ( \
1335 (a)[7] == (b)[7] && \
1336 (a)[6] == (b)[6] && \
1337 (a)[5] == (b)[5] && \
1338 (a)[4] == (b)[4] && \
1339 (a)[3] == (b)[3] && \
1340 (a)[2] == (b)[2] && \
1341 (a)[1] == (b)[1] && \
1344 static void fixup_botched_add(struct ctlr_info *h,
1345 struct hpsa_scsi_dev_t *added)
1347 /* called when scsi_add_device fails in order to re-adjust
1348 * h->dev[] to match the mid layer's view.
1350 unsigned long flags;
1353 spin_lock_irqsave(&h->lock, flags);
1354 for (i = 0; i < h->ndevices; i++) {
1355 if (h->dev[i] == added) {
1356 for (j = i; j < h->ndevices-1; j++)
1357 h->dev[j] = h->dev[j+1];
1362 spin_unlock_irqrestore(&h->lock, flags);
1366 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
1367 struct hpsa_scsi_dev_t *dev2)
1369 /* we compare everything except lun and target as these
1370 * are not yet assigned. Compare parts likely to differ first. */
1373 if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
1374 sizeof(dev1->scsi3addr)) != 0)
1376 if (memcmp(dev1->device_id, dev2->device_id,
1377 sizeof(dev1->device_id)) != 0)
1379 if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
1381 if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
1383 if (dev1->devtype != dev2->devtype)
1385 if (dev1->bus != dev2->bus)
1390 static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
1391 struct hpsa_scsi_dev_t *dev2)
1393 /* Device attributes that can change, but don't mean
1394 * that the device is a different device, nor that the OS
1395 * needs to be told anything about the change.
1397 if (dev1->raid_level != dev2->raid_level)
1399 if (dev1->offload_config != dev2->offload_config)
1401 if (dev1->offload_enabled != dev2->offload_enabled)
1403 if (!is_logical_dev_addr_mode(dev1->scsi3addr))
1404 if (dev1->queue_depth != dev2->queue_depth)
1409 /* Find needle in haystack. If exact match found, return DEVICE_SAME,
1410 * and return needle location in *index. If scsi3addr matches, but not
1411 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
1412 * location in *index.
1413 * In the case of a minor device attribute change, such as RAID level, just
1414 * return DEVICE_UPDATED, along with the updated device's location in index.
1415 * If needle not found, return DEVICE_NOT_FOUND.
1417 static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
1418 struct hpsa_scsi_dev_t *haystack[], int haystack_size,
1422 #define DEVICE_NOT_FOUND 0
1423 #define DEVICE_CHANGED 1
1424 #define DEVICE_SAME 2
1425 #define DEVICE_UPDATED 3
1427 return DEVICE_NOT_FOUND;
1429 for (i = 0; i < haystack_size; i++) {
1430 if (haystack[i] == NULL) /* previously removed. */
1432 if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
1434 if (device_is_the_same(needle, haystack[i])) {
1435 if (device_updated(needle, haystack[i]))
1436 return DEVICE_UPDATED;
1439 /* Keep offline devices offline */
1440 if (needle->volume_offline)
1441 return DEVICE_NOT_FOUND;
1442 return DEVICE_CHANGED;
1447 return DEVICE_NOT_FOUND;
1450 static void hpsa_monitor_offline_device(struct ctlr_info *h,
1451 unsigned char scsi3addr[])
1453 struct offline_device_entry *device;
1454 unsigned long flags;
1456 /* Check to see if device is already on the list */
1457 spin_lock_irqsave(&h->offline_device_lock, flags);
1458 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1459 if (memcmp(device->scsi3addr, scsi3addr,
1460 sizeof(device->scsi3addr)) == 0) {
1461 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1465 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1467 /* Device is not on the list, add it. */
1468 device = kmalloc(sizeof(*device), GFP_KERNEL);
1470 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1473 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1474 spin_lock_irqsave(&h->offline_device_lock, flags);
1475 list_add_tail(&device->offline_list, &h->offline_device_list);
1476 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1479 /* Print a message explaining various offline volume states */
1480 static void hpsa_show_volume_status(struct ctlr_info *h,
1481 struct hpsa_scsi_dev_t *sd)
1483 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1484 dev_info(&h->pdev->dev,
1485 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1486 h->scsi_host->host_no,
1487 sd->bus, sd->target, sd->lun);
1488 switch (sd->volume_offline) {
1491 case HPSA_LV_UNDERGOING_ERASE:
1492 dev_info(&h->pdev->dev,
1493 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1494 h->scsi_host->host_no,
1495 sd->bus, sd->target, sd->lun);
1497 case HPSA_LV_NOT_AVAILABLE:
1498 dev_info(&h->pdev->dev,
1499 "C%d:B%d:T%d:L%d Volume is waiting for transforming volume.\n",
1500 h->scsi_host->host_no,
1501 sd->bus, sd->target, sd->lun);
1503 case HPSA_LV_UNDERGOING_RPI:
1504 dev_info(&h->pdev->dev,
1505 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity init.\n",
1506 h->scsi_host->host_no,
1507 sd->bus, sd->target, sd->lun);
1509 case HPSA_LV_PENDING_RPI:
1510 dev_info(&h->pdev->dev,
1511 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1512 h->scsi_host->host_no,
1513 sd->bus, sd->target, sd->lun);
1515 case HPSA_LV_ENCRYPTED_NO_KEY:
1516 dev_info(&h->pdev->dev,
1517 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1518 h->scsi_host->host_no,
1519 sd->bus, sd->target, sd->lun);
1521 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1522 dev_info(&h->pdev->dev,
1523 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1524 h->scsi_host->host_no,
1525 sd->bus, sd->target, sd->lun);
1527 case HPSA_LV_UNDERGOING_ENCRYPTION:
1528 dev_info(&h->pdev->dev,
1529 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1530 h->scsi_host->host_no,
1531 sd->bus, sd->target, sd->lun);
1533 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1534 dev_info(&h->pdev->dev,
1535 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1536 h->scsi_host->host_no,
1537 sd->bus, sd->target, sd->lun);
1539 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1540 dev_info(&h->pdev->dev,
1541 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1542 h->scsi_host->host_no,
1543 sd->bus, sd->target, sd->lun);
1545 case HPSA_LV_PENDING_ENCRYPTION:
1546 dev_info(&h->pdev->dev,
1547 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1548 h->scsi_host->host_no,
1549 sd->bus, sd->target, sd->lun);
1551 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1552 dev_info(&h->pdev->dev,
1553 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1554 h->scsi_host->host_no,
1555 sd->bus, sd->target, sd->lun);
1561 * Figure the list of physical drive pointers for a logical drive with
1562 * raid offload configured.
1564 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1565 struct hpsa_scsi_dev_t *dev[], int ndevices,
1566 struct hpsa_scsi_dev_t *logical_drive)
1568 struct raid_map_data *map = &logical_drive->raid_map;
1569 struct raid_map_disk_data *dd = &map->data[0];
1571 int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1572 le16_to_cpu(map->metadata_disks_per_row);
1573 int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1574 le16_to_cpu(map->layout_map_count) *
1575 total_disks_per_row;
1576 int nphys_disk = le16_to_cpu(map->layout_map_count) *
1577 total_disks_per_row;
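/*
 * Worked example with assumed, illustrative values: a raid map with
 * data_disks_per_row = 3, metadata_disks_per_row = 1, row_cnt = 2 and
 * layout_map_count = 1 gives total_disks_per_row = 4,
 * nraid_map_entries = 2 * 1 * 4 = 8 and nphys_disk = 1 * 4 = 4.
 */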
1580 if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1581 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1583 logical_drive->nphysical_disks = nraid_map_entries;
1586 for (i = 0; i < nraid_map_entries; i++) {
1587 logical_drive->phys_disk[i] = NULL;
1588 if (!logical_drive->offload_config)
1590 for (j = 0; j < ndevices; j++) {
1593 if (dev[j]->devtype != TYPE_DISK)
1595 if (is_logical_device(dev[j]))
1597 if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1600 logical_drive->phys_disk[i] = dev[j];
1602 qdepth = min(h->nr_cmds, qdepth +
1603 logical_drive->phys_disk[i]->queue_depth);
1608 * This can happen if a physical drive is removed and
1609 * the logical drive is degraded. In that case, the RAID
1610 * map data will refer to a physical disk which isn't actually
1611 * present. And in that case offload_enabled should already
1612 * be 0, but we'll turn it off here just in case
1614 if (!logical_drive->phys_disk[i]) {
1615 logical_drive->offload_enabled = 0;
1616 logical_drive->offload_to_be_enabled = 0;
1617 logical_drive->queue_depth = 8;
1620 if (nraid_map_entries)
1622 * This is correct for reads, too high for full stripe writes,
1623 * way too high for partial stripe writes
1625 logical_drive->queue_depth = qdepth;
1627 logical_drive->queue_depth = h->nr_cmds;
1630 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1631 struct hpsa_scsi_dev_t *dev[], int ndevices)
1635 for (i = 0; i < ndevices; i++) {
1638 if (dev[i]->devtype != TYPE_DISK)
1640 if (!is_logical_device(dev[i]))
1644 * If offload is currently enabled, the RAID map and
1645 * phys_disk[] assignment *better* not be changing
1646 * and since it isn't changing, we do not need to
1649 if (dev[i]->offload_enabled)
1652 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1656 static int hpsa_add_device(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
1663 rc = scsi_add_device(h->scsi_host, device->bus,
1664 device->target, device->lun);
1668 static void hpsa_remove_device(struct ctlr_info *h,
1669 struct hpsa_scsi_dev_t *device)
1671 struct scsi_device *sdev = NULL;
1676 sdev = scsi_device_lookup(h->scsi_host, device->bus,
1677 device->target, device->lun);
1680 scsi_remove_device(sdev);
1681 scsi_device_put(sdev);
1684 * We don't expect to get here. Future commands
1685 * to this device will get a selection timeout as
1686 * if the device were gone.
1688 hpsa_show_dev_msg(KERN_WARNING, h, device,
1689 "didn't find device for removal.");
1693 static void adjust_hpsa_scsi_table(struct ctlr_info *h,
1694 struct hpsa_scsi_dev_t *sd[], int nsds)
1696 /* sd contains scsi3 addresses and devtypes, and inquiry
1697 * data. This function takes what's in sd to be the current
1698 * reality and updates h->dev[] to reflect that reality.
1700 int i, entry, device_change, changes = 0;
1701 struct hpsa_scsi_dev_t *csd;
1702 unsigned long flags;
1703 struct hpsa_scsi_dev_t **added, **removed;
1704 int nadded, nremoved;
1707 * A reset can cause a device status to change;
1708 * re-schedule the scan to see what happened.
1710 if (h->reset_in_progress) {
1711 h->drv_req_rescan = 1;
1715 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1716 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1718 if (!added || !removed) {
1719 dev_warn(&h->pdev->dev, "out of memory in "
1720 "adjust_hpsa_scsi_table\n");
1724 spin_lock_irqsave(&h->devlock, flags);
1726 /* find any devices in h->dev[] that are not in
1727 * sd[] and remove them from h->dev[], and for any
1728 * devices which have changed, remove the old device
1729 * info and add the new device info.
1730 * If minor device attributes change, just update
1731 * the existing device structure.
1736 while (i < h->ndevices) {
1738 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1739 if (device_change == DEVICE_NOT_FOUND) {
1741 hpsa_scsi_remove_entry(h, i, removed, &nremoved);
1742 continue; /* remove ^^^, hence i not incremented */
1743 } else if (device_change == DEVICE_CHANGED) {
1745 hpsa_scsi_replace_entry(h, i, sd[entry],
1746 added, &nadded, removed, &nremoved);
1747 /* Set it to NULL to prevent it from being freed
1748 * at the bottom of hpsa_update_scsi_devices()
1751 } else if (device_change == DEVICE_UPDATED) {
1752 hpsa_scsi_update_entry(h, i, sd[entry]);
1757 /* Now, make sure every device listed in sd[] is also
1758 * listed in h->dev[], adding them if they aren't found
1761 for (i = 0; i < nsds; i++) {
1762 if (!sd[i]) /* if already added above. */
1765 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1766 * as the SCSI mid-layer does not handle such devices well.
1767 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1768 * at 160Hz, and prevents the system from coming up.
1770 if (sd[i]->volume_offline) {
1771 hpsa_show_volume_status(h, sd[i]);
1772 hpsa_show_dev_msg(KERN_INFO, h, sd[i], "offline");
1776 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1777 h->ndevices, &entry);
1778 if (device_change == DEVICE_NOT_FOUND) {
1780 if (hpsa_scsi_add_entry(h, sd[i], added, &nadded) != 0)
1782 sd[i] = NULL; /* prevent from being freed later. */
1783 } else if (device_change == DEVICE_CHANGED) {
1784 /* should never happen... */
1786 dev_warn(&h->pdev->dev,
1787 "device unexpectedly changed.\n");
1788 /* but if it does happen, we just ignore that device */
1791 hpsa_update_log_drive_phys_drive_ptrs(h, h->dev, h->ndevices);
1793 /* Now that h->dev[]->phys_disk[] is coherent, we can enable
1794 * any logical drives that need it enabled.
1796 for (i = 0; i < h->ndevices; i++) {
1797 if (h->dev[i] == NULL)
1799 h->dev[i]->offload_enabled = h->dev[i]->offload_to_be_enabled;
1802 spin_unlock_irqrestore(&h->devlock, flags);
1804 /* Monitor devices which are in one of several NOT READY states to be
1805 * brought online later. This must be done without holding h->devlock,
1806 * so don't touch h->dev[]
1808 for (i = 0; i < nsds; i++) {
1809 if (!sd[i]) /* if already added above. */
1811 if (sd[i]->volume_offline)
1812 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1815 /* Don't notify the SCSI mid layer of any changes the first time through
1816 * (or if there are no changes); scsi_scan_host will do it later, the
1817 * first time through.
1822 /* Notify scsi mid layer of any removed devices */
1823 for (i = 0; i < nremoved; i++) {
1824 if (removed[i] == NULL)
1826 if (removed[i]->expose_device)
1827 hpsa_remove_device(h, removed[i]);
1832 /* Notify scsi mid layer of any added devices */
1833 for (i = 0; i < nadded; i++) {
1836 if (added[i] == NULL)
1838 if (!(added[i]->expose_device))
1840 rc = hpsa_add_device(h, added[i]);
1843 dev_warn(&h->pdev->dev,
1844 "addition failed %d, device not added.", rc);
1845 /* now we have to remove it from h->dev,
1846 * since it didn't get added to scsi mid layer
1848 fixup_botched_add(h, added[i]);
1849 h->drv_req_rescan = 1;
1858 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
1859 * Assumes h->devlock is held.
1861 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1862 int bus, int target, int lun)
1865 struct hpsa_scsi_dev_t *sd;
1867 for (i = 0; i < h->ndevices; i++) {
1869 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1875 static int hpsa_slave_alloc(struct scsi_device *sdev)
1877 struct hpsa_scsi_dev_t *sd;
1878 unsigned long flags;
1879 struct ctlr_info *h;
1881 h = sdev_to_hba(sdev);
1882 spin_lock_irqsave(&h->devlock, flags);
1883 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1884 sdev_id(sdev), sdev->lun);
1886 atomic_set(&sd->ioaccel_cmds_out, 0);
1887 sdev->hostdata = sd->expose_device ? sd : NULL;
1889 sdev->hostdata = NULL;
1890 spin_unlock_irqrestore(&h->devlock, flags);
1894 /* configure scsi device based on internal per-device structure */
1895 static int hpsa_slave_configure(struct scsi_device *sdev)
1897 struct hpsa_scsi_dev_t *sd;
1900 sd = sdev->hostdata;
1901 sdev->no_uld_attach = !sd || !sd->expose_device;
1904 queue_depth = sd->queue_depth != 0 ?
1905 sd->queue_depth : sdev->host->can_queue;
1907 queue_depth = sdev->host->can_queue;
1909 scsi_change_queue_depth(sdev, queue_depth);
1914 static void hpsa_slave_destroy(struct scsi_device *sdev)
1916 /* nothing to do. */
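/*
 * ioaccel2 SG chaining: one chain block is kept per command slot in
 * h->ioaccel2_cmd_sg_list[].  When a request carries more SG entries than
 * the ioaccel2 command can hold inline, hpsa_map_ioaccel2_sg_chain_block()
 * DMA-maps the per-command block and stores its bus address in cp->sg[0]
 * (cp->sg[0].length gives the chain size).
 */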
1919 static void hpsa_free_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1923 if (!h->ioaccel2_cmd_sg_list)
1925 for (i = 0; i < h->nr_cmds; i++) {
1926 kfree(h->ioaccel2_cmd_sg_list[i]);
1927 h->ioaccel2_cmd_sg_list[i] = NULL;
1929 kfree(h->ioaccel2_cmd_sg_list);
1930 h->ioaccel2_cmd_sg_list = NULL;
1933 static int hpsa_allocate_ioaccel2_sg_chain_blocks(struct ctlr_info *h)
1937 if (h->chainsize <= 0)
1940 h->ioaccel2_cmd_sg_list =
1941 kzalloc(sizeof(*h->ioaccel2_cmd_sg_list) * h->nr_cmds,
1943 if (!h->ioaccel2_cmd_sg_list)
1945 for (i = 0; i < h->nr_cmds; i++) {
1946 h->ioaccel2_cmd_sg_list[i] =
1947 kmalloc(sizeof(*h->ioaccel2_cmd_sg_list[i]) *
1948 h->maxsgentries, GFP_KERNEL);
1949 if (!h->ioaccel2_cmd_sg_list[i])
1955 hpsa_free_ioaccel2_sg_chain_blocks(h);
1959 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1963 if (!h->cmd_sg_list)
1965 for (i = 0; i < h->nr_cmds; i++) {
1966 kfree(h->cmd_sg_list[i]);
1967 h->cmd_sg_list[i] = NULL;
1969 kfree(h->cmd_sg_list);
1970 h->cmd_sg_list = NULL;
1973 static int hpsa_alloc_sg_chain_blocks(struct ctlr_info *h)
1977 if (h->chainsize <= 0)
1980 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1982 if (!h->cmd_sg_list) {
1983 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
1986 for (i = 0; i < h->nr_cmds; i++) {
1987 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1988 h->chainsize, GFP_KERNEL);
1989 if (!h->cmd_sg_list[i]) {
1990 dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
1997 hpsa_free_sg_chain_blocks(h);
2001 static int hpsa_map_ioaccel2_sg_chain_block(struct ctlr_info *h,
2002 struct io_accel2_cmd *cp, struct CommandList *c)
2004 struct ioaccel2_sg_element *chain_block;
2008 chain_block = h->ioaccel2_cmd_sg_list[c->cmdindex];
2009 chain_size = le32_to_cpu(cp->sg[0].length);
2010 temp64 = pci_map_single(h->pdev, chain_block, chain_size,
2012 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2013 /* prevent subsequent unmapping */
2014 cp->sg->address = 0;
2017 cp->sg->address = cpu_to_le64(temp64);
2021 static void hpsa_unmap_ioaccel2_sg_chain_block(struct ctlr_info *h,
2022 struct io_accel2_cmd *cp)
2024 struct ioaccel2_sg_element *chain_sg;
2029 temp64 = le64_to_cpu(chain_sg->address);
2030 chain_size = le32_to_cpu(cp->sg[0].length);
2031 pci_unmap_single(h->pdev, temp64, chain_size, PCI_DMA_TODEVICE);
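/*
 * Standard-path SG chaining: when a command needs more than
 * h->max_cmd_sg_entries descriptors, the last descriptor embedded in the
 * command is turned into a chain pointer (Ext = HPSA_SG_CHAIN) to the
 * per-command block in h->cmd_sg_list[] that holds the remaining entries.
 */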
2034 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
2035 struct CommandList *c)
2037 struct SGDescriptor *chain_sg, *chain_block;
2041 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2042 chain_block = h->cmd_sg_list[c->cmdindex];
2043 chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
2044 chain_len = sizeof(*chain_sg) *
2045 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
2046 chain_sg->Len = cpu_to_le32(chain_len);
2047 temp64 = pci_map_single(h->pdev, chain_block, chain_len,
2049 if (dma_mapping_error(&h->pdev->dev, temp64)) {
2050 /* prevent subsequent unmapping */
2051 chain_sg->Addr = cpu_to_le64(0);
2054 chain_sg->Addr = cpu_to_le64(temp64);
2058 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
2059 struct CommandList *c)
2061 struct SGDescriptor *chain_sg;
2063 if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
2066 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
2067 pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
2068 le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
2072 /* Decode the various types of errors on ioaccel2 path.
2073 * Return 1 for any error that should generate a RAID path retry.
2074 * Return 0 for errors that don't require a RAID path retry.
2076 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
2077 struct CommandList *c,
2078 struct scsi_cmnd *cmd,
2079 struct io_accel2_cmd *c2)
2083 u32 ioaccel2_resid = 0;
2085 switch (c2->error_data.serv_response) {
2086 case IOACCEL2_SERV_RESPONSE_COMPLETE:
2087 switch (c2->error_data.status) {
2088 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
2090 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
2091 cmd->result |= SAM_STAT_CHECK_CONDITION;
2092 if (c2->error_data.data_present !=
2093 IOACCEL2_SENSE_DATA_PRESENT) {
2094 memset(cmd->sense_buffer, 0,
2095 SCSI_SENSE_BUFFERSIZE);
2098 /* copy the sense data */
2099 data_len = c2->error_data.sense_data_len;
2100 if (data_len > SCSI_SENSE_BUFFERSIZE)
2101 data_len = SCSI_SENSE_BUFFERSIZE;
2102 if (data_len > sizeof(c2->error_data.sense_data_buff))
2104 sizeof(c2->error_data.sense_data_buff);
2105 memcpy(cmd->sense_buffer,
2106 c2->error_data.sense_data_buff, data_len);
2109 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
2112 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
2115 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
2118 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
2126 case IOACCEL2_SERV_RESPONSE_FAILURE:
2127 switch (c2->error_data.status) {
2128 case IOACCEL2_STATUS_SR_IO_ERROR:
2129 case IOACCEL2_STATUS_SR_IO_ABORTED:
2130 case IOACCEL2_STATUS_SR_OVERRUN:
2133 case IOACCEL2_STATUS_SR_UNDERRUN:
2134 cmd->result = (DID_OK << 16); /* host byte */
2135 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2136 ioaccel2_resid = get_unaligned_le32(
2137 &c2->error_data.resid_cnt[0]);
2138 scsi_set_resid(cmd, ioaccel2_resid);
2140 case IOACCEL2_STATUS_SR_NO_PATH_TO_DEVICE:
2141 case IOACCEL2_STATUS_SR_INVALID_DEVICE:
2142 case IOACCEL2_STATUS_SR_IOACCEL_DISABLED:
2143 /* We will get an event from ctlr to trigger rescan */
2150 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
2152 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
2154 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
2157 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
2164 return retry; /* retry on raid path? */
2167 static void hpsa_cmd_resolve_events(struct ctlr_info *h,
2168 struct CommandList *c)
2170 bool do_wake = false;
2173 * Prevent the following race in the abort handler:
2175 * 1. LLD is requested to abort a SCSI command
2176 * 2. The SCSI command completes
2177 * 3. The struct CommandList associated with step 2 is made available
2178 * 4. New I/O request to LLD to another LUN re-uses struct CommandList
2179 * 5. Abort handler follows scsi_cmnd->host_scribble and
2180 * finds struct CommandList and tries to abort it
2181 * Now we have aborted the wrong command.
2183 * Reset c->scsi_cmd here so that the abort or reset handler will know
2184 * this command has completed. Then, check to see if the handler is
2185 * waiting for this command, and, if so, wake it.
2187 c->scsi_cmd = SCSI_CMD_IDLE;
2188 mb(); /* Declare command idle before checking for pending events. */
2189 if (c->abort_pending) {
2191 c->abort_pending = false;
2193 if (c->reset_pending) {
2194 unsigned long flags;
2195 struct hpsa_scsi_dev_t *dev;
2198 * There appears to be a reset pending; take the lock and
2199 * reconfirm. If it is still pending, decrement the count of outstanding
2200 * commands and wake the reset command if this is the last one.
2202 spin_lock_irqsave(&h->lock, flags);
2203 dev = c->reset_pending; /* Re-fetch under the lock. */
2204 if (dev && atomic_dec_and_test(&dev->reset_cmds_out))
2206 c->reset_pending = NULL;
2207 spin_unlock_irqrestore(&h->lock, flags);
2211 wake_up_all(&h->event_sync_wait_queue);
2214 static void hpsa_cmd_resolve_and_free(struct ctlr_info *h,
2215 struct CommandList *c)
2217 hpsa_cmd_resolve_events(h, c);
2218 cmd_tagged_free(h, c);
2221 static void hpsa_cmd_free_and_done(struct ctlr_info *h,
2222 struct CommandList *c, struct scsi_cmnd *cmd)
2224 hpsa_cmd_resolve_and_free(h, c);
2225 cmd->scsi_done(cmd);
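/* Resubmit a command down the normal RAID path via the driver's resubmit
 * workqueue, queued on the CPU that is processing this completion. */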
2228 static void hpsa_retry_cmd(struct ctlr_info *h, struct CommandList *c)
2230 INIT_WORK(&c->work, hpsa_command_resubmit_worker);
2231 queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
2234 static void hpsa_set_scsi_cmd_aborted(struct scsi_cmnd *cmd)
2236 cmd->result = DID_ABORT << 16;
2239 static void hpsa_cmd_abort_and_free(struct ctlr_info *h, struct CommandList *c,
2240 struct scsi_cmnd *cmd)
2242 hpsa_set_scsi_cmd_aborted(cmd);
2243 dev_warn(&h->pdev->dev, "CDB %16phN was aborted with status 0x%x\n",
2244 c->Request.CDB, c->err_info->ScsiStatus);
2245 hpsa_cmd_resolve_and_free(h, c);
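/*
 * Completion handling for ioaccel2 commands: complete immediately on good
 * status; on a RAID offload failure, turn off offload for the logical volume
 * if the controller reports ioaccel is disabled, and retry the command on the
 * normal RAID path.  Other errors are decoded by handle_ioaccel_mode2_error()
 * to decide whether a RAID path retry is needed.
 */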
2248 static void process_ioaccel2_completion(struct ctlr_info *h,
2249 struct CommandList *c, struct scsi_cmnd *cmd,
2250 struct hpsa_scsi_dev_t *dev)
2252 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2254 /* check for good status */
2255 if (likely(c2->error_data.serv_response == 0 &&
2256 c2->error_data.status == 0))
2257 return hpsa_cmd_free_and_done(h, c, cmd);
2260 * Any RAID offload error results in retry which will use
2261 * the normal I/O path so the controller can handle whatever's
2264 if (is_logical_device(dev) &&
2265 c2->error_data.serv_response ==
2266 IOACCEL2_SERV_RESPONSE_FAILURE) {
2267 if (c2->error_data.status ==
2268 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
2269 dev->offload_enabled = 0;
2271 return hpsa_retry_cmd(h, c);
2274 if (handle_ioaccel_mode2_error(h, c, cmd, c2))
2275 return hpsa_retry_cmd(h, c);
2277 return hpsa_cmd_free_and_done(h, c, cmd);
2280 /* Returns 0 on success, < 0 otherwise. */
2281 static int hpsa_evaluate_tmf_status(struct ctlr_info *h,
2282 struct CommandList *cp)
2284 u8 tmf_status = cp->err_info->ScsiStatus;
2286 switch (tmf_status) {
2287 case CISS_TMF_COMPLETE:
2289 * CISS_TMF_COMPLETE never happens, instead,
2290 * ei->CommandStatus == 0 for this case.
2292 case CISS_TMF_SUCCESS:
2294 case CISS_TMF_INVALID_FRAME:
2295 case CISS_TMF_NOT_SUPPORTED:
2296 case CISS_TMF_FAILED:
2297 case CISS_TMF_WRONG_LUN:
2298 case CISS_TMF_OVERLAPPED_TAG:
2301 dev_warn(&h->pdev->dev, "Unknown TMF status: 0x%02x\n",
2308 static void complete_scsi_command(struct CommandList *cp)
2310 struct scsi_cmnd *cmd;
2311 struct ctlr_info *h;
2312 struct ErrorInfo *ei;
2313 struct hpsa_scsi_dev_t *dev;
2314 struct io_accel2_cmd *c2;
2317 u8 asc; /* additional sense code */
2318 u8 ascq; /* additional sense code qualifier */
2319 unsigned long sense_data_size;
2324 dev = cmd->device->hostdata;
2325 c2 = &h->ioaccel2_cmd_pool[cp->cmdindex];
2327 scsi_dma_unmap(cmd); /* undo the DMA mappings */
2328 if ((cp->cmd_type == CMD_SCSI) &&
2329 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
2330 hpsa_unmap_sg_chain_block(h, cp);
2332 if ((cp->cmd_type == CMD_IOACCEL2) &&
2333 (c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
2334 hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
2336 cmd->result = (DID_OK << 16); /* host byte */
2337 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
2339 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
2340 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
2343 * We check for lockup status here as it may be set for
2344 * CMD_SCSI, CMD_IOACCEL1 and CMD_IOACCEL2 commands by
2345 * fail_all_outstanding_cmds()
2347 if (unlikely(ei->CommandStatus == CMD_CTLR_LOCKUP)) {
2348 /* DID_NO_CONNECT will prevent a retry */
2349 cmd->result = DID_NO_CONNECT << 16;
2350 return hpsa_cmd_free_and_done(h, cp, cmd);
2353 if ((unlikely(hpsa_is_pending_event(cp)))) {
2354 if (cp->reset_pending)
2355 return hpsa_cmd_resolve_and_free(h, cp);
2356 if (cp->abort_pending)
2357 return hpsa_cmd_abort_and_free(h, cp, cmd);
2360 if (cp->cmd_type == CMD_IOACCEL2)
2361 return process_ioaccel2_completion(h, cp, cmd, dev);
2363 scsi_set_resid(cmd, ei->ResidualCnt);
2364 if (ei->CommandStatus == 0)
2365 return hpsa_cmd_free_and_done(h, cp, cmd);
2367 /* For I/O accelerator commands, copy over some fields to the normal
2368 * CISS header used below for error handling.
2370 if (cp->cmd_type == CMD_IOACCEL1) {
2371 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
2372 cp->Header.SGList = scsi_sg_count(cmd);
2373 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
2374 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
2375 IOACCEL1_IOFLAGS_CDBLEN_MASK;
2376 cp->Header.tag = c->tag;
2377 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
2378 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
2380 /* Any RAID offload error results in retry which will use
2381 * the normal I/O path so the controller can handle whatever's
2384 if (is_logical_device(dev)) {
2385 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
2386 dev->offload_enabled = 0;
2387 return hpsa_retry_cmd(h, cp);
2391 /* an error has occurred */
2392 switch (ei->CommandStatus) {
2394 case CMD_TARGET_STATUS:
2395 cmd->result |= ei->ScsiStatus;
2396 /* copy the sense data */
2397 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
2398 sense_data_size = SCSI_SENSE_BUFFERSIZE;
2400 sense_data_size = sizeof(ei->SenseInfo);
2401 if (ei->SenseLen < sense_data_size)
2402 sense_data_size = ei->SenseLen;
2403 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
2405 decode_sense_data(ei->SenseInfo, sense_data_size,
2406 &sense_key, &asc, &ascq);
2407 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
2408 if (sense_key == ABORTED_COMMAND) {
2409 cmd->result |= DID_SOFT_ERROR << 16;
2414 /* Problem was not a check condition
2415 * Pass it up to the upper layers...
2417 if (ei->ScsiStatus) {
2418 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
2419 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
2420 "Returning result: 0x%x\n",
2422 sense_key, asc, ascq,
2424 } else { /* scsi status is zero??? How??? */
2425 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
2426 "Returning no connection.\n", cp),
2428 /* Ordinarily, this case should never happen,
2429 * but there is a bug in some released firmware
2430 * revisions that allows it to happen if, for
2431 * example, a 4100 backplane loses power and
2432 * the tape drive is in it. We assume that
2433 * it's a fatal error of some kind because we
2434 * can't show that it wasn't. We will make it
2435 * look like selection timeout since that is
2436 * the most common reason for this to occur,
2437 * and it's severe enough.
2440 cmd->result = DID_NO_CONNECT << 16;
2444 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2446 case CMD_DATA_OVERRUN:
2447 dev_warn(&h->pdev->dev,
2448 "CDB %16phN data overrun\n", cp->Request.CDB);
2451 /* print_bytes(cp, sizeof(*cp), 1, 0);
2453 /* We get CMD_INVALID if you address a non-existent device
2454 * instead of a selection timeout (no response). You will
2455 * see this if you yank out a drive, then try to access it.
2456 * This is kind of a shame because it means that any other
2457 * CMD_INVALID (e.g. driver bug) will get interpreted as a
2458 * missing target. */
2459 cmd->result = DID_NO_CONNECT << 16;
2462 case CMD_PROTOCOL_ERR:
2463 cmd->result = DID_ERROR << 16;
2464 dev_warn(&h->pdev->dev, "CDB %16phN : protocol error\n",
2467 case CMD_HARDWARE_ERR:
2468 cmd->result = DID_ERROR << 16;
2469 dev_warn(&h->pdev->dev, "CDB %16phN : hardware error\n",
2472 case CMD_CONNECTION_LOST:
2473 cmd->result = DID_ERROR << 16;
2474 dev_warn(&h->pdev->dev, "CDB %16phN : connection lost\n",
2478 /* Return now to avoid calling scsi_done(). */
2479 return hpsa_cmd_abort_and_free(h, cp, cmd);
2480 case CMD_ABORT_FAILED:
2481 cmd->result = DID_ERROR << 16;
2482 dev_warn(&h->pdev->dev, "CDB %16phN : abort failed\n",
2485 case CMD_UNSOLICITED_ABORT:
2486 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
2487 dev_warn(&h->pdev->dev, "CDB %16phN : unsolicited abort\n",
2491 cmd->result = DID_TIME_OUT << 16;
2492 dev_warn(&h->pdev->dev, "CDB %16phN timed out\n",
2495 case CMD_UNABORTABLE:
2496 cmd->result = DID_ERROR << 16;
2497 dev_warn(&h->pdev->dev, "Command unabortable\n");
2499 case CMD_TMF_STATUS:
2500 if (hpsa_evaluate_tmf_status(h, cp)) /* TMF failed? */
2501 cmd->result = DID_ERROR << 16;
2503 case CMD_IOACCEL_DISABLED:
2504 /* This only handles the direct pass-through case since RAID
2505 * offload is handled above. Just attempt a retry.
2507 cmd->result = DID_SOFT_ERROR << 16;
2508 dev_warn(&h->pdev->dev,
2509 "cp %p had HP SSD Smart Path error\n", cp);
2512 cmd->result = DID_ERROR << 16;
2513 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
2514 cp, ei->CommandStatus);
2517 return hpsa_cmd_free_and_done(h, cp, cmd);
2520 static void hpsa_pci_unmap(struct pci_dev *pdev,
2521 struct CommandList *c, int sg_used, int data_direction)
2525 for (i = 0; i < sg_used; i++)
2526 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
2527 le32_to_cpu(c->SG[i].Len),
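/* DMA-map a single contiguous buffer for a driver-initiated command and
 * describe it with one SG entry; commands with no data transfer get
 * SGList/SGTotal set to zero. */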
2531 static int hpsa_map_one(struct pci_dev *pdev,
2532 struct CommandList *cp,
2539 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
2540 cp->Header.SGList = 0;
2541 cp->Header.SGTotal = cpu_to_le16(0);
2545 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
2546 if (dma_mapping_error(&pdev->dev, addr64)) {
2547 /* Prevent subsequent unmap of something never mapped */
2548 cp->Header.SGList = 0;
2549 cp->Header.SGTotal = cpu_to_le16(0);
2552 cp->SG[0].Addr = cpu_to_le64(addr64);
2553 cp->SG[0].Len = cpu_to_le32(buflen);
2554 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
2555 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
2556 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
2560 #define NO_TIMEOUT ((unsigned long) -1)
2561 #define DEFAULT_TIMEOUT 30000 /* milliseconds */
2562 static int hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
2563 struct CommandList *c, int reply_queue, unsigned long timeout_msecs)
2565 DECLARE_COMPLETION_ONSTACK(wait);
2568 __enqueue_cmd_and_start_io(h, c, reply_queue);
2569 if (timeout_msecs == NO_TIMEOUT) {
2570 /* TODO: get rid of this no-timeout thing */
2571 wait_for_completion_io(&wait);
2574 if (!wait_for_completion_io_timeout(&wait,
2575 msecs_to_jiffies(timeout_msecs))) {
2576 dev_warn(&h->pdev->dev, "Command timed out.\n");
2582 static int hpsa_scsi_do_simple_cmd(struct ctlr_info *h, struct CommandList *c,
2583 int reply_queue, unsigned long timeout_msecs)
2585 if (unlikely(lockup_detected(h))) {
2586 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
2589 return hpsa_scsi_do_simple_cmd_core(h, c, reply_queue, timeout_msecs);
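/* Returns nonzero if the controller has been flagged as locked up; the flag
 * is kept per-CPU, so read this CPU's copy. */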
2592 static u32 lockup_detected(struct ctlr_info *h)
2595 u32 rc, *lockup_detected;
2598 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2599 rc = *lockup_detected;
2604 #define MAX_DRIVER_CMD_RETRIES 25
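/* Issue a driver-internal command, retrying while the controller reports
 * unit attention or busy status, sleeping with a bounded backoff between
 * later attempts, up to MAX_DRIVER_CMD_RETRIES tries. */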
2605 static int hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2606 struct CommandList *c, int data_direction, unsigned long timeout_msecs)
2608 int backoff_time = 10, retry_count = 0;
2612 memset(c->err_info, 0, sizeof(*c->err_info));
2613 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE,
2618 if (retry_count > 3) {
2619 msleep(backoff_time);
2620 if (backoff_time < 1000)
2623 } while ((check_for_unit_attention(h, c) ||
2624 check_for_busy(h, c)) &&
2625 retry_count <= MAX_DRIVER_CMD_RETRIES);
2626 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2627 if (retry_count > MAX_DRIVER_CMD_RETRIES)
2632 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2633 struct CommandList *c)
2635 const u8 *cdb = c->Request.CDB;
2636 const u8 *lun = c->Header.LUN.LunAddrBytes;
2638 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2639 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2640 txt, lun[0], lun[1], lun[2], lun[3],
2641 lun[4], lun[5], lun[6], lun[7],
2642 cdb[0], cdb[1], cdb[2], cdb[3],
2643 cdb[4], cdb[5], cdb[6], cdb[7],
2644 cdb[8], cdb[9], cdb[10], cdb[11],
2645 cdb[12], cdb[13], cdb[14], cdb[15]);
2648 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2649 struct CommandList *cp)
2651 const struct ErrorInfo *ei = cp->err_info;
2652 struct device *d = &cp->h->pdev->dev;
2653 u8 sense_key, asc, ascq;
2656 switch (ei->CommandStatus) {
2657 case CMD_TARGET_STATUS:
2658 if (ei->SenseLen > sizeof(ei->SenseInfo))
2659 sense_len = sizeof(ei->SenseInfo);
2661 sense_len = ei->SenseLen;
2662 decode_sense_data(ei->SenseInfo, sense_len,
2663 &sense_key, &asc, &ascq);
2664 hpsa_print_cmd(h, "SCSI status", cp);
2665 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2666 dev_warn(d, "SCSI Status = 02, Sense key = 0x%02x, ASC = 0x%02x, ASCQ = 0x%02x\n",
2667 sense_key, asc, ascq);
2669 dev_warn(d, "SCSI Status = 0x%02x\n", ei->ScsiStatus);
2670 if (ei->ScsiStatus == 0)
2671 dev_warn(d, "SCSI status is abnormally zero. "
2672 "(probably indicates selection timeout "
2673 "reported incorrectly due to a known "
2674 "firmware bug, circa July, 2001.)\n");
2676 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2678 case CMD_DATA_OVERRUN:
2679 hpsa_print_cmd(h, "overrun condition", cp);
3682 /* controller unfortunately reports SCSI passthroughs
3683 * to non-existent targets as invalid commands.
2685 hpsa_print_cmd(h, "invalid command", cp);
2686 dev_warn(d, "probably means device no longer present\n");
2689 case CMD_PROTOCOL_ERR:
2690 hpsa_print_cmd(h, "protocol error", cp);
2692 case CMD_HARDWARE_ERR:
2693 hpsa_print_cmd(h, "hardware error", cp);
2695 case CMD_CONNECTION_LOST:
2696 hpsa_print_cmd(h, "connection lost", cp);
2699 hpsa_print_cmd(h, "aborted", cp);
2701 case CMD_ABORT_FAILED:
2702 hpsa_print_cmd(h, "abort failed", cp);
2704 case CMD_UNSOLICITED_ABORT:
2705 hpsa_print_cmd(h, "unsolicited abort", cp);
2708 hpsa_print_cmd(h, "timed out", cp);
2710 case CMD_UNABORTABLE:
2711 hpsa_print_cmd(h, "unabortable", cp);
2713 case CMD_CTLR_LOCKUP:
2714 hpsa_print_cmd(h, "controller lockup detected", cp);
2717 hpsa_print_cmd(h, "unknown status", cp);
2718 dev_warn(d, "Unknown command status %x\n",
2723 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2724 u16 page, unsigned char *buf,
2725 unsigned char bufsize)
2728 struct CommandList *c;
2729 struct ErrorInfo *ei;
2733 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2734 page, scsi3addr, TYPE_CMD)) {
2738 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
2739 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
2743 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2744 hpsa_scsi_interpret_error(h, c);
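/* Send a reset message of the given reset_type to the target addressed by
 * scsi3addr and report any error status returned by the controller. */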
2752 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2753 u8 reset_type, int reply_queue)
2756 struct CommandList *c;
2757 struct ErrorInfo *ei;
2762 /* fill_cmd can't fail here, no data buffer to map. */
2763 (void) fill_cmd(c, reset_type, h, NULL, 0, 0,
2764 scsi3addr, TYPE_MSG);
2765 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
2767 dev_warn(&h->pdev->dev, "Failed to send reset command\n");
2770 /* no unmap needed here because no data xfer. */
2773 if (ei->CommandStatus != 0) {
2774 hpsa_scsi_interpret_error(h, c);
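/*
 * Decide whether an outstanding command is addressed to the device being
 * reset: idle commands never match; SCSI/ioctl commands are matched by LUN
 * address, ioaccel commands by the physical disk they target, and ioaccel2
 * TMF requests by ioaccel handle (it_nexus).  Commands still being
 * initialized never match.
 */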
2782 static bool hpsa_cmd_dev_match(struct ctlr_info *h, struct CommandList *c,
2783 struct hpsa_scsi_dev_t *dev,
2784 unsigned char *scsi3addr)
2788 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
2789 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
2791 if (hpsa_is_cmd_idle(c))
2794 switch (c->cmd_type) {
2796 case CMD_IOCTL_PEND:
2797 match = !memcmp(scsi3addr, &c->Header.LUN.LunAddrBytes,
2798 sizeof(c->Header.LUN.LunAddrBytes));
2803 if (c->phys_disk == dev) {
2804 /* HBA mode match */
2807 /* Possible RAID mode -- check each phys dev. */
2808 /* FIXME: Do we need to take out a lock here? If
2809 * so, we could just call hpsa_get_pdisk_of_ioaccel2()
2811 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2812 /* FIXME: an alternate test might be
2814 * match = dev->phys_disk[i]->ioaccel_handle
2815 * == c2->scsi_nexus; */
2816 match = dev->phys_disk[i] == c->phys_disk;
2822 for (i = 0; i < dev->nphysical_disks && !match; i++) {
2823 match = dev->phys_disk[i]->ioaccel_handle ==
2824 le32_to_cpu(ac->it_nexus);
2828 case 0: /* The command is in the middle of being initialized. */
2833 dev_err(&h->pdev->dev, "unexpected cmd_type: %d\n",
2841 static int hpsa_do_reset(struct ctlr_info *h, struct hpsa_scsi_dev_t *dev,
2842 unsigned char *scsi3addr, u8 reset_type, int reply_queue)
2847 /* We can really only handle one reset at a time */
2848 if (mutex_lock_interruptible(&h->reset_mutex) == -EINTR) {
2849 dev_warn(&h->pdev->dev, "concurrent reset wait interrupted.\n");
2853 BUG_ON(atomic_read(&dev->reset_cmds_out) != 0);
2855 for (i = 0; i < h->nr_cmds; i++) {
2856 struct CommandList *c = h->cmd_pool + i;
2857 int refcount = atomic_inc_return(&c->refcount);
2859 if (refcount > 1 && hpsa_cmd_dev_match(h, c, dev, scsi3addr)) {
2860 unsigned long flags;
2863 * Mark the target command as having a reset pending,
2864 * then lock a lock so that the command cannot complete
2865 * while we're considering it. If the command is not
2866 * idle then count it; otherwise revoke the event.
2868 c->reset_pending = dev;
2869 spin_lock_irqsave(&h->lock, flags); /* Implied MB */
2870 if (!hpsa_is_cmd_idle(c))
2871 atomic_inc(&dev->reset_cmds_out);
2873 c->reset_pending = NULL;
2874 spin_unlock_irqrestore(&h->lock, flags);
2880 rc = hpsa_send_reset(h, scsi3addr, reset_type, reply_queue);
2882 wait_event(h->event_sync_wait_queue,
2883 atomic_read(&dev->reset_cmds_out) == 0 ||
2884 lockup_detected(h));
2886 if (unlikely(lockup_detected(h))) {
2887 dev_warn(&h->pdev->dev,
2888 "Controller lockup detected during reset wait\n");
2893 atomic_set(&dev->reset_cmds_out, 0);
2895 mutex_unlock(&h->reset_mutex);
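/* Read the volume's RAID level from vendor-specific VPD page 0xC1;
 * *raid_level is left as RAID_UNKNOWN if the inquiry fails or the returned
 * value is out of range. */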
2899 static void hpsa_get_raid_level(struct ctlr_info *h,
2900 unsigned char *scsi3addr, unsigned char *raid_level)
2905 *raid_level = RAID_UNKNOWN;
2906 buf = kzalloc(64, GFP_KERNEL);
2909 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2911 *raid_level = buf[8];
2912 if (*raid_level > RAID_UNKNOWN)
2913 *raid_level = RAID_UNKNOWN;
2918 #define HPSA_MAP_DEBUG
2919 #ifdef HPSA_MAP_DEBUG
2920 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2921 struct raid_map_data *map_buff)
2923 struct raid_map_disk_data *dd = &map_buff->data[0];
2925 u16 map_cnt, row_cnt, disks_per_row;
2930 /* Show details only if debugging has been activated. */
2931 if (h->raid_offload_debug < 2)
2934 dev_info(&h->pdev->dev, "structure_size = %u\n",
2935 le32_to_cpu(map_buff->structure_size));
2936 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2937 le32_to_cpu(map_buff->volume_blk_size));
2938 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2939 le64_to_cpu(map_buff->volume_blk_cnt));
2940 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2941 map_buff->phys_blk_shift);
2942 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2943 map_buff->parity_rotation_shift);
2944 dev_info(&h->pdev->dev, "strip_size = %u\n",
2945 le16_to_cpu(map_buff->strip_size));
2946 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2947 le64_to_cpu(map_buff->disk_starting_blk));
2948 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2949 le64_to_cpu(map_buff->disk_blk_cnt));
2950 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2951 le16_to_cpu(map_buff->data_disks_per_row));
2952 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2953 le16_to_cpu(map_buff->metadata_disks_per_row));
2954 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2955 le16_to_cpu(map_buff->row_cnt));
2956 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2957 le16_to_cpu(map_buff->layout_map_count));
2958 dev_info(&h->pdev->dev, "flags = 0x%x\n",
2959 le16_to_cpu(map_buff->flags));
2960 dev_info(&h->pdev->dev, "encrypytion = %s\n",
2961 le16_to_cpu(map_buff->flags) &
2962 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
2963 dev_info(&h->pdev->dev, "dekindex = %u\n",
2964 le16_to_cpu(map_buff->dekindex));
2965 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2966 for (map = 0; map < map_cnt; map++) {
2967 dev_info(&h->pdev->dev, "Map%u:\n", map);
2968 row_cnt = le16_to_cpu(map_buff->row_cnt);
2969 for (row = 0; row < row_cnt; row++) {
2970 dev_info(&h->pdev->dev, " Row%u:\n", row);
2972 le16_to_cpu(map_buff->data_disks_per_row);
2973 for (col = 0; col < disks_per_row; col++, dd++)
2974 dev_info(&h->pdev->dev,
2975 " D%02u: h=0x%04x xor=%u,%u\n",
2976 col, dd->ioaccel_handle,
2977 dd->xor_mult[0], dd->xor_mult[1]);
2979 le16_to_cpu(map_buff->metadata_disks_per_row);
2980 for (col = 0; col < disks_per_row; col++, dd++)
2981 dev_info(&h->pdev->dev,
2982 " M%02u: h=0x%04x xor=%u,%u\n",
2983 col, dd->ioaccel_handle,
2984 dd->xor_mult[0], dd->xor_mult[1]);
2989 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2990 __attribute__((unused)) int rc,
2991 __attribute__((unused)) struct raid_map_data *map_buff)
2996 static int hpsa_get_raid_map(struct ctlr_info *h,
2997 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3000 struct CommandList *c;
3001 struct ErrorInfo *ei;
3005 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
3006 sizeof(this_device->raid_map), 0,
3007 scsi3addr, TYPE_CMD)) {
3008 dev_warn(&h->pdev->dev, "hpsa_get_raid_map fill_cmd failed\n");
3012 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3013 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3017 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3018 hpsa_scsi_interpret_error(h, c);
3024 /* @todo in the future, dynamically allocate RAID map memory */
3025 if (le32_to_cpu(this_device->raid_map.structure_size) >
3026 sizeof(this_device->raid_map)) {
3027 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
3030 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
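/* Issue BMIC IDENTIFY PHYSICAL DEVICE for the given drive; the 16-bit
 * bmic_device_index is carried in CDB bytes 2 (low byte) and 9 (high byte). */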
3037 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
3038 unsigned char scsi3addr[], u16 bmic_device_index,
3039 struct bmic_identify_physical_device *buf, size_t bufsize)
3042 struct CommandList *c;
3043 struct ErrorInfo *ei;
3046 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
3047 0, RAID_CTLR_LUNID, TYPE_CMD);
3051 c->Request.CDB[2] = bmic_device_index & 0xff;
3052 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
3054 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE,
3057 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
3058 hpsa_scsi_interpret_error(h, c);
3066 static int hpsa_vpd_page_supported(struct ctlr_info *h,
3067 unsigned char scsi3addr[], u8 page)
3072 unsigned char *buf, bufsize;
3074 buf = kzalloc(256, GFP_KERNEL);
3078 /* Get the size of the page list first */
3079 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3080 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3081 buf, HPSA_VPD_HEADER_SZ);
3083 goto exit_unsupported;
3085 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
3086 bufsize = pages + HPSA_VPD_HEADER_SZ;
3090 /* Get the whole VPD page list */
3091 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3092 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
3095 goto exit_unsupported;
3098 for (i = 1; i <= pages; i++)
3099 if (buf[3 + i] == page)
3100 goto exit_supported;
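/*
 * Determine HP SSD Smart Path (ioaccel) status for a logical volume from its
 * ioaccel status VPD page: whether offload is configured and whether it is
 * enabled.  Offload is only left enabled if the volume's RAID map can also
 * be read successfully.
 */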
3109 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
3110 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
3116 this_device->offload_config = 0;
3117 this_device->offload_enabled = 0;
3118 this_device->offload_to_be_enabled = 0;
3120 buf = kzalloc(64, GFP_KERNEL);
3123 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
3125 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
3126 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
3130 #define IOACCEL_STATUS_BYTE 4
3131 #define OFFLOAD_CONFIGURED_BIT 0x01
3132 #define OFFLOAD_ENABLED_BIT 0x02
3133 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
3134 this_device->offload_config =
3135 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
3136 if (this_device->offload_config) {
3137 this_device->offload_enabled =
3138 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
3139 if (hpsa_get_raid_map(h, scsi3addr, this_device))
3140 this_device->offload_enabled = 0;
3142 this_device->offload_to_be_enabled = this_device->offload_enabled;
3148 /* Get the device id from inquiry page 0x83 */
3149 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
3150 unsigned char *device_id, int index, int buflen)
3157 buf = kzalloc(64, GFP_KERNEL);
3160 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
3162 memcpy(device_id, &buf[index], buflen);
3169 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
3170 void *buf, int bufsize,
3171 int extended_response)
3174 struct CommandList *c;
3175 unsigned char scsi3addr[8];
3176 struct ErrorInfo *ei;
3180 /* address the controller */
3181 memset(scsi3addr, 0, sizeof(scsi3addr));
3182 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
3183 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
3187 if (extended_response)
3188 c->Request.CDB[1] = extended_response;
3189 rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
3190 PCI_DMA_FROMDEVICE, NO_TIMEOUT);
3194 if (ei->CommandStatus != 0 &&
3195 ei->CommandStatus != CMD_DATA_UNDERRUN) {
3196 hpsa_scsi_interpret_error(h, c);
3199 struct ReportLUNdata *rld = buf;
3201 if (rld->extended_response_flag != extended_response) {
3202 dev_err(&h->pdev->dev,
3203 "report luns requested format %u, got %u\n",
3205 rld->extended_response_flag);
3214 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
3215 struct ReportExtendedLUNdata *buf, int bufsize)
3217 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
3218 HPSA_REPORT_PHYS_EXTENDED);
3221 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
3222 struct ReportLUNdata *buf, int bufsize)
3224 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
3227 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
3228 int bus, int target, int lun)
3231 device->target = target;
3235 /* Use VPD inquiry to get details of volume status */
3236 static int hpsa_get_volume_status(struct ctlr_info *h,
3237 unsigned char scsi3addr[])
3244 buf = kzalloc(64, GFP_KERNEL);
3246 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3248 /* Does controller have VPD for logical volume status? */
3249 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
3252 /* Get the size of the VPD return buffer */
3253 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3254 buf, HPSA_VPD_HEADER_SZ);
3259 /* Now get the whole VPD buffer */
3260 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
3261 buf, size + HPSA_VPD_HEADER_SZ);
3264 status = buf[4]; /* status byte */
3270 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
3273 /* Determine offline status of a volume.
3276 * 0xff (offline for unknown reasons)
3277 * # (integer code indicating one of several NOT READY states
3278 * describing why a volume is to be kept offline)
3280 static int hpsa_volume_offline(struct ctlr_info *h,
3281 unsigned char scsi3addr[])
3283 struct CommandList *c;
3284 unsigned char *sense;
3285 u8 sense_key, asc, ascq;
3290 #define ASC_LUN_NOT_READY 0x04
3291 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
3292 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
3296 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
3297 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3302 sense = c->err_info->SenseInfo;
3303 if (c->err_info->SenseLen > sizeof(c->err_info->SenseInfo))
3304 sense_len = sizeof(c->err_info->SenseInfo);
3306 sense_len = c->err_info->SenseLen;
3307 decode_sense_data(sense, sense_len, &sense_key, &asc, &ascq);
3308 cmd_status = c->err_info->CommandStatus;
3309 scsi_status = c->err_info->ScsiStatus;
3311 /* Is the volume 'not ready'? */
3312 if (cmd_status != CMD_TARGET_STATUS ||
3313 scsi_status != SAM_STAT_CHECK_CONDITION ||
3314 sense_key != NOT_READY ||
3315 asc != ASC_LUN_NOT_READY) {
3319 /* Determine the reason for not ready state */
3320 ldstat = hpsa_get_volume_status(h, scsi3addr);
3322 /* Keep volume offline in certain cases: */
3324 case HPSA_LV_UNDERGOING_ERASE:
3325 case HPSA_LV_NOT_AVAILABLE:
3326 case HPSA_LV_UNDERGOING_RPI:
3327 case HPSA_LV_PENDING_RPI:
3328 case HPSA_LV_ENCRYPTED_NO_KEY:
3329 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
3330 case HPSA_LV_UNDERGOING_ENCRYPTION:
3331 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
3332 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
3334 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
3335 /* If VPD status page isn't available,
3336 * use ASC/ASCQ to determine state
3338 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
3339 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
3349 * Find out if a logical device supports aborts by simply trying one.
3350 * Smart Array may claim not to support aborts on logical drives, but
3351 * if an MSA2000 is connected, the drives on it will be presented
3352 * by the Smart Array as logical drives, and aborts may be sent to
3353 * those devices successfully. So the simplest way to find out is
3354 * to simply try an abort and see how the device responds.
3356 static int hpsa_device_supports_aborts(struct ctlr_info *h,
3357 unsigned char *scsi3addr)
3359 struct CommandList *c;
3360 struct ErrorInfo *ei;
3363 u64 tag = (u64) -1; /* bogus tag */
3365 /* Assume that physical devices support aborts */
3366 if (!is_logical_dev_addr_mode(scsi3addr))
3371 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &tag, 0, 0, scsi3addr, TYPE_MSG);
3372 (void) hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
3373 /* no unmap needed here because no data xfer. */
3375 switch (ei->CommandStatus) {
3379 case CMD_UNABORTABLE:
3380 case CMD_ABORT_FAILED:
3383 case CMD_TMF_STATUS:
3384 rc = hpsa_evaluate_tmf_status(h, c);
3394 static void sanitize_inquiry_string(unsigned char *s, int len)
3396 bool terminated = false;
3398 for (; len > 0; (--len, ++s)) {
3401 if (terminated || *s < 0x20 || *s > 0x7e)
3406 static int hpsa_update_device_info(struct ctlr_info *h,
3407 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
3408 unsigned char *is_OBDR_device)
3411 #define OBDR_SIG_OFFSET 43
3412 #define OBDR_TAPE_SIG "$DR-10"
3413 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
3414 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
3416 unsigned char *inq_buff;
3417 unsigned char *obdr_sig;
3420 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
3426 /* Do an inquiry to the device to see what it is. */
3427 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
3428 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
3429 /* Inquiry failed (msg printed already) */
3430 dev_err(&h->pdev->dev,
3431 "hpsa_update_device_info: inquiry failed\n");
3436 sanitize_inquiry_string(&inq_buff[8], 8);
3437 sanitize_inquiry_string(&inq_buff[16], 16);
3439 this_device->devtype = (inq_buff[0] & 0x1f);
3440 memcpy(this_device->scsi3addr, scsi3addr, 8);
3441 memcpy(this_device->vendor, &inq_buff[8],
3442 sizeof(this_device->vendor));
3443 memcpy(this_device->model, &inq_buff[16],
3444 sizeof(this_device->model));
3445 memset(this_device->device_id, 0,
3446 sizeof(this_device->device_id));
3447 hpsa_get_device_id(h, scsi3addr, this_device->device_id, 8,
3448 sizeof(this_device->device_id));
3450 if (this_device->devtype == TYPE_DISK &&
3451 is_logical_dev_addr_mode(scsi3addr)) {
3454 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
3455 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
3456 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
3457 volume_offline = hpsa_volume_offline(h, scsi3addr);
3458 if (volume_offline < 0 || volume_offline > 0xff)
3459 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
3460 this_device->volume_offline = volume_offline & 0xff;
3462 this_device->raid_level = RAID_UNKNOWN;
3463 this_device->offload_config = 0;
3464 this_device->offload_enabled = 0;
3465 this_device->offload_to_be_enabled = 0;
3466 this_device->hba_ioaccel_enabled = 0;
3467 this_device->volume_offline = 0;
3468 this_device->queue_depth = h->nr_cmds;
3471 if (is_OBDR_device) {
3472 /* See if this is a One-Button-Disaster-Recovery device
3473 * by looking for "$DR-10" at offset 43 in inquiry data.
3475 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
3476 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
3477 strncmp(obdr_sig, OBDR_TAPE_SIG,
3478 OBDR_SIG_LEN) == 0);
3488 static void hpsa_update_device_supports_aborts(struct ctlr_info *h,
3489 struct hpsa_scsi_dev_t *dev, u8 *scsi3addr)
3491 unsigned long flags;
3494 * See if this device supports aborts. If we already know
3495 * the device, we already know if it supports aborts, otherwise
3496 * we have to find out if it supports aborts by trying one.
3498 spin_lock_irqsave(&h->devlock, flags);
3499 rc = hpsa_scsi_find_entry(dev, h->dev, h->ndevices, &entry);
3500 if ((rc == DEVICE_SAME || rc == DEVICE_UPDATED) &&
3501 entry >= 0 && entry < h->ndevices) {
3502 dev->supports_aborts = h->dev[entry]->supports_aborts;
3503 spin_unlock_irqrestore(&h->devlock, flags);
3505 spin_unlock_irqrestore(&h->devlock, flags);
3506 dev->supports_aborts =
3507 hpsa_device_supports_aborts(h, scsi3addr);
3508 if (dev->supports_aborts < 0)
3509 dev->supports_aborts = 0;
3513 static unsigned char *ext_target_model[] = {
3523 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
3527 for (i = 0; ext_target_model[i]; i++)
3528 if (strncmp(device->model, ext_target_model[i],
3529 strlen(ext_target_model[i])) == 0)
3535 * Helper function to assign bus, target, lun mapping of devices.
3536 * Logical drive target and lun are assigned at this time, but
3537 * physical device lun and target assignment are deferred (assigned
3538 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
3540 static void figure_bus_target_lun(struct ctlr_info *h,
3541 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
3543 u32 lunid = get_unaligned_le32(lunaddrbytes);
3545 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
3546 /* physical device, target and lun filled in later */
3547 if (is_hba_lunid(lunaddrbytes))
3548 hpsa_set_bus_target_lun(device,
3549 HPSA_HBA_BUS, 0, lunid & 0x3fff);
3551 /* defer target, lun assignment for physical devices */
3552 hpsa_set_bus_target_lun(device,
3553 HPSA_PHYSICAL_DEVICE_BUS, -1, -1);
3556 /* It's a logical device */
3557 if (is_ext_target(h, device)) {
3558 hpsa_set_bus_target_lun(device,
3559 HPSA_EXTERNAL_RAID_VOLUME_BUS, (lunid >> 16) & 0x3fff,
3563 hpsa_set_bus_target_lun(device, HPSA_RAID_VOLUME_BUS,
3568 * If there is no lun 0 on a target, linux won't find any devices.
3569 * For the external targets (arrays), we have to manually detect the enclosure
3570 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
3571 * it for some reason. *tmpdevice is the target we're adding,
3572 * this_device is a pointer into the current element of currentsd[]
3573 * that we're building up in update_scsi_devices(), below.
3574 * lunzerobits is a bitmap that tracks which targets already have a lun 0 added.
3576 * Returns 1 if an enclosure was added, 0 if not.
3578 static int add_ext_target_dev(struct ctlr_info *h,
3579 struct hpsa_scsi_dev_t *tmpdevice,
3580 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
3581 unsigned long lunzerobits[], int *n_ext_target_devs)
3583 unsigned char scsi3addr[8];
3585 if (test_bit(tmpdevice->target, lunzerobits))
3586 return 0; /* There is already a lun 0 on this target. */
3588 if (!is_logical_dev_addr_mode(lunaddrbytes))
3589 return 0; /* It's the logical targets that may lack lun 0. */
3591 if (!is_ext_target(h, tmpdevice))
3592 return 0; /* Only external target devices have this problem. */
3594 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
3597 memset(scsi3addr, 0, 8);
3598 scsi3addr[3] = tmpdevice->target;
3599 if (is_hba_lunid(scsi3addr))
3600 return 0; /* Don't add the RAID controller here. */
3602 if (is_scsi_rev_5(h))
3603 return 0; /* p1210m doesn't need to do this. */
3605 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
3606 dev_warn(&h->pdev->dev, "Maximum number of external "
3607 "target devices exceeded. Check your hardware "
3612 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
3614 (*n_ext_target_devs)++;
3615 hpsa_set_bus_target_lun(this_device,
3616 tmpdevice->bus, tmpdevice->target, 0);
3617 hpsa_update_device_supports_aborts(h, this_device, scsi3addr);
3618 set_bit(tmpdevice->target, lunzerobits);
3623 * Get address of physical disk used for an ioaccel2 mode command:
3624 * 1. Extract ioaccel2 handle from the command.
3625 * 2. Find a matching ioaccel2 handle from list of physical disks.
3627 * Returns 1 and sets scsi3addr to the address of the matching physical
3628 * disk, or 0 if no matching physical disk was found.
3630 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
3631 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
3633 struct io_accel2_cmd *c2 =
3634 &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
3635 unsigned long flags;
3638 spin_lock_irqsave(&h->devlock, flags);
3639 for (i = 0; i < h->ndevices; i++)
3640 if (h->dev[i]->ioaccel_handle == le32_to_cpu(c2->scsi_nexus)) {
3641 memcpy(scsi3addr, h->dev[i]->scsi3addr,
3642 sizeof(h->dev[i]->scsi3addr));
3643 spin_unlock_irqrestore(&h->devlock, flags);
3646 spin_unlock_irqrestore(&h->devlock, flags);
3651 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
3652 * logdev. The number of luns in physdev and logdev are returned in
3653 * *nphysicals and *nlogicals, respectively.
3654 * Returns 0 on success, -1 otherwise.
3656 static int hpsa_gather_lun_info(struct ctlr_info *h,
3657 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
3658 struct ReportLUNdata *logdev, u32 *nlogicals)
3660 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
3661 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
3664 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
3665 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
3666 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
3667 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
3668 *nphysicals = HPSA_MAX_PHYS_LUN;
3670 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
3671 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
3674 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
3675 /* Reject Logicals in excess of our max capability. */
3676 if (*nlogicals > HPSA_MAX_LUN) {
3677 dev_warn(&h->pdev->dev,
3678 "maximum logical LUNs (%d) exceeded. "
3679 "%d LUNs ignored.\n", HPSA_MAX_LUN,
3680 *nlogicals - HPSA_MAX_LUN);
3681 *nlogicals = HPSA_MAX_LUN;
3683 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
3684 dev_warn(&h->pdev->dev,
3685 "maximum logical + physical LUNs (%d) exceeded. "
3686 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
3687 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
3688 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
3693 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
3694 int i, int nphysicals, int nlogicals,
3695 struct ReportExtendedLUNdata *physdev_list,
3696 struct ReportLUNdata *logdev_list)
3698 /* Helper function, figure out where the LUN ID info is coming from
3699 * given index i, lists of physical and logical devices, where in
3700 * the list the raid controller is supposed to appear (first or last)
3703 int logicals_start = nphysicals + (raid_ctlr_position == 0);
3704 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
3706 if (i == raid_ctlr_position)
3707 return RAID_CTLR_LUNID;
3709 if (i < logicals_start)
3710 return &physdev_list->LUN[i -
3711 (raid_ctlr_position == 0)].lunid[0];
3713 if (i < last_device)
3714 return &logdev_list->LUN[i - nphysicals -
3715 (raid_ctlr_position == 0)][0];
3720 /* get physical drive ioaccel handle and queue depth */
3721 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3722 struct hpsa_scsi_dev_t *dev,
3723 struct ReportExtendedLUNdata *rlep, int rle_index,
3724 struct bmic_identify_physical_device *id_phys)
3727 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3729 dev->ioaccel_handle = rle->ioaccel_handle;
3730 if ((rle->device_flags & 0x08) && dev->ioaccel_handle)
3731 dev->hba_ioaccel_enabled = 1;
3732 memset(id_phys, 0, sizeof(*id_phys));
3733 rc = hpsa_bmic_id_physical_device(h, &rle->lunid[0],
3734 GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]), id_phys,
3737 /* Reserve space for FW operations */
3738 #define DRIVE_CMDS_RESERVED_FOR_FW 2
3739 #define DRIVE_QUEUE_DEPTH 7
3741 le16_to_cpu(id_phys->current_queue_depth_limit) -
3742 DRIVE_CMDS_RESERVED_FOR_FW;
3744 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3747 static void hpsa_get_path_info(struct hpsa_scsi_dev_t *this_device,
3748 struct ReportExtendedLUNdata *rlep, int rle_index,
3749 struct bmic_identify_physical_device *id_phys)
3751 struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
3753 if ((rle->device_flags & 0x08) && this_device->ioaccel_handle)
3754 this_device->hba_ioaccel_enabled = 1;
3756 memcpy(&this_device->active_path_index,
3757 &id_phys->active_path_number,
3758 sizeof(this_device->active_path_index));
3759 memcpy(&this_device->path_map,
3760 &id_phys->redundant_path_present_map,
3761 sizeof(this_device->path_map));
3762 memcpy(&this_device->box,
3763 &id_phys->alternate_paths_phys_box_on_port,
3764 sizeof(this_device->box));
3765 memcpy(&this_device->phys_connector,
3766 &id_phys->alternate_paths_phys_connector,
3767 sizeof(this_device->phys_connector));
3768 memcpy(&this_device->bay,
3769 &id_phys->phys_bay_in_box,
3770 sizeof(this_device->bay));
3773 static void hpsa_update_scsi_devices(struct ctlr_info *h)
3775 /* the idea here is we could get notified
3776 * that some devices have changed, so we do a report
3777 * physical luns and report logical luns cmd, and adjust
3778 * our list of devices accordingly.
3780 * The scsi3addr's of devices won't change so long as the
3781 * adapter is not reset. That means we can rescan and
3782 * tell which devices we already know about, vs. new
3783 * devices, vs. disappearing devices.
3785 struct ReportExtendedLUNdata *physdev_list = NULL;
3786 struct ReportLUNdata *logdev_list = NULL;
3787 struct bmic_identify_physical_device *id_phys = NULL;
3790 u32 ndev_allocated = 0;
3791 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3793 int i, n_ext_target_devs, ndevs_to_allocate;
3794 int raid_ctlr_position;
3795 bool physical_device;
3796 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3798 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3799 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3800 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3801 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3802 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3804 if (!currentsd || !physdev_list || !logdev_list ||
3805 !tmpdevice || !id_phys) {
3806 dev_err(&h->pdev->dev, "out of memory\n");
3809 memset(lunzerobits, 0, sizeof(lunzerobits));
3811 h->drv_req_rescan = 0; /* cancel scheduled rescan - we're doing it. */
3813 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3814 logdev_list, &nlogicals)) {
3815 h->drv_req_rescan = 1;
3819 /* We might see up to the maximum number of logical and physical disks
3820 * plus external target devices, and a device for the local RAID
3823 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3825 /* Allocate the per device structures */
3826 for (i = 0; i < ndevs_to_allocate; i++) {
3827 if (i >= HPSA_MAX_DEVICES) {
3828 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3829 " %d devices ignored.\n", HPSA_MAX_DEVICES,
3830 ndevs_to_allocate - HPSA_MAX_DEVICES);
3834 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3835 if (!currentsd[i]) {
3836 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3837 __FILE__, __LINE__);
3838 h->drv_req_rescan = 1;
3844 if (is_scsi_rev_5(h))
3845 raid_ctlr_position = 0;
3847 raid_ctlr_position = nphysicals + nlogicals;
3849 /* adjust our table of devices */
3850 n_ext_target_devs = 0;
3851 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3852 u8 *lunaddrbytes, is_OBDR = 0;
3854 int phys_dev_index = i - (raid_ctlr_position == 0);
3856 physical_device = i < nphysicals + (raid_ctlr_position == 0);
3858 /* Figure out where the LUN ID info is coming from */
3859 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3860 i, nphysicals, nlogicals, physdev_list, logdev_list);
3862 /* skip masked non-disk devices */
3863 if (MASKED_DEVICE(lunaddrbytes) && physical_device &&
3864 (physdev_list->LUN[phys_dev_index].device_flags & 0x01))
3867 /* Get device type, vendor, model, device id */
3868 rc = hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3870 if (rc == -ENOMEM) {
3871 dev_warn(&h->pdev->dev,
3872 "Out of memory, rescan deferred.\n");
3873 h->drv_req_rescan = 1;
3877 dev_warn(&h->pdev->dev,
3878 "Inquiry failed, skipping device.\n");
3882 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3883 hpsa_update_device_supports_aborts(h, tmpdevice, lunaddrbytes);
3884 this_device = currentsd[ncurrent];
3887 * For external target devices, we have to insert a LUN 0 which
3888 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3889 * is nonetheless an enclosure device there. We have to
3890 * present it, otherwise Linux won't find anything if
3891 * there is no lun 0.
3893 if (add_ext_target_dev(h, tmpdevice, this_device,
3894 lunaddrbytes, lunzerobits,
3895 &n_ext_target_devs)) {
3897 this_device = currentsd[ncurrent];
3900 *this_device = *tmpdevice;
3901 this_device->physical_device = physical_device;
3904 * Expose all devices except for physical devices that
3907 if (MASKED_DEVICE(lunaddrbytes) && this_device->physical_device)
3908 this_device->expose_device = 0;
3910 this_device->expose_device = 1;
3912 switch (this_device->devtype) {
3914 /* We don't *really* support actual CD-ROM devices,
3915 * just "One Button Disaster Recovery" tape drive
3916 * which temporarily pretends to be a CD-ROM drive.
3917 * So we check that the device is really an OBDR tape
3918 * device by checking for "$DR-10" in bytes 43-48 of
3925 if (this_device->physical_device) {
3926 /* The disk is in HBA mode. */
3927 /* Never use RAID mapper in HBA mode. */
3928 this_device->offload_enabled = 0;
3929 hpsa_get_ioaccel_drive_info(h, this_device,
3930 physdev_list, phys_dev_index, id_phys);
3931 hpsa_get_path_info(this_device,
3932 physdev_list, phys_dev_index, id_phys);
3937 case TYPE_MEDIUM_CHANGER:
3938 case TYPE_ENCLOSURE:
3942 /* Only present the Smart Array HBA as a RAID controller.
3943 * If it's a RAID controller other than the HBA itself
3944 * (an external RAID controller, MSA500 or similar), don't present it.
3947 if (!is_hba_lunid(lunaddrbytes))
3954 if (ncurrent >= HPSA_MAX_DEVICES)
3957 adjust_hpsa_scsi_table(h, currentsd, ncurrent);
3960 for (i = 0; i < ndev_allocated; i++)
3961 kfree(currentsd[i]);
3963 kfree(physdev_list);
3968 static void hpsa_set_sg_descriptor(struct SGDescriptor *desc,
3969 struct scatterlist *sg)
3971 u64 addr64 = (u64) sg_dma_address(sg);
3972 unsigned int len = sg_dma_len(sg);
3974 desc->Addr = cpu_to_le64(addr64);
3975 desc->Len = cpu_to_le32(len);
3980 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
3981 * dma mapping and fills in the scatter gather entries of the
3984 static int hpsa_scatter_gather(struct ctlr_info *h,
3985 struct CommandList *cp,
3986 struct scsi_cmnd *cmd)
3988 struct scatterlist *sg;
3989 int use_sg, i, sg_limit, chained, last_sg;
3990 struct SGDescriptor *curr_sg;
3992 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3994 use_sg = scsi_dma_map(cmd);
3999 goto sglist_finished;
4002 * If the number of entries is greater than the max for a single list,
4003 * then we have a chained list; we will set up all but one entry in the
4004 * first list (the last entry is saved for link information);
4005 * otherwise, we don't have a chained list and we'll set up each of
4006 * the entries in the one list.
4009 chained = use_sg > h->max_cmd_sg_entries;
4010 sg_limit = chained ? h->max_cmd_sg_entries - 1 : use_sg;
4011 last_sg = scsi_sg_count(cmd) - 1;
4012 scsi_for_each_sg(cmd, sg, sg_limit, i) {
4013 hpsa_set_sg_descriptor(curr_sg, sg);
4019 * Continue with the chained list. Set curr_sg to the chained
4020 * list. Modify the limit to the total count less the entries
4021 * we've already set up. Resume the scan at the list entry
4022 * where the previous loop left off.
4024 curr_sg = h->cmd_sg_list[cp->cmdindex];
4025 sg_limit = use_sg - sg_limit;
4026 for_each_sg(sg, sg, sg_limit, i) {
4027 hpsa_set_sg_descriptor(curr_sg, sg);
4032 /* Back the pointer up to the last entry and mark it as "last". */
4033 (curr_sg - 1)->Ext = cpu_to_le32(HPSA_SG_LAST);
4035 if (use_sg + chained > h->maxSG)
4036 h->maxSG = use_sg + chained;
4039 cp->Header.SGList = h->max_cmd_sg_entries;
4040 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
4041 if (hpsa_map_sg_chain_block(h, cp)) {
4042 scsi_dma_unmap(cmd);
4050 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
4051 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
4055 #define IO_ACCEL_INELIGIBLE (1)
4056 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
4062 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
4069 if (*cdb_len == 6) {
4070 block = get_unaligned_be16(&cdb[2]);
4075 BUG_ON(*cdb_len != 12);
4076 block = get_unaligned_be32(&cdb[2]);
4077 block_cnt = get_unaligned_be32(&cdb[6]);
4079 if (block_cnt > 0xffff)
4080 return IO_ACCEL_INELIGIBLE;
4082 cdb[0] = is_write ? WRITE_10 : READ_10;
4084 cdb[2] = (u8) (block >> 24);
4085 cdb[3] = (u8) (block >> 16);
4086 cdb[4] = (u8) (block >> 8);
4087 cdb[5] = (u8) (block);
4089 cdb[7] = (u8) (block_cnt >> 8);
4090 cdb[8] = (u8) (block_cnt);
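/* Illustrative effect: a 6-byte or 12-byte READ/WRITE is rewritten in place
 * as the equivalent 10-byte command (provided the transfer count fits in
 * 16 bits) before being issued on the ioaccel path.
 */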
4098 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
4099 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4100 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4102 struct scsi_cmnd *cmd = c->scsi_cmd;
4103 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
4105 unsigned int total_len = 0;
4106 struct scatterlist *sg;
4109 struct SGDescriptor *curr_sg;
4110 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
4112 /* TODO: implement chaining support */
4113 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
4114 atomic_dec(&phys_disk->ioaccel_cmds_out);
4115 return IO_ACCEL_INELIGIBLE;
4118 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
4120 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4121 atomic_dec(&phys_disk->ioaccel_cmds_out);
4122 return IO_ACCEL_INELIGIBLE;
4125 c->cmd_type = CMD_IOACCEL1;
4127 /* Adjust the DMA address to point to the accelerated command buffer */
4128 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
4129 (c->cmdindex * sizeof(*cp));
4130 BUG_ON(c->busaddr & 0x0000007F);
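/* The low 7 bits must be clear: each ioaccel command block is expected to be
 * 128-byte aligned within the pool.
 */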
4132 use_sg = scsi_dma_map(cmd);
4134 atomic_dec(&phys_disk->ioaccel_cmds_out);
4140 scsi_for_each_sg(cmd, sg, use_sg, i) {
4141 addr64 = (u64) sg_dma_address(sg);
4142 len = sg_dma_len(sg);
4144 curr_sg->Addr = cpu_to_le64(addr64);
4145 curr_sg->Len = cpu_to_le32(len);
4146 curr_sg->Ext = cpu_to_le32(0);
4149 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
4151 switch (cmd->sc_data_direction) {
4153 control |= IOACCEL1_CONTROL_DATA_OUT;
4155 case DMA_FROM_DEVICE:
4156 control |= IOACCEL1_CONTROL_DATA_IN;
4159 control |= IOACCEL1_CONTROL_NODATAXFER;
4162 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4163 cmd->sc_data_direction);
4168 control |= IOACCEL1_CONTROL_NODATAXFER;
4171 c->Header.SGList = use_sg;
4172 /* Fill out the command structure to submit */
4173 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
4174 cp->transfer_len = cpu_to_le32(total_len);
4175 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
4176 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
4177 cp->control = cpu_to_le32(control);
4178 memcpy(cp->CDB, cdb, cdb_len);
4179 memcpy(cp->CISS_LUN, scsi3addr, 8);
4180 /* Tag was already set at init time. */
4181 enqueue_cmd_and_start_io(h, c);
4186 * Queue a command directly to a device behind the controller using the
4187 * I/O accelerator path.
4189 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
4190 struct CommandList *c)
4192 struct scsi_cmnd *cmd = c->scsi_cmd;
4193 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4197 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
4198 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
4202 * Set encryption parameters for the ioaccel2 request
4204 static void set_encrypt_ioaccel2(struct ctlr_info *h,
4205 struct CommandList *c, struct io_accel2_cmd *cp)
4207 struct scsi_cmnd *cmd = c->scsi_cmd;
4208 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4209 struct raid_map_data *map = &dev->raid_map;
4212 /* Are we doing encryption on this device */
4213 if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
4215 /* Set the data encryption key index. */
4216 cp->dekindex = map->dekindex;
4218 /* Set the encryption enable flag, encoded into direction field. */
4219 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
4221 /* Set encryption tweak values based on the logical block address.
4222 * If the block size is 512, the tweak value is the LBA.
4223 * For other block sizes, the tweak is (LBA * block size) / 512.
4225 switch (cmd->cmnd[0]) {
4226 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
4229 first_block = get_unaligned_be16(&cmd->cmnd[2]);
4233 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
4236 first_block = get_unaligned_be32(&cmd->cmnd[2]);
4240 first_block = get_unaligned_be64(&cmd->cmnd[2]);
4243 dev_err(&h->pdev->dev,
4244 "ERROR: %s: size (0x%x) not supported for encryption\n",
4245 __func__, cmd->cmnd[0]);
4250 if (le32_to_cpu(map->volume_blk_size) != 512)
4251 first_block = first_block *
4252 le32_to_cpu(map->volume_blk_size)/512;
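/* Example (illustrative): with a 4096-byte volume block size, an LBA of 100
 * yields a tweak of 100 * 4096 / 512 = 800.
 */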
4254 cp->tweak_lower = cpu_to_le32(first_block);
4255 cp->tweak_upper = cpu_to_le32(first_block >> 32);
4258 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
4259 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4260 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4262 struct scsi_cmnd *cmd = c->scsi_cmd;
4263 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
4264 struct ioaccel2_sg_element *curr_sg;
4266 struct scatterlist *sg;
4271 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
4273 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
4274 atomic_dec(&phys_disk->ioaccel_cmds_out);
4275 return IO_ACCEL_INELIGIBLE;
4278 c->cmd_type = CMD_IOACCEL2;
4279 /* Adjust the DMA address to point to the accelerated command buffer */
4280 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
4281 (c->cmdindex * sizeof(*cp));
4282 BUG_ON(c->busaddr & 0x0000007F);
4284 memset(cp, 0, sizeof(*cp));
4285 cp->IU_type = IOACCEL2_IU_TYPE;
4287 use_sg = scsi_dma_map(cmd);
4289 atomic_dec(&phys_disk->ioaccel_cmds_out);
4295 if (use_sg > h->ioaccel_maxsg) {
4296 addr64 = le64_to_cpu(
4297 h->ioaccel2_cmd_sg_list[c->cmdindex]->address);
4298 curr_sg->address = cpu_to_le64(addr64);
4299 curr_sg->length = 0;
4300 curr_sg->reserved[0] = 0;
4301 curr_sg->reserved[1] = 0;
4302 curr_sg->reserved[2] = 0;
4303 curr_sg->chain_indicator = 0x80;
4305 curr_sg = h->ioaccel2_cmd_sg_list[c->cmdindex];
4307 scsi_for_each_sg(cmd, sg, use_sg, i) {
4308 addr64 = (u64) sg_dma_address(sg);
4309 len = sg_dma_len(sg);
4311 curr_sg->address = cpu_to_le64(addr64);
4312 curr_sg->length = cpu_to_le32(len);
4313 curr_sg->reserved[0] = 0;
4314 curr_sg->reserved[1] = 0;
4315 curr_sg->reserved[2] = 0;
4316 curr_sg->chain_indicator = 0;
4320 switch (cmd->sc_data_direction) {
4322 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4323 cp->direction |= IOACCEL2_DIR_DATA_OUT;
4325 case DMA_FROM_DEVICE:
4326 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4327 cp->direction |= IOACCEL2_DIR_DATA_IN;
4330 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4331 cp->direction |= IOACCEL2_DIR_NO_DATA;
4334 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4335 cmd->sc_data_direction);
4340 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
4341 cp->direction |= IOACCEL2_DIR_NO_DATA;
4344 /* Set encryption parameters, if necessary */
4345 set_encrypt_ioaccel2(h, c, cp);
4347 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
4348 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
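/* The tag carries the command index so completion processing can look the
 * command up directly rather than searching.
 */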
4349 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
4351 cp->data_len = cpu_to_le32(total_len);
4352 cp->err_ptr = cpu_to_le64(c->busaddr +
4353 offsetof(struct io_accel2_cmd, error_data));
4354 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
4356 /* fill in sg elements */
4357 if (use_sg > h->ioaccel_maxsg) {
4359 cp->sg[0].length = cpu_to_le32(use_sg * sizeof(cp->sg[0]));
4360 if (hpsa_map_ioaccel2_sg_chain_block(h, cp, c)) {
4361 atomic_dec(&phys_disk->ioaccel_cmds_out);
4362 scsi_dma_unmap(cmd);
4366 cp->sg_count = (u8) use_sg;
4368 enqueue_cmd_and_start_io(h, c);
4373 * Queue a command to the correct I/O accelerator path.
4375 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
4376 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
4377 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
4379 /* Try to honor the device's queue depth */
4380 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
4381 phys_disk->queue_depth) {
4382 atomic_dec(&phys_disk->ioaccel_cmds_out);
4383 return IO_ACCEL_INELIGIBLE;
4385 if (h->transMethod & CFGTBL_Trans_io_accel1)
4386 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
4387 cdb, cdb_len, scsi3addr,
4390 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
4391 cdb, cdb_len, scsi3addr,
4395 static void raid_map_helper(struct raid_map_data *map,
4396 int offload_to_mirror, u32 *map_index, u32 *current_group)
4398 if (offload_to_mirror == 0) {
4399 /* use physical disk in the first mirrored group. */
4400 *map_index %= le16_to_cpu(map->data_disks_per_row);
4404 /* determine mirror group that *map_index indicates */
4405 *current_group = *map_index /
4406 le16_to_cpu(map->data_disks_per_row);
4407 if (offload_to_mirror == *current_group)
4409 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
4410 /* select map index from next group */
4411 *map_index += le16_to_cpu(map->data_disks_per_row);
4414 /* select map index from first group */
4415 *map_index %= le16_to_cpu(map->data_disks_per_row);
4418 } while (offload_to_mirror != *current_group);
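/* Example (illustrative): with data_disks_per_row = 4 and a 3-way mirror,
 * map_index 5 (mirror group 1) and offload_to_mirror = 2 advance to
 * map_index 9, the same column's copy in mirror group 2.
 */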
4422 * Attempt to perform offload RAID mapping for a logical volume I/O.
4424 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
4425 struct CommandList *c)
4427 struct scsi_cmnd *cmd = c->scsi_cmd;
4428 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4429 struct raid_map_data *map = &dev->raid_map;
4430 struct raid_map_disk_data *dd = &map->data[0];
4433 u64 first_block, last_block;
4436 u64 first_row, last_row;
4437 u32 first_row_offset, last_row_offset;
4438 u32 first_column, last_column;
4439 u64 r0_first_row, r0_last_row;
4440 u32 r5or6_blocks_per_row;
4441 u64 r5or6_first_row, r5or6_last_row;
4442 u32 r5or6_first_row_offset, r5or6_last_row_offset;
4443 u32 r5or6_first_column, r5or6_last_column;
4444 u32 total_disks_per_row;
4446 u32 first_group, last_group, current_group;
4454 #if BITS_PER_LONG == 32
4457 int offload_to_mirror;
4459 /* check for valid opcode, get LBA and block count */
4460 switch (cmd->cmnd[0]) {
4464 first_block = get_unaligned_be16(&cmd->cmnd[2]);
4465 block_cnt = cmd->cmnd[4];
4473 (((u64) cmd->cmnd[2]) << 24) |
4474 (((u64) cmd->cmnd[3]) << 16) |
4475 (((u64) cmd->cmnd[4]) << 8) |
4478 (((u32) cmd->cmnd[7]) << 8) |
4485 (((u64) cmd->cmnd[2]) << 24) |
4486 (((u64) cmd->cmnd[3]) << 16) |
4487 (((u64) cmd->cmnd[4]) << 8) |
4490 (((u32) cmd->cmnd[6]) << 24) |
4491 (((u32) cmd->cmnd[7]) << 16) |
4492 (((u32) cmd->cmnd[8]) << 8) |
4499 (((u64) cmd->cmnd[2]) << 56) |
4500 (((u64) cmd->cmnd[3]) << 48) |
4501 (((u64) cmd->cmnd[4]) << 40) |
4502 (((u64) cmd->cmnd[5]) << 32) |
4503 (((u64) cmd->cmnd[6]) << 24) |
4504 (((u64) cmd->cmnd[7]) << 16) |
4505 (((u64) cmd->cmnd[8]) << 8) |
4508 (((u32) cmd->cmnd[10]) << 24) |
4509 (((u32) cmd->cmnd[11]) << 16) |
4510 (((u32) cmd->cmnd[12]) << 8) |
4514 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
4516 last_block = first_block + block_cnt - 1;
4518 /* check for write to non-RAID-0 */
4519 if (is_write && dev->raid_level != 0)
4520 return IO_ACCEL_INELIGIBLE;
4522 /* check for invalid block or wraparound */
4523 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
4524 last_block < first_block)
4525 return IO_ACCEL_INELIGIBLE;
4527 /* calculate stripe information for the request */
4528 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
4529 le16_to_cpu(map->strip_size);
4530 strip_size = le16_to_cpu(map->strip_size);
4531 #if BITS_PER_LONG == 32
4532 tmpdiv = first_block;
4533 (void) do_div(tmpdiv, blocks_per_row);
4535 tmpdiv = last_block;
4536 (void) do_div(tmpdiv, blocks_per_row);
4538 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4539 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4540 tmpdiv = first_row_offset;
4541 (void) do_div(tmpdiv, strip_size);
4542 first_column = tmpdiv;
4543 tmpdiv = last_row_offset;
4544 (void) do_div(tmpdiv, strip_size);
4545 last_column = tmpdiv;
4547 first_row = first_block / blocks_per_row;
4548 last_row = last_block / blocks_per_row;
4549 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
4550 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
4551 first_column = first_row_offset / strip_size;
4552 last_column = last_row_offset / strip_size;
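/* Example (illustrative): with 4 data disks and a strip size of 128 blocks,
 * blocks_per_row = 512; block 1000 falls in row 1, row offset 488, column 3.
 */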
4555 /* if this isn't a single row/column then give to the controller */
4556 if ((first_row != last_row) || (first_column != last_column))
4557 return IO_ACCEL_INELIGIBLE;
4559 /* proceeding with driver mapping */
4560 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
4561 le16_to_cpu(map->metadata_disks_per_row);
4562 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4563 le16_to_cpu(map->row_cnt);
4564 map_index = (map_row * total_disks_per_row) + first_column;
4566 switch (dev->raid_level) {
4568 break; /* nothing special to do */
4570 /* Handles load balance across RAID 1 members.
4571 * (2-drive R1 and R10 with even # of drives.)
4572 * Appropriate for SSDs, not optimal for HDDs
4574 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
4575 if (dev->offload_to_mirror)
4576 map_index += le16_to_cpu(map->data_disks_per_row);
4577 dev->offload_to_mirror = !dev->offload_to_mirror;
4580 /* Handles N-way mirrors (R1-ADM)
4581 * and R10 with # of drives divisible by 3.
4583 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
4585 offload_to_mirror = dev->offload_to_mirror;
4586 raid_map_helper(map, offload_to_mirror,
4587 &map_index, &current_group);
4588 /* set mirror group to use next time */
4590 (offload_to_mirror >=
4591 le16_to_cpu(map->layout_map_count) - 1)
4592 ? 0 : offload_to_mirror + 1;
4593 dev->offload_to_mirror = offload_to_mirror;
4594 /* Avoid direct use of dev->offload_to_mirror within this
4595 * function since multiple threads might simultaneously
4596 * increment it beyond the range of dev->layout_map_count -1.
4601 if (le16_to_cpu(map->layout_map_count) <= 1)
4604 /* Verify first and last block are in same RAID group */
4605 r5or6_blocks_per_row =
4606 le16_to_cpu(map->strip_size) *
4607 le16_to_cpu(map->data_disks_per_row);
4608 BUG_ON(r5or6_blocks_per_row == 0);
4609 stripesize = r5or6_blocks_per_row *
4610 le16_to_cpu(map->layout_map_count);
4611 #if BITS_PER_LONG == 32
4612 tmpdiv = first_block;
4613 first_group = do_div(tmpdiv, stripesize);
4614 tmpdiv = first_group;
4615 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4616 first_group = tmpdiv;
4617 tmpdiv = last_block;
4618 last_group = do_div(tmpdiv, stripesize);
4619 tmpdiv = last_group;
4620 (void) do_div(tmpdiv, r5or6_blocks_per_row);
4621 last_group = tmpdiv;
4623 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
4624 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
4626 if (first_group != last_group)
4627 return IO_ACCEL_INELIGIBLE;
4629 /* Verify request is in a single row of RAID 5/6 */
4630 #if BITS_PER_LONG == 32
4631 tmpdiv = first_block;
4632 (void) do_div(tmpdiv, stripesize);
4633 first_row = r5or6_first_row = r0_first_row = tmpdiv;
4634 tmpdiv = last_block;
4635 (void) do_div(tmpdiv, stripesize);
4636 r5or6_last_row = r0_last_row = tmpdiv;
4638 first_row = r5or6_first_row = r0_first_row =
4639 first_block / stripesize;
4640 r5or6_last_row = r0_last_row = last_block / stripesize;
4642 if (r5or6_first_row != r5or6_last_row)
4643 return IO_ACCEL_INELIGIBLE;
4646 /* Verify request is in a single column */
4647 #if BITS_PER_LONG == 32
4648 tmpdiv = first_block;
4649 first_row_offset = do_div(tmpdiv, stripesize);
4650 tmpdiv = first_row_offset;
4651 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
4652 r5or6_first_row_offset = first_row_offset;
4653 tmpdiv = last_block;
4654 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
4655 tmpdiv = r5or6_last_row_offset;
4656 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
4657 tmpdiv = r5or6_first_row_offset;
4658 (void) do_div(tmpdiv, map->strip_size);
4659 first_column = r5or6_first_column = tmpdiv;
4660 tmpdiv = r5or6_last_row_offset;
4661 (void) do_div(tmpdiv, map->strip_size);
4662 r5or6_last_column = tmpdiv;
4664 first_row_offset = r5or6_first_row_offset =
4665 (u32)((first_block % stripesize) %
4666 r5or6_blocks_per_row);
4668 r5or6_last_row_offset =
4669 (u32)((last_block % stripesize) %
4670 r5or6_blocks_per_row);
4672 first_column = r5or6_first_column =
4673 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
4675 r5or6_last_row_offset / le16_to_cpu(map->strip_size);
4677 if (r5or6_first_column != r5or6_last_column)
4678 return IO_ACCEL_INELIGIBLE;
4680 /* Request is eligible */
4681 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
4682 le16_to_cpu(map->row_cnt);
4684 map_index = (first_group *
4685 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
4686 (map_row * total_disks_per_row) + first_column;
4689 return IO_ACCEL_INELIGIBLE;
4692 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
4693 return IO_ACCEL_INELIGIBLE;
4695 c->phys_disk = dev->phys_disk[map_index];
4697 disk_handle = dd[map_index].ioaccel_handle;
4698 disk_block = le64_to_cpu(map->disk_starting_blk) +
4699 first_row * le16_to_cpu(map->strip_size) +
4700 (first_row_offset - first_column *
4701 le16_to_cpu(map->strip_size));
4702 disk_block_cnt = block_cnt;
4704 /* handle differing logical/physical block sizes */
4705 if (map->phys_blk_shift) {
4706 disk_block <<= map->phys_blk_shift;
4707 disk_block_cnt <<= map->phys_blk_shift;
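/* A sketch of the intent (assumption): if the volume exposes larger logical
 * blocks than the physical disks (e.g. 4K volume blocks over 512-byte disk
 * blocks), phys_blk_shift scales both the starting block and the count.
 */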
4709 BUG_ON(disk_block_cnt > 0xffff);
4711 /* build the new CDB for the physical disk I/O */
4712 if (disk_block > 0xffffffff) {
4713 cdb[0] = is_write ? WRITE_16 : READ_16;
4715 cdb[2] = (u8) (disk_block >> 56);
4716 cdb[3] = (u8) (disk_block >> 48);
4717 cdb[4] = (u8) (disk_block >> 40);
4718 cdb[5] = (u8) (disk_block >> 32);
4719 cdb[6] = (u8) (disk_block >> 24);
4720 cdb[7] = (u8) (disk_block >> 16);
4721 cdb[8] = (u8) (disk_block >> 8);
4722 cdb[9] = (u8) (disk_block);
4723 cdb[10] = (u8) (disk_block_cnt >> 24);
4724 cdb[11] = (u8) (disk_block_cnt >> 16);
4725 cdb[12] = (u8) (disk_block_cnt >> 8);
4726 cdb[13] = (u8) (disk_block_cnt);
4731 cdb[0] = is_write ? WRITE_10 : READ_10;
4733 cdb[2] = (u8) (disk_block >> 24);
4734 cdb[3] = (u8) (disk_block >> 16);
4735 cdb[4] = (u8) (disk_block >> 8);
4736 cdb[5] = (u8) (disk_block);
4738 cdb[7] = (u8) (disk_block_cnt >> 8);
4739 cdb[8] = (u8) (disk_block_cnt);
4743 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
4745 dev->phys_disk[map_index]);
4749 * Submit commands down the "normal" RAID stack path.
4750 * All callers to hpsa_ciss_submit must check lockup_detected
4751 * beforehand: before (optionally) and again after calling cmd_alloc.
4753 static int hpsa_ciss_submit(struct ctlr_info *h,
4754 struct CommandList *c, struct scsi_cmnd *cmd,
4755 unsigned char scsi3addr[])
4757 cmd->host_scribble = (unsigned char *) c;
4758 c->cmd_type = CMD_SCSI;
4760 c->Header.ReplyQueue = 0; /* unused in simple mode */
4761 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
4762 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
4764 /* Fill in the request block... */
4766 c->Request.Timeout = 0;
4767 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
4768 c->Request.CDBLen = cmd->cmd_len;
4769 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4770 switch (cmd->sc_data_direction) {
4772 c->Request.type_attr_dir =
4773 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
4775 case DMA_FROM_DEVICE:
4776 c->Request.type_attr_dir =
4777 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
4780 c->Request.type_attr_dir =
4781 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
4783 case DMA_BIDIRECTIONAL:
4784 /* This can happen if a buggy application does a SCSI passthru
4785 * and sets both inlen and outlen to non-zero. (See
4786 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command().)
4789 c->Request.type_attr_dir =
4790 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
4791 /* This is technically wrong, and hpsa controllers should
4792 * reject it with CMD_INVALID, which is the most correct
4793 * response, but non-fibre backends appear to let it
4794 * slide by, and give the same results as if this field
4795 * were set correctly. Either way is acceptable for
4796 * our purposes here.
4802 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4803 cmd->sc_data_direction);
4808 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4809 hpsa_cmd_resolve_and_free(h, c);
4810 return SCSI_MLQUEUE_HOST_BUSY;
4812 enqueue_cmd_and_start_io(h, c);
4813 /* the cmd'll come back via intr handler in complete_scsi_command() */
4817 static void hpsa_cmd_init(struct ctlr_info *h, int index,
4818 struct CommandList *c)
4820 dma_addr_t cmd_dma_handle, err_dma_handle;
4822 /* Zero out all of the CommandList except the last field, refcount */
4823 memset(c, 0, offsetof(struct CommandList, refcount));
4824 c->Header.tag = cpu_to_le64((u64) (index << DIRECT_LOOKUP_SHIFT));
4825 cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4826 c->err_info = h->errinfo_pool + index;
4827 memset(c->err_info, 0, sizeof(*c->err_info));
4828 err_dma_handle = h->errinfo_pool_dhandle
4829 + index * sizeof(*c->err_info);
4830 c->cmdindex = index;
4831 c->busaddr = (u32) cmd_dma_handle;
4832 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4833 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
4835 c->scsi_cmd = SCSI_CMD_IDLE;
4838 static void hpsa_preinitialize_commands(struct ctlr_info *h)
4842 for (i = 0; i < h->nr_cmds; i++) {
4843 struct CommandList *c = h->cmd_pool + i;
4845 hpsa_cmd_init(h, i, c);
4846 atomic_set(&c->refcount, 0);
4850 static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
4851 struct CommandList *c)
4853 dma_addr_t cmd_dma_handle = h->cmd_pool_dhandle + index * sizeof(*c);
4855 BUG_ON(c->cmdindex != index);
4857 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
4858 memset(c->err_info, 0, sizeof(*c->err_info));
4859 c->busaddr = (u32) cmd_dma_handle;
4862 static int hpsa_ioaccel_submit(struct ctlr_info *h,
4863 struct CommandList *c, struct scsi_cmnd *cmd,
4864 unsigned char *scsi3addr)
4866 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
4867 int rc = IO_ACCEL_INELIGIBLE;
4869 cmd->host_scribble = (unsigned char *) c;
4871 if (dev->offload_enabled) {
4872 hpsa_cmd_init(h, c->cmdindex, c);
4873 c->cmd_type = CMD_SCSI;
4875 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4876 if (rc < 0) /* scsi_dma_map failed. */
4877 rc = SCSI_MLQUEUE_HOST_BUSY;
4878 } else if (dev->hba_ioaccel_enabled) {
4879 hpsa_cmd_init(h, c->cmdindex, c);
4880 c->cmd_type = CMD_SCSI;
4882 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4883 if (rc < 0) /* scsi_dma_map failed. */
4884 rc = SCSI_MLQUEUE_HOST_BUSY;
4889 static void hpsa_command_resubmit_worker(struct work_struct *work)
4891 struct scsi_cmnd *cmd;
4892 struct hpsa_scsi_dev_t *dev;
4893 struct CommandList *c = container_of(work, struct CommandList, work);
4896 dev = cmd->device->hostdata;
4898 cmd->result = DID_NO_CONNECT << 16;
4899 return hpsa_cmd_free_and_done(c->h, c, cmd);
4901 if (c->reset_pending)
4902 return hpsa_cmd_resolve_and_free(c->h, c);
4903 if (c->abort_pending)
4904 return hpsa_cmd_abort_and_free(c->h, c, cmd);
4905 if (c->cmd_type == CMD_IOACCEL2) {
4906 struct ctlr_info *h = c->h;
4907 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
4910 if (c2->error_data.serv_response ==
4911 IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
4912 rc = hpsa_ioaccel_submit(h, c, cmd, dev->scsi3addr);
4915 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4917 * If we get here, it means dma mapping failed.
4918 * Try again via scsi mid layer, which will
4919 * then get SCSI_MLQUEUE_HOST_BUSY.
4921 cmd->result = DID_IMM_RETRY << 16;
4922 return hpsa_cmd_free_and_done(h, c, cmd);
4924 /* else, fall thru and resubmit down CISS path */
4927 hpsa_cmd_partial_init(c->h, c->cmdindex, c);
4928 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4930 * If we get here, it means dma mapping failed. Try
4931 * again via scsi mid layer, which will then get
4932 * SCSI_MLQUEUE_HOST_BUSY.
4934 * hpsa_ciss_submit will have already freed c
4935 * if it encountered a dma mapping failure.
4937 cmd->result = DID_IMM_RETRY << 16;
4938 cmd->scsi_done(cmd);
4942 /* Running in struct Scsi_Host->host_lock-less mode */
4943 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4945 struct ctlr_info *h;
4946 struct hpsa_scsi_dev_t *dev;
4947 unsigned char scsi3addr[8];
4948 struct CommandList *c;
4951 /* Get the ptr to our adapter structure out of cmd->host. */
4952 h = sdev_to_hba(cmd->device);
4954 BUG_ON(cmd->request->tag < 0);
4956 dev = cmd->device->hostdata;
4958 cmd->result = DID_NO_CONNECT << 16;
4959 cmd->scsi_done(cmd);
4963 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
4965 if (unlikely(lockup_detected(h))) {
4966 cmd->result = DID_NO_CONNECT << 16;
4967 cmd->scsi_done(cmd);
4970 c = cmd_tagged_alloc(h, cmd);
4973 * Call alternate submit routine for I/O accelerated commands.
4974 * Retries always go down the normal I/O path.
4976 if (likely(cmd->retries == 0 &&
4977 cmd->request->cmd_type == REQ_TYPE_FS &&
4978 h->acciopath_status)) {
4979 rc = hpsa_ioaccel_submit(h, c, cmd, scsi3addr);
4982 if (rc == SCSI_MLQUEUE_HOST_BUSY) {
4983 hpsa_cmd_resolve_and_free(h, c);
4984 return SCSI_MLQUEUE_HOST_BUSY;
4987 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
4990 static void hpsa_scan_complete(struct ctlr_info *h)
4992 unsigned long flags;
4994 spin_lock_irqsave(&h->scan_lock, flags);
4995 h->scan_finished = 1;
4996 wake_up_all(&h->scan_wait_queue);
4997 spin_unlock_irqrestore(&h->scan_lock, flags);
5000 static void hpsa_scan_start(struct Scsi_Host *sh)
5002 struct ctlr_info *h = shost_to_hba(sh);
5003 unsigned long flags;
5006 * Don't let rescans be initiated on a controller known to be locked
5007 * up. If the controller locks up *during* a rescan, that thread is
5008 * probably hosed, but at least we can prevent new rescan threads from
5009 * piling up on a locked up controller.
5011 if (unlikely(lockup_detected(h)))
5012 return hpsa_scan_complete(h);
5014 /* wait until any scan already in progress is finished. */
5016 spin_lock_irqsave(&h->scan_lock, flags);
5017 if (h->scan_finished)
5019 spin_unlock_irqrestore(&h->scan_lock, flags);
5020 wait_event(h->scan_wait_queue, h->scan_finished);
5021 /* Note: We don't need to worry about a race between this
5022 * thread and driver unload because the midlayer will
5023 * have incremented the reference count, so unload won't
5024 * happen if we're in here.
5027 h->scan_finished = 0; /* mark scan as in progress */
5028 spin_unlock_irqrestore(&h->scan_lock, flags);
5030 if (unlikely(lockup_detected(h)))
5031 return hpsa_scan_complete(h);
5033 hpsa_update_scsi_devices(h);
5035 hpsa_scan_complete(h);
5038 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
5040 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
5047 else if (qdepth > logical_drive->queue_depth)
5048 qdepth = logical_drive->queue_depth;
5050 return scsi_change_queue_depth(sdev, qdepth);
5053 static int hpsa_scan_finished(struct Scsi_Host *sh,
5054 unsigned long elapsed_time)
5056 struct ctlr_info *h = shost_to_hba(sh);
5057 unsigned long flags;
5060 spin_lock_irqsave(&h->scan_lock, flags);
5061 finished = h->scan_finished;
5062 spin_unlock_irqrestore(&h->scan_lock, flags);
5066 static int hpsa_scsi_host_alloc(struct ctlr_info *h)
5068 struct Scsi_Host *sh;
5071 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
5073 dev_err(&h->pdev->dev, "scsi_host_alloc failed\n");
5080 sh->max_channel = 3;
5081 sh->max_cmd_len = MAX_COMMAND_SIZE;
5082 sh->max_lun = HPSA_MAX_LUN;
5083 sh->max_id = HPSA_MAX_LUN;
5084 sh->can_queue = h->nr_cmds - HPSA_NRESERVED_CMDS;
5085 sh->cmd_per_lun = sh->can_queue;
5086 sh->sg_tablesize = h->maxsgentries;
5087 sh->hostdata[0] = (unsigned long) h;
5088 sh->irq = h->intr[h->intr_mode];
5089 sh->unique_id = sh->irq;
5090 error = scsi_init_shared_tag_map(sh, sh->can_queue);
5092 dev_err(&h->pdev->dev,
5093 "%s: scsi_init_shared_tag_map failed for controller %d\n",
5102 static int hpsa_scsi_add_host(struct ctlr_info *h)
5106 rv = scsi_add_host(h->scsi_host, &h->pdev->dev);
5108 dev_err(&h->pdev->dev, "scsi_add_host failed\n");
5111 scsi_scan_host(h->scsi_host);
5116 * The block layer has already gone to the trouble of picking out a unique,
5117 * small-integer tag for this request. We use an offset from that value as
5118 * an index to select our command block. (The offset allows us to reserve the
5119 * low-numbered entries for our own uses.)
5121 static int hpsa_get_cmd_index(struct scsi_cmnd *scmd)
5123 int idx = scmd->request->tag;
5128 /* Offset to leave space for internal cmds. */
5129 return idx += HPSA_NRESERVED_CMDS;
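/* E.g. block-layer tag 0 selects command block HPSA_NRESERVED_CMDS, leaving
 * indexes 0..HPSA_NRESERVED_CMDS-1 for driver-initiated commands.
 */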
5133 * Send a TEST_UNIT_READY command to the specified LUN using the specified
5134 * reply queue; returns zero if the unit is ready, and non-zero otherwise.
5136 static int hpsa_send_test_unit_ready(struct ctlr_info *h,
5137 struct CommandList *c, unsigned char lunaddr[],
5142 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
5143 (void) fill_cmd(c, TEST_UNIT_READY, h,
5144 NULL, 0, 0, lunaddr, TYPE_CMD);
5145 rc = hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5148 /* no unmap needed here because no data xfer. */
5150 /* Check if the unit is already ready. */
5151 if (c->err_info->CommandStatus == CMD_SUCCESS)
5155 * The first command sent after reset will receive "unit attention" to
5156 * indicate that the LUN has been reset...this is actually what we're
5157 * looking for (but success is good too).
5159 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5160 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
5161 (c->err_info->SenseInfo[2] == NO_SENSE ||
5162 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
5169 * Wait for a TEST_UNIT_READY command to complete, retrying as necessary;
5170 * returns zero when the unit is ready, and non-zero when giving up.
5172 static int hpsa_wait_for_test_unit_ready(struct ctlr_info *h,
5173 struct CommandList *c,
5174 unsigned char lunaddr[], int reply_queue)
5178 int waittime = 1; /* seconds */
5180 /* Send test unit ready until device ready, or give up. */
5181 for (count = 0; count < HPSA_TUR_RETRY_LIMIT; count++) {
5184 * Wait for a bit. do this first, because if we send
5185 * the TUR right away, the reset will just abort it.
5187 msleep(1000 * waittime);
5189 rc = hpsa_send_test_unit_ready(h, c, lunaddr, reply_queue);
5193 /* Increase wait time with each try, up to a point. */
5194 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
5197 dev_warn(&h->pdev->dev,
5198 "waiting %d secs for device to become ready.\n",
5205 static int wait_for_device_to_become_ready(struct ctlr_info *h,
5206 unsigned char lunaddr[],
5213 struct CommandList *c;
5218 * If no specific reply queue was requested, then send the TUR
5219 * repeatedly, requesting a reply on each reply queue; otherwise execute
5220 * the loop exactly once using only the specified queue.
5222 if (reply_queue == DEFAULT_REPLY_QUEUE) {
5224 last_queue = h->nreply_queues - 1;
5226 first_queue = reply_queue;
5227 last_queue = reply_queue;
5230 for (rq = first_queue; rq <= last_queue; rq++) {
5231 rc = hpsa_wait_for_test_unit_ready(h, c, lunaddr, rq);
5237 dev_warn(&h->pdev->dev, "giving up on device.\n");
5239 dev_warn(&h->pdev->dev, "device is ready.\n");
5245 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
5246 * complaining. Doing a host- or bus-reset can't do anything good here.
5248 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
5251 struct ctlr_info *h;
5252 struct hpsa_scsi_dev_t *dev;
5256 /* find the controller to which the command to be aborted was sent */
5257 h = sdev_to_hba(scsicmd->device);
5258 if (h == NULL) /* paranoia */
5261 if (lockup_detected(h))
5264 dev = scsicmd->device->hostdata;
5266 dev_err(&h->pdev->dev, "%s: device lookup failed\n", __func__);
5270 /* if controller locked up, we can guarantee command won't complete */
5271 if (lockup_detected(h)) {
5272 snprintf(msg, sizeof(msg),
5273 "cmd %d RESET FAILED, lockup detected",
5274 hpsa_get_cmd_index(scsicmd));
5275 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5279 /* this reset request might be the result of a lockup; check */
5280 if (detect_controller_lockup(h)) {
5281 snprintf(msg, sizeof(msg),
5282 "cmd %d RESET FAILED, new lockup detected",
5283 hpsa_get_cmd_index(scsicmd));
5284 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5288 /* Do not attempt on controller */
5289 if (is_hba_lunid(dev->scsi3addr))
5292 if (is_logical_dev_addr_mode(dev->scsi3addr))
5293 reset_type = HPSA_DEVICE_RESET_MSG;
5295 reset_type = HPSA_PHYS_TARGET_RESET;
5297 sprintf(msg, "resetting %s",
5298 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ");
5299 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5301 h->reset_in_progress = 1;
5303 /* send a reset to the SCSI LUN which the command was sent to */
5304 rc = hpsa_do_reset(h, dev, dev->scsi3addr, reset_type,
5305 DEFAULT_REPLY_QUEUE);
5306 sprintf(msg, "reset %s %s",
5307 reset_type == HPSA_DEVICE_RESET_MSG ? "logical " : "physical ",
5308 rc == 0 ? "completed successfully" : "failed");
5309 hpsa_show_dev_msg(KERN_WARNING, h, dev, msg);
5310 h->reset_in_progress = 0;
5311 return rc == 0 ? SUCCESS : FAILED;
5314 static void swizzle_abort_tag(u8 *tag)
5318 memcpy(original_tag, tag, 8);
5319 tag[0] = original_tag[3];
5320 tag[1] = original_tag[2];
5321 tag[2] = original_tag[1];
5322 tag[3] = original_tag[0];
5323 tag[4] = original_tag[7];
5324 tag[5] = original_tag[6];
5325 tag[6] = original_tag[5];
5326 tag[7] = original_tag[4];
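/* Net effect: each 32-bit half of the 8-byte tag has its byte order reversed,
 * e.g. 00 01 02 03 04 05 06 07 becomes 03 02 01 00 07 06 05 04 (illustrative
 * byte values).
 */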
5329 static void hpsa_get_tag(struct ctlr_info *h,
5330 struct CommandList *c, __le32 *taglower, __le32 *tagupper)
5333 if (c->cmd_type == CMD_IOACCEL1) {
5334 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
5335 &h->ioaccel_cmd_pool[c->cmdindex];
5336 tag = le64_to_cpu(cm1->tag);
5337 *tagupper = cpu_to_le32(tag >> 32);
5338 *taglower = cpu_to_le32(tag);
5341 if (c->cmd_type == CMD_IOACCEL2) {
5342 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
5343 &h->ioaccel2_cmd_pool[c->cmdindex];
5344 /* upper tag not used in ioaccel2 mode */
5345 memset(tagupper, 0, sizeof(*tagupper));
5346 *taglower = cm2->Tag;
5349 tag = le64_to_cpu(c->Header.tag);
5350 *tagupper = cpu_to_le32(tag >> 32);
5351 *taglower = cpu_to_le32(tag);
5354 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
5355 struct CommandList *abort, int reply_queue)
5358 struct CommandList *c;
5359 struct ErrorInfo *ei;
5360 __le32 tagupper, taglower;
5364 /* fill_cmd can't fail here, no buffer to map */
5365 (void) fill_cmd(c, HPSA_ABORT_MSG, h, &abort->Header.tag,
5366 0, 0, scsi3addr, TYPE_MSG);
5367 if (h->needs_abort_tags_swizzled)
5368 swizzle_abort_tag(&c->Request.CDB[4]);
5369 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5370 hpsa_get_tag(h, abort, &taglower, &tagupper);
5371 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd(abort) completed.\n",
5372 __func__, tagupper, taglower);
5373 /* no unmap needed here because no data xfer. */
5376 switch (ei->CommandStatus) {
5379 case CMD_TMF_STATUS:
5380 rc = hpsa_evaluate_tmf_status(h, c);
5382 case CMD_UNABORTABLE: /* Very common, don't make noise. */
5386 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
5387 __func__, tagupper, taglower);
5388 hpsa_scsi_interpret_error(h, c);
5393 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
5394 __func__, tagupper, taglower);
5398 static void setup_ioaccel2_abort_cmd(struct CommandList *c, struct ctlr_info *h,
5399 struct CommandList *command_to_abort, int reply_queue)
5401 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5402 struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *) c2;
5403 struct io_accel2_cmd *c2a =
5404 &h->ioaccel2_cmd_pool[command_to_abort->cmdindex];
5405 struct scsi_cmnd *scmd = command_to_abort->scsi_cmd;
5406 struct hpsa_scsi_dev_t *dev = scmd->device->hostdata;
5409 * We're overlaying struct hpsa_tmf_struct on top of something which
5410 * was allocated as a struct io_accel2_cmd, so we had better be sure it
5411 * actually fits, and doesn't overrun the error info space.
5413 BUILD_BUG_ON(sizeof(struct hpsa_tmf_struct) >
5414 sizeof(struct io_accel2_cmd));
5415 BUG_ON(offsetof(struct io_accel2_cmd, error_data) <
5416 offsetof(struct hpsa_tmf_struct, error_len) +
5417 sizeof(ac->error_len));
5419 c->cmd_type = IOACCEL2_TMF;
5420 c->scsi_cmd = SCSI_CMD_BUSY;
5422 /* Adjust the DMA address to point to the accelerated command buffer */
5423 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
5424 (c->cmdindex * sizeof(struct io_accel2_cmd));
5425 BUG_ON(c->busaddr & 0x0000007F);
5427 memset(ac, 0, sizeof(*c2)); /* yes this is correct */
5428 ac->iu_type = IOACCEL2_IU_TMF_TYPE;
5429 ac->reply_queue = reply_queue;
5430 ac->tmf = IOACCEL2_TMF_ABORT;
5431 ac->it_nexus = cpu_to_le32(dev->ioaccel_handle);
5432 memset(ac->lun_id, 0, sizeof(ac->lun_id));
5433 ac->tag = cpu_to_le64(c->cmdindex << DIRECT_LOOKUP_SHIFT);
5434 ac->abort_tag = cpu_to_le64(le32_to_cpu(c2a->Tag));
5435 ac->error_ptr = cpu_to_le64(c->busaddr +
5436 offsetof(struct io_accel2_cmd, error_data));
5437 ac->error_len = cpu_to_le32(sizeof(c2->error_data));
5440 /* The ioaccel2 path firmware cannot handle abort task requests.
5441 * Change abort requests to a physical target reset, and send it to the
5442 * address of the physical disk used for the ioaccel2 command.
5443 * Returns 0 (IO_OK) on success.
5447 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
5448 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5451 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
5452 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
5453 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
5454 unsigned char *psa = &phys_scsi3addr[0];
5456 /* Get a pointer to the hpsa logical device. */
5457 scmd = abort->scsi_cmd;
5458 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
5460 dev_warn(&h->pdev->dev,
5461 "Cannot abort: no device pointer for command.\n");
5462 return -1; /* not abortable */
5465 if (h->raid_offload_debug > 0)
5466 dev_info(&h->pdev->dev,
5467 "scsi %d:%d:%d:%d %s scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5468 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
5470 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
5471 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
5473 if (!dev->offload_enabled) {
5474 dev_warn(&h->pdev->dev,
5475 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
5476 return -1; /* not abortable */
5479 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
5480 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
5481 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
5482 return -1; /* not abortable */
5485 /* send the reset */
5486 if (h->raid_offload_debug > 0)
5487 dev_info(&h->pdev->dev,
5488 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5489 psa[0], psa[1], psa[2], psa[3],
5490 psa[4], psa[5], psa[6], psa[7]);
5491 rc = hpsa_do_reset(h, dev, psa, HPSA_RESET_TYPE_TARGET, reply_queue);
5493 dev_warn(&h->pdev->dev,
5494 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5495 psa[0], psa[1], psa[2], psa[3],
5496 psa[4], psa[5], psa[6], psa[7]);
5497 return rc; /* failed to reset */
5500 /* wait for device to recover */
5501 if (wait_for_device_to_become_ready(h, psa, reply_queue) != 0) {
5502 dev_warn(&h->pdev->dev,
5503 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5504 psa[0], psa[1], psa[2], psa[3],
5505 psa[4], psa[5], psa[6], psa[7]);
5506 return -1; /* failed to recover */
5509 /* device recovered */
5510 dev_info(&h->pdev->dev,
5511 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
5512 psa[0], psa[1], psa[2], psa[3],
5513 psa[4], psa[5], psa[6], psa[7]);
5515 return rc; /* success */
5518 static int hpsa_send_abort_ioaccel2(struct ctlr_info *h,
5519 struct CommandList *abort, int reply_queue)
5522 struct CommandList *c;
5523 __le32 taglower, tagupper;
5524 struct hpsa_scsi_dev_t *dev;
5525 struct io_accel2_cmd *c2;
5527 dev = abort->scsi_cmd->device->hostdata;
5528 if (!dev->offload_enabled && !dev->hba_ioaccel_enabled)
5532 setup_ioaccel2_abort_cmd(c, h, abort, reply_queue);
5533 c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
5534 (void) hpsa_scsi_do_simple_cmd(h, c, reply_queue, NO_TIMEOUT);
5535 hpsa_get_tag(h, abort, &taglower, &tagupper);
5536 dev_dbg(&h->pdev->dev,
5537 "%s: Tag:0x%08x:%08x: do_simple_cmd(ioaccel2 abort) completed.\n",
5538 __func__, tagupper, taglower);
5539 /* no unmap needed here because no data xfer. */
5541 dev_dbg(&h->pdev->dev,
5542 "%s: Tag:0x%08x:%08x: abort service response = 0x%02x.\n",
5543 __func__, tagupper, taglower, c2->error_data.serv_response);
5544 switch (c2->error_data.serv_response) {
5545 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
5546 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
5549 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
5550 case IOACCEL2_SERV_RESPONSE_FAILURE:
5551 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
5555 dev_warn(&h->pdev->dev,
5556 "%s: Tag:0x%08x:%08x: unknown abort service response 0x%02x\n",
5557 __func__, tagupper, taglower,
5558 c2->error_data.serv_response);
5562 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
5563 tagupper, taglower);
5567 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
5568 unsigned char *scsi3addr, struct CommandList *abort, int reply_queue)
5571 * I/O accelerator mode 2 commands should be aborted via the
5572 * accelerated path, since the RAID path is unaware of these commands,
5573 * but not all underlying firmware can handle the abort TMF.
5574 * Change the abort to a physical device reset when the abort TMF is unsupported.
5576 if (abort->cmd_type == CMD_IOACCEL2) {
5577 if (HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags)
5578 return hpsa_send_abort_ioaccel2(h, abort,
5581 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr,
5582 abort, reply_queue);
5584 return hpsa_send_abort(h, scsi3addr, abort, reply_queue);
5587 /* Find out which reply queue a command was meant to return on */
5588 static int hpsa_extract_reply_queue(struct ctlr_info *h,
5589 struct CommandList *c)
5591 if (c->cmd_type == CMD_IOACCEL2)
5592 return h->ioaccel2_cmd_pool[c->cmdindex].reply_queue;
5593 return c->Header.ReplyQueue;
5597 * Limit concurrency of abort commands to prevent
5598 * over-subscription of commands
5600 static inline int wait_for_available_abort_cmd(struct ctlr_info *h)
5602 #define ABORT_CMD_WAIT_MSECS 5000
5603 return !wait_event_timeout(h->abort_cmd_wait_queue,
5604 atomic_dec_if_positive(&h->abort_cmds_available) >= 0,
5605 msecs_to_jiffies(ABORT_CMD_WAIT_MSECS));
5608 /* Send an abort for the specified command.
5609 * If the device and controller support it,
5610 * send a task abort request.
5612 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
5616 struct ctlr_info *h;
5617 struct hpsa_scsi_dev_t *dev;
5618 struct CommandList *abort; /* pointer to command to be aborted */
5619 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
5620 char msg[256]; /* For debug messaging. */
5622 __le32 tagupper, taglower;
5623 int refcount, reply_queue;
5628 if (sc->device == NULL)
5631 /* Find the controller of the command to be aborted */
5632 h = sdev_to_hba(sc->device);
5636 /* Find the device of the command to be aborted */
5637 dev = sc->device->hostdata;
5639 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
5644 /* If controller locked up, we can guarantee command won't complete */
5645 if (lockup_detected(h)) {
5646 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5647 "ABORT FAILED, lockup detected");
5651 /* This is a good time to check if controller lockup has occurred */
5652 if (detect_controller_lockup(h)) {
5653 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5654 "ABORT FAILED, new lockup detected");
5658 /* Check that controller supports some kind of task abort */
5659 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
5660 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
5663 memset(msg, 0, sizeof(msg));
5664 ml += sprintf(msg+ml, "scsi %d:%d:%d:%llu %s %p",
5665 h->scsi_host->host_no, sc->device->channel,
5666 sc->device->id, sc->device->lun,
5667 "Aborting command", sc);
5669 /* Get SCSI command to be aborted */
5670 abort = (struct CommandList *) sc->host_scribble;
5671 if (abort == NULL) {
5672 /* This can happen if the command already completed. */
5675 refcount = atomic_inc_return(&abort->refcount);
5676 if (refcount == 1) { /* Command is done already. */
5681 /* Don't bother trying the abort if we know it won't work. */
5682 if (abort->cmd_type != CMD_IOACCEL2 &&
5683 abort->cmd_type != CMD_IOACCEL1 && !dev->supports_aborts) {
5689 * Check that we're aborting the right command.
5690 * It's possible the CommandList already completed and got re-used.
5692 if (abort->scsi_cmd != sc) {
5697 abort->abort_pending = true;
5698 hpsa_get_tag(h, abort, &taglower, &tagupper);
5699 reply_queue = hpsa_extract_reply_queue(h, abort);
5700 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
5701 as = abort->scsi_cmd;
5703 ml += sprintf(msg+ml,
5704 "CDBLen: %d CDB: 0x%02x%02x... SN: 0x%lx ",
5705 as->cmd_len, as->cmnd[0], as->cmnd[1],
5707 dev_warn(&h->pdev->dev, "%s BEING SENT\n", msg);
5708 hpsa_show_dev_msg(KERN_WARNING, h, dev, "Aborting command");
5711 * Command is in flight, or possibly already completed
5712 * by the firmware (but not to the scsi mid layer) but we can't
5713 * distinguish which. Send the abort down.
5715 if (wait_for_available_abort_cmd(h)) {
5716 dev_warn(&h->pdev->dev,
5717 "%s FAILED, timeout waiting for an abort command to become available.\n",
5722 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort, reply_queue);
5723 atomic_inc(&h->abort_cmds_available);
5724 wake_up_all(&h->abort_cmd_wait_queue);
5726 dev_warn(&h->pdev->dev, "%s SENT, FAILED\n", msg);
5727 hpsa_show_dev_msg(KERN_WARNING, h, dev,
5728 "FAILED to abort command");
5732 dev_info(&h->pdev->dev, "%s SENT, SUCCESS\n", msg);
5733 wait_event(h->event_sync_wait_queue,
5734 abort->scsi_cmd != sc || lockup_detected(h));
5736 return !lockup_detected(h) ? SUCCESS : FAILED;
5740 * For operations with an associated SCSI command, a command block is allocated
5741 * at init and selected by cmd_tagged_alloc() using the block request tag as an
5742 * index into a table of entries. cmd_tagged_free() is
5743 * the complement, although cmd_free() may be called instead.
5745 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
5746 struct scsi_cmnd *scmd)
5748 int idx = hpsa_get_cmd_index(scmd);
5749 struct CommandList *c = h->cmd_pool + idx;
5751 if (idx < HPSA_NRESERVED_CMDS || idx >= h->nr_cmds) {
5752 dev_err(&h->pdev->dev, "Bad block tag: %d not in [%d..%d]\n",
5753 idx, HPSA_NRESERVED_CMDS, h->nr_cmds - 1);
5754 /* The index value comes from the block layer, so if it's out of
5755 * bounds, it's probably not our bug.
5760 atomic_inc(&c->refcount);
5761 if (unlikely(!hpsa_is_cmd_idle(c))) {
5763 * We expect that the SCSI layer will hand us a unique tag
5764 * value. Thus, there should never be a collision here between
5765 * two requests...because if the selected command isn't idle
5766 * then someone is going to be very disappointed.
5768 dev_err(&h->pdev->dev,
5769 "tag collision (tag=%d) in cmd_tagged_alloc().\n",
5771 if (c->scsi_cmd != NULL)
5772 scsi_print_command(c->scsi_cmd);
5773 scsi_print_command(scmd);
5776 hpsa_cmd_partial_init(h, idx, c);
5780 static void cmd_tagged_free(struct ctlr_info *h, struct CommandList *c)
5783 * Release our reference to the block. We don't need to do anything
5784 * else to free it, because it is accessed by index. (There's no point
5785 * in checking the result of the decrement, since we cannot guarantee
5786 * that there isn't a concurrent abort which is also accessing it.)
5788 (void)atomic_dec(&c->refcount);
5792 * For operations that cannot sleep, a command block is allocated at init,
5793 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
5794 * which ones are free or in use. Lock must be held when calling this.
5795 * cmd_free() is the complement.
5796 * This function never gives up and returns NULL. If it hangs,
5797 * another thread must call cmd_free() to free some tags.
5800 static struct CommandList *cmd_alloc(struct ctlr_info *h)
5802 struct CommandList *c;
5807 * There is some *extremely* small but non-zero chance that
5808 * multiple threads could get in here, and one thread could
5809 * be scanning through the list of bits looking for a free
5810 * one, but the free ones are always behind him, and other
5811 * threads sneak in behind him and eat them before he can
5812 * get to them, so that while there is always a free one, a
5813 * very unlucky thread might be starved anyway, never able to
5814 * beat the other threads. In reality, this happens so
5815 * infrequently as to be indistinguishable from never.
5817 * Note that we start allocating commands before the SCSI host structure
5818 * is initialized. Since the search starts at bit zero, this
5819 * all works, since we have at least one command structure available;
5820 * however, it means that the structures with the low indexes have to be
5821 * reserved for driver-initiated requests, while requests from the block
5822 * layer will use the higher indexes.
5826 i = find_next_zero_bit(h->cmd_pool_bits,
5827 HPSA_NRESERVED_CMDS,
5829 if (unlikely(i >= HPSA_NRESERVED_CMDS)) {
5833 c = h->cmd_pool + i;
5834 refcount = atomic_inc_return(&c->refcount);
5835 if (unlikely(refcount > 1)) {
5836 cmd_free(h, c); /* already in use */
5837 offset = (i + 1) % HPSA_NRESERVED_CMDS;
5840 set_bit(i & (BITS_PER_LONG - 1),
5841 h->cmd_pool_bits + (i / BITS_PER_LONG));
5842 break; /* it's ours now. */
5844 hpsa_cmd_partial_init(h, i, c);
5849 * This is the complementary operation to cmd_alloc(). Note, however, that in
5850 * some corner cases it may also be used to free blocks allocated by
5851 * cmd_tagged_alloc(), in which case the ref-count decrement does the trick and
5852 * the clear-bit is harmless.
5854 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
5856 if (atomic_dec_and_test(&c->refcount)) {
5859 i = c - h->cmd_pool;
5860 clear_bit(i & (BITS_PER_LONG - 1),
5861 h->cmd_pool_bits + (i / BITS_PER_LONG));
5865 #ifdef CONFIG_COMPAT
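/* 32-bit compat wrappers: copy the 32-bit ioctl structures into their native
 * 64-bit layouts in a compat-allocated user-space area and forward them to
 * hpsa_ioctl().
 */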
5867 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
5870 IOCTL32_Command_struct __user *arg32 =
5871 (IOCTL32_Command_struct __user *) arg;
5872 IOCTL_Command_struct arg64;
5873 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
5877 memset(&arg64, 0, sizeof(arg64));
5879 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5880 sizeof(arg64.LUN_info));
5881 err |= copy_from_user(&arg64.Request, &arg32->Request,
5882 sizeof(arg64.Request));
5883 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5884 sizeof(arg64.error_info));
5885 err |= get_user(arg64.buf_size, &arg32->buf_size);
5886 err |= get_user(cp, &arg32->buf);
5887 arg64.buf = compat_ptr(cp);
5888 err |= copy_to_user(p, &arg64, sizeof(arg64));
5893 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
5896 err |= copy_in_user(&arg32->error_info, &p->error_info,
5897 sizeof(arg32->error_info));
5903 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
5904 int cmd, void __user *arg)
5906 BIG_IOCTL32_Command_struct __user *arg32 =
5907 (BIG_IOCTL32_Command_struct __user *) arg;
5908 BIG_IOCTL_Command_struct arg64;
5909 BIG_IOCTL_Command_struct __user *p =
5910 compat_alloc_user_space(sizeof(arg64));
5914 memset(&arg64, 0, sizeof(arg64));
5916 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
5917 sizeof(arg64.LUN_info));
5918 err |= copy_from_user(&arg64.Request, &arg32->Request,
5919 sizeof(arg64.Request));
5920 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
5921 sizeof(arg64.error_info));
5922 err |= get_user(arg64.buf_size, &arg32->buf_size);
5923 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
5924 err |= get_user(cp, &arg32->buf);
5925 arg64.buf = compat_ptr(cp);
5926 err |= copy_to_user(p, &arg64, sizeof(arg64));
5931 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
5934 err |= copy_in_user(&arg32->error_info, &p->error_info,
5935 sizeof(arg32->error_info));
5941 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5944 case CCISS_GETPCIINFO:
5945 case CCISS_GETINTINFO:
5946 case CCISS_SETINTINFO:
5947 case CCISS_GETNODENAME:
5948 case CCISS_SETNODENAME:
5949 case CCISS_GETHEARTBEAT:
5950 case CCISS_GETBUSTYPES:
5951 case CCISS_GETFIRMVER:
5952 case CCISS_GETDRIVVER:
5953 case CCISS_REVALIDVOLS:
5954 case CCISS_DEREGDISK:
5955 case CCISS_REGNEWDISK:
5957 case CCISS_RESCANDISK:
5958 case CCISS_GETLUNINFO:
5959 return hpsa_ioctl(dev, cmd, arg);
5961 case CCISS_PASSTHRU32:
5962 return hpsa_ioctl32_passthru(dev, cmd, arg);
5963 case CCISS_BIG_PASSTHRU32:
5964 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
5967 return -ENOIOCTLCMD;
5972 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
5974 struct hpsa_pci_info pciinfo;
5978 pciinfo.domain = pci_domain_nr(h->pdev->bus);
5979 pciinfo.bus = h->pdev->bus->number;
5980 pciinfo.dev_fn = h->pdev->devfn;
5981 pciinfo.board_id = h->board_id;
5982 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
5987 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
5989 DriverVer_type DriverVer;
5990 unsigned char vmaj, vmin, vsubmin;
5993 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
5994 &vmaj, &vmin, &vsubmin);
5996 dev_info(&h->pdev->dev, "driver version string '%s' "
5997 "unrecognized.", HPSA_DRIVER_VERSION);
6002 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
6005 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
6010 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6012 IOCTL_Command_struct iocommand;
6013 struct CommandList *c;
6020 if (!capable(CAP_SYS_RAWIO))
6022 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
6024 if ((iocommand.buf_size < 1) &&
6025 (iocommand.Request.Type.Direction != XFER_NONE)) {
6028 if (iocommand.buf_size > 0) {
6029 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
6032 if (iocommand.Request.Type.Direction & XFER_WRITE) {
6033 /* Copy the data into the buffer we created */
6034 if (copy_from_user(buff, iocommand.buf,
6035 iocommand.buf_size)) {
6040 memset(buff, 0, iocommand.buf_size);
6045 /* Fill in the command type */
6046 c->cmd_type = CMD_IOCTL_PEND;
6047 c->scsi_cmd = SCSI_CMD_BUSY;
6048 /* Fill in Command Header */
6049 c->Header.ReplyQueue = 0; /* unused in simple mode */
6050 if (iocommand.buf_size > 0) { /* buffer to fill */
6051 c->Header.SGList = 1;
6052 c->Header.SGTotal = cpu_to_le16(1);
6053 } else { /* no buffers to fill */
6054 c->Header.SGList = 0;
6055 c->Header.SGTotal = cpu_to_le16(0);
6057 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
6059 /* Fill in Request block */
6060 memcpy(&c->Request, &iocommand.Request,
6061 sizeof(c->Request));
6063 /* Fill in the scatter gather information */
6064 if (iocommand.buf_size > 0) {
6065 temp64 = pci_map_single(h->pdev, buff,
6066 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
6067 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
6068 c->SG[0].Addr = cpu_to_le64(0);
6069 c->SG[0].Len = cpu_to_le32(0);
6073 c->SG[0].Addr = cpu_to_le64(temp64);
6074 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
6075 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
6077 rc = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
6078 if (iocommand.buf_size > 0)
6079 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
6080 check_ioctl_unit_attention(h, c);
6086 /* Copy the error information out */
6087 memcpy(&iocommand.error_info, c->err_info,
6088 sizeof(iocommand.error_info));
6089 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
6093 if ((iocommand.Request.Type.Direction & XFER_READ) &&
6094 iocommand.buf_size > 0) {
6095 /* Copy the data out of the buffer we created */
6096 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
6108 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
6110 BIG_IOCTL_Command_struct *ioc;
6111 struct CommandList *c;
6112 unsigned char **buff = NULL;
6113 int *buff_size = NULL;
6119 BYTE __user *data_ptr;
6123 if (!capable(CAP_SYS_RAWIO))
6125 ioc = (BIG_IOCTL_Command_struct *)
6126 kmalloc(sizeof(*ioc), GFP_KERNEL);
6131 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
6135 if ((ioc->buf_size < 1) &&
6136 (ioc->Request.Type.Direction != XFER_NONE)) {
6140 /* Check kmalloc limits using all SGs */
6141 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
6145 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
6149 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
6154 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
6159 left = ioc->buf_size;
6160 data_ptr = ioc->buf;
6162 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
6163 buff_size[sg_used] = sz;
6164 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
6165 if (buff[sg_used] == NULL) {
6169 if (ioc->Request.Type.Direction & XFER_WRITE) {
6170 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
6175 memset(buff[sg_used], 0, sz);
6182 c->cmd_type = CMD_IOCTL_PEND;
6183 c->scsi_cmd = SCSI_CMD_BUSY;
6184 c->Header.ReplyQueue = 0;
6185 c->Header.SGList = (u8) sg_used;
6186 c->Header.SGTotal = cpu_to_le16(sg_used);
6187 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
6188 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
6189 if (ioc->buf_size > 0) {
6191 for (i = 0; i < sg_used; i++) {
6192 temp64 = pci_map_single(h->pdev, buff[i],
6193 buff_size[i], PCI_DMA_BIDIRECTIONAL);
6194 if (dma_mapping_error(&h->pdev->dev,
6195 (dma_addr_t) temp64)) {
6196 c->SG[i].Addr = cpu_to_le64(0);
6197 c->SG[i].Len = cpu_to_le32(0);
6198 hpsa_pci_unmap(h->pdev, c, i,
6199 PCI_DMA_BIDIRECTIONAL);
6203 c->SG[i].Addr = cpu_to_le64(temp64);
6204 c->SG[i].Len = cpu_to_le32(buff_size[i]);
6205 c->SG[i].Ext = cpu_to_le32(0);
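/* Mark the final scatter-gather descriptor so the controller knows the
 * list ends here (this list is not chained). */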
6207 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
6209 status = hpsa_scsi_do_simple_cmd(h, c, DEFAULT_REPLY_QUEUE, NO_TIMEOUT);
6211 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
6212 check_ioctl_unit_attention(h, c);
6218 /* Copy the error information out */
6219 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
6220 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
6224 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
6227 /* Copy the data out of the buffer we created */
6228 BYTE __user *ptr = ioc->buf;
6229 for (i = 0; i < sg_used; i++) {
6230 if (copy_to_user(ptr, buff[i], buff_size[i])) {
6234 ptr += buff_size[i];
6244 for (i = 0; i < sg_used; i++)
6253 static void check_ioctl_unit_attention(struct ctlr_info *h,
6254 struct CommandList *c)
6256 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
6257 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
6258 (void) check_for_unit_attention(h, c);
6264 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
6266 struct ctlr_info *h;
6267 void __user *argp = (void __user *)arg;
6270 h = sdev_to_hba(dev);
6273 case CCISS_DEREGDISK:
6274 case CCISS_REGNEWDISK:
6276 hpsa_scan_start(h->scsi_host);
6278 case CCISS_GETPCIINFO:
6279 return hpsa_getpciinfo_ioctl(h, argp);
6280 case CCISS_GETDRIVVER:
6281 return hpsa_getdrivver_ioctl(h, argp);
6282 case CCISS_PASSTHRU:
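/* Pass-through commands are throttled via h->passthru_cmds_avail; if none
 * are available the request is refused rather than queued, so the caller
 * is expected to retry. */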
6283 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6285 rc = hpsa_passthru_ioctl(h, argp);
6286 atomic_inc(&h->passthru_cmds_avail);
6288 case CCISS_BIG_PASSTHRU:
6289 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
6291 rc = hpsa_big_passthru_ioctl(h, argp);
6292 atomic_inc(&h->passthru_cmds_avail);
6299 static void hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
6302 struct CommandList *c;
6306 /* fill_cmd can't fail here, no data buffer to map */
6307 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
6308 RAID_CTLR_LUNID, TYPE_MSG);
6309 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
6311 enqueue_cmd_and_start_io(h, c);
6312 /* Don't wait for completion, the reset won't complete. Don't free
6313 * the command either. This is the last command we will send before
6314 * re-initializing everything, so it doesn't matter and won't leak.
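/*
 * fill_cmd() builds either a CISS command (TYPE_CMD) or a controller
 * message (TYPE_MSG) in *c, then sets up DMA mapping for the optional
 * data buffer according to the direction encoded in type_attr_dir.
 */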
6319 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
6320 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
6323 int pci_dir = XFER_NONE;
6324 u64 tag; /* for commands to be aborted */
6326 c->cmd_type = CMD_IOCTL_PEND;
6327 c->scsi_cmd = SCSI_CMD_BUSY;
6328 c->Header.ReplyQueue = 0;
6329 if (buff != NULL && size > 0) {
6330 c->Header.SGList = 1;
6331 c->Header.SGTotal = cpu_to_le16(1);
6333 c->Header.SGList = 0;
6334 c->Header.SGTotal = cpu_to_le16(0);
6336 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
6338 if (cmd_type == TYPE_CMD) {
6341 /* are we trying to read a vital product page */
6342 if (page_code & VPD_PAGE) {
6343 c->Request.CDB[1] = 0x01;
6344 c->Request.CDB[2] = (page_code & 0xff);
6346 c->Request.CDBLen = 6;
6347 c->Request.type_attr_dir =
6348 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6349 c->Request.Timeout = 0;
6350 c->Request.CDB[0] = HPSA_INQUIRY;
6351 c->Request.CDB[4] = size & 0xFF;
6353 case HPSA_REPORT_LOG:
6354 case HPSA_REPORT_PHYS:
6355 /* Talking to the controller, so it's a physical command:
6356 mode = 00 target = 0. Nothing to write.
6358 c->Request.CDBLen = 12;
6359 c->Request.type_attr_dir =
6360 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6361 c->Request.Timeout = 0;
6362 c->Request.CDB[0] = cmd;
6363 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6364 c->Request.CDB[7] = (size >> 16) & 0xFF;
6365 c->Request.CDB[8] = (size >> 8) & 0xFF;
6366 c->Request.CDB[9] = size & 0xFF;
6368 case HPSA_CACHE_FLUSH:
6369 c->Request.CDBLen = 12;
6370 c->Request.type_attr_dir =
6371 TYPE_ATTR_DIR(cmd_type,
6372 ATTR_SIMPLE, XFER_WRITE);
6373 c->Request.Timeout = 0;
6374 c->Request.CDB[0] = BMIC_WRITE;
6375 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
6376 c->Request.CDB[7] = (size >> 8) & 0xFF;
6377 c->Request.CDB[8] = size & 0xFF;
6379 case TEST_UNIT_READY:
6380 c->Request.CDBLen = 6;
6381 c->Request.type_attr_dir =
6382 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6383 c->Request.Timeout = 0;
6385 case HPSA_GET_RAID_MAP:
6386 c->Request.CDBLen = 12;
6387 c->Request.type_attr_dir =
6388 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6389 c->Request.Timeout = 0;
6390 c->Request.CDB[0] = HPSA_CISS_READ;
6391 c->Request.CDB[1] = cmd;
6392 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
6393 c->Request.CDB[7] = (size >> 16) & 0xFF;
6394 c->Request.CDB[8] = (size >> 8) & 0xFF;
6395 c->Request.CDB[9] = size & 0xFF;
6397 case BMIC_SENSE_CONTROLLER_PARAMETERS:
6398 c->Request.CDBLen = 10;
6399 c->Request.type_attr_dir =
6400 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6401 c->Request.Timeout = 0;
6402 c->Request.CDB[0] = BMIC_READ;
6403 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
6404 c->Request.CDB[7] = (size >> 16) & 0xFF;
6405 c->Request.CDB[8] = (size >> 8) & 0xFF;
6407 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
6408 c->Request.CDBLen = 10;
6409 c->Request.type_attr_dir =
6410 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
6411 c->Request.Timeout = 0;
6412 c->Request.CDB[0] = BMIC_READ;
6413 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
6414 c->Request.CDB[7] = (size >> 16) & 0xFF;
6415 c->Request.CDB[8] = (size >> 8) & 0xFF;
6418 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
6422 } else if (cmd_type == TYPE_MSG) {
6425 case HPSA_PHYS_TARGET_RESET:
6426 c->Request.CDBLen = 16;
6427 c->Request.type_attr_dir =
6428 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6429 c->Request.Timeout = 0; /* Don't time out */
6430 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6431 c->Request.CDB[0] = HPSA_RESET;
6432 c->Request.CDB[1] = HPSA_TARGET_RESET_TYPE;
6433 /* Physical target reset needs no control bytes 4-7*/
6434 c->Request.CDB[4] = 0x00;
6435 c->Request.CDB[5] = 0x00;
6436 c->Request.CDB[6] = 0x00;
6437 c->Request.CDB[7] = 0x00;
6439 case HPSA_DEVICE_RESET_MSG:
6440 c->Request.CDBLen = 16;
6441 c->Request.type_attr_dir =
6442 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
6443 c->Request.Timeout = 0; /* Don't time out */
6444 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
6445 c->Request.CDB[0] = cmd;
6446 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
6447 /* If bytes 4-7 are zero, it means reset the */
6449 c->Request.CDB[4] = 0x00;
6450 c->Request.CDB[5] = 0x00;
6451 c->Request.CDB[6] = 0x00;
6452 c->Request.CDB[7] = 0x00;
6454 case HPSA_ABORT_MSG:
6455 memcpy(&tag, buff, sizeof(tag));
6456 dev_dbg(&h->pdev->dev,
6457 "Abort Tag:0x%016llx using rqst Tag:0x%016llx",
6458 tag, c->Header.tag);
6459 c->Request.CDBLen = 16;
6460 c->Request.type_attr_dir =
6461 TYPE_ATTR_DIR(cmd_type,
6462 ATTR_SIMPLE, XFER_WRITE);
6463 c->Request.Timeout = 0; /* Don't time out */
6464 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
6465 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
6466 c->Request.CDB[2] = 0x00; /* reserved */
6467 c->Request.CDB[3] = 0x00; /* reserved */
6468 /* Tag to abort goes in CDB[4]-CDB[11] */
6469 memcpy(&c->Request.CDB[4], &tag, sizeof(tag));
6470 c->Request.CDB[12] = 0x00; /* reserved */
6471 c->Request.CDB[13] = 0x00; /* reserved */
6472 c->Request.CDB[14] = 0x00; /* reserved */
6473 c->Request.CDB[15] = 0x00; /* reserved */
6476 dev_warn(&h->pdev->dev, "unknown message type %d\n",
6481 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
6485 switch (GET_DIR(c->Request.type_attr_dir)) {
6487 pci_dir = PCI_DMA_FROMDEVICE;
6490 pci_dir = PCI_DMA_TODEVICE;
6493 pci_dir = PCI_DMA_NONE;
6496 pci_dir = PCI_DMA_BIDIRECTIONAL;
6498 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
6504 * Map (physical) PCI mem into (virtual) kernel space
6506 static void __iomem *remap_pci_mem(ulong base, ulong size)
6508 ulong page_base = ((ulong) base) & PAGE_MASK;
6509 ulong page_offs = ((ulong) base) - page_base;
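/* ioremap from the containing page boundary and add the offset back so
 * the caller gets a mapping for the exact address requested. */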
6510 void __iomem *page_remapped = ioremap_nocache(page_base,
6513 return page_remapped ? (page_remapped + page_offs) : NULL;
6516 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
6518 return h->access.command_completed(h, q);
6521 static inline bool interrupt_pending(struct ctlr_info *h)
6523 return h->access.intr_pending(h);
6526 static inline long interrupt_not_for_us(struct ctlr_info *h)
6528 return (h->access.intr_pending(h) == 0) ||
6529 (h->interrupts_enabled == 0);
6532 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
6535 if (unlikely(tag_index >= h->nr_cmds)) {
6536 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
6542 static inline void finish_cmd(struct CommandList *c)
6544 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
6545 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
6546 || c->cmd_type == CMD_IOACCEL2))
6547 complete_scsi_command(c);
6548 else if (c->cmd_type == CMD_IOCTL_PEND || c->cmd_type == IOACCEL2_TMF)
6549 complete(c->waiting);
6552 /* process completion of an indexed ("direct lookup") command */
6553 static inline void process_indexed_cmd(struct ctlr_info *h,
6557 struct CommandList *c;
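/* The command's index into h->cmd_pool is carried in the upper bits of
 * the hardware tag. */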
6559 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
6560 if (!bad_tag(h, tag_index, raw_tag)) {
6561 c = h->cmd_pool + tag_index;
6566 /* Some controllers, like p400, will give us one interrupt
6567 * after a soft reset, even if we turned interrupts off.
6568 * Only need to check for this in the hpsa_xxx_discard_completions
6571 static int ignore_bogus_interrupt(struct ctlr_info *h)
6573 if (likely(!reset_devices))
6576 if (likely(h->interrupts_enabled))
6579 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
6580 "(known firmware bug.) Ignoring.\n");
6586 * Convert &h->q[x] (passed to interrupt handlers) back to h.
6587 * Relies on (h->q[x] == x) being true for x such that
6588 * 0 <= x < MAX_REPLY_QUEUES.
6590 static struct ctlr_info *queue_to_hba(u8 *queue)
6592 return container_of((queue - *queue), struct ctlr_info, q[0]);
6595 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
6597 struct ctlr_info *h = queue_to_hba(queue);
6598 u8 q = *(u8 *) queue;
6601 if (ignore_bogus_interrupt(h))
6604 if (interrupt_not_for_us(h))
6606 h->last_intr_timestamp = get_jiffies_64();
6607 while (interrupt_pending(h)) {
6608 raw_tag = get_next_completion(h, q);
6609 while (raw_tag != FIFO_EMPTY)
6610 raw_tag = next_command(h, q);
6615 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
6617 struct ctlr_info *h = queue_to_hba(queue);
6619 u8 q = *(u8 *) queue;
6621 if (ignore_bogus_interrupt(h))
6624 h->last_intr_timestamp = get_jiffies_64();
6625 raw_tag = get_next_completion(h, q);
6626 while (raw_tag != FIFO_EMPTY)
6627 raw_tag = next_command(h, q);
6631 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
6633 struct ctlr_info *h = queue_to_hba((u8 *) queue);
6635 u8 q = *(u8 *) queue;
6637 if (interrupt_not_for_us(h))
6639 h->last_intr_timestamp = get_jiffies_64();
6640 while (interrupt_pending(h)) {
6641 raw_tag = get_next_completion(h, q);
6642 while (raw_tag != FIFO_EMPTY) {
6643 process_indexed_cmd(h, raw_tag);
6644 raw_tag = next_command(h, q);
6650 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
6652 struct ctlr_info *h = queue_to_hba(queue);
6654 u8 q = *(u8 *) queue;
6656 h->last_intr_timestamp = get_jiffies_64();
6657 raw_tag = get_next_completion(h, q);
6658 while (raw_tag != FIFO_EMPTY) {
6659 process_indexed_cmd(h, raw_tag);
6660 raw_tag = next_command(h, q);
6665 /* Send a message CDB to the firmware. Careful, this only works
6666 * in simple mode, not performant mode due to the tag lookup.
6667 * We only ever use this immediately after a controller reset.
6669 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
6673 struct CommandListHeader CommandHeader;
6674 struct RequestBlock Request;
6675 struct ErrDescriptor ErrorDescriptor;
6677 struct Command *cmd;
6678 static const size_t cmd_sz = sizeof(*cmd) +
6679 sizeof(cmd->ErrorDescriptor);
6683 void __iomem *vaddr;
6686 vaddr = pci_ioremap_bar(pdev, 0);
6690 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
6691 * CCISS commands, so they must be allocated from the lower 4GiB of
6694 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
6700 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
6706 /* This must fit, because of the 32-bit consistent DMA mask. Also,
6707 * although there's no guarantee, we assume that the address is at
6708 * least 4-byte aligned (most likely, it's page-aligned).
6710 paddr32 = cpu_to_le32(paddr64);
6712 cmd->CommandHeader.ReplyQueue = 0;
6713 cmd->CommandHeader.SGList = 0;
6714 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
6715 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
6716 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
6718 cmd->Request.CDBLen = 16;
6719 cmd->Request.type_attr_dir =
6720 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
6721 cmd->Request.Timeout = 0; /* Don't time out */
6722 cmd->Request.CDB[0] = opcode;
6723 cmd->Request.CDB[1] = type;
6724 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
6725 cmd->ErrorDescriptor.Addr =
6726 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
6727 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
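/* Post the command by writing its bus address to the inbound request
 * port, then poll the outbound reply port until the controller hands the
 * same tag back. */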
6729 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
6731 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
6732 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
6733 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
6735 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
6740 /* we leak the DMA buffer here ... no choice since the controller could
6741 * still complete the command.
6743 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
6744 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
6749 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
6751 if (tag & HPSA_ERROR_BIT) {
6752 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
6757 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
6762 #define hpsa_noop(p) hpsa_message(p, 3, 0)
6764 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
6765 void __iomem *vaddr, u32 use_doorbell)
6769 /* For everything after the P600, the PCI power state method
6770 * of resetting the controller doesn't work, so we have this
6771 * other way using the doorbell register.
6773 dev_info(&pdev->dev, "using doorbell to reset controller\n");
6774 writel(use_doorbell, vaddr + SA5_DOORBELL);
6776 /* PMC hardware guys tell us we need a 10 second delay after
6777 * doorbell reset and before any attempt to talk to the board
6778 * at all to ensure that this actually works and doesn't fall
6779 * over in some weird corner cases.
6782 } else { /* Try to do it the PCI power state way */
6784 /* Quoting from the Open CISS Specification: "The Power
6785 * Management Control/Status Register (CSR) controls the power
6786 * state of the device. The normal operating state is D0,
6787 * CSR=00h. The software off state is D3, CSR=03h. To reset
6788 * the controller, place the interface device in D3 then to D0,
6789 * this causes a secondary PCI reset which will reset the
6794 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
6796 /* enter the D3hot power management state */
6797 rc = pci_set_power_state(pdev, PCI_D3hot);
6803 /* enter the D0 power management state */
6804 rc = pci_set_power_state(pdev, PCI_D0);
6809 * The P600 requires a small delay when changing states.
6810 * Otherwise we may think the board did not reset and we bail.
6811 * This is for kdump only and is particular to the P600.
6818 static void init_driver_version(char *driver_version, int len)
6820 memset(driver_version, 0, len);
6821 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
6824 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
6826 char *driver_version;
6827 int i, size = sizeof(cfgtable->driver_version);
6829 driver_version = kmalloc(size, GFP_KERNEL);
6830 if (!driver_version)
6833 init_driver_version(driver_version, size);
6834 for (i = 0; i < size; i++)
6835 writeb(driver_version[i], &cfgtable->driver_version[i]);
6836 kfree(driver_version);
6840 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
6841 unsigned char *driver_ver)
6845 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
6846 driver_ver[i] = readb(&cfgtable->driver_version[i]);
6849 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
6852 char *driver_ver, *old_driver_ver;
6853 int rc, size = sizeof(cfgtable->driver_version);
6855 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
6856 if (!old_driver_ver)
6858 driver_ver = old_driver_ver + size;
6860 /* After a reset, the 32 bytes of "driver version" in the cfgtable
6861 * should have been changed, otherwise we know the reset failed.
6863 init_driver_version(old_driver_ver, size);
6864 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
6865 rc = !memcmp(driver_ver, old_driver_ver, size);
6866 kfree(old_driver_ver);
6869 /* This does a hard reset of the controller using PCI power management
6870 * states or the doorbell register.
6872 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev, u32 board_id)
6876 u64 cfg_base_addr_index;
6877 void __iomem *vaddr;
6878 unsigned long paddr;
6879 u32 misc_fw_support;
6881 struct CfgTable __iomem *cfgtable;
6883 u16 command_register;
6885 /* For controllers as old as the P600, this is very nearly
6888 * pci_save_state(pci_dev);
6889 * pci_set_power_state(pci_dev, PCI_D3hot);
6890 * pci_set_power_state(pci_dev, PCI_D0);
6891 * pci_restore_state(pci_dev);
6893 * For controllers newer than the P600, the pci power state
6894 * method of resetting doesn't work so we have another way
6895 * using the doorbell register.
6898 if (!ctlr_is_resettable(board_id)) {
6899 dev_warn(&pdev->dev, "Controller not resettable\n");
6903 /* if controller is soft- but not hard resettable... */
6904 if (!ctlr_is_hard_resettable(board_id))
6905 return -ENOTSUPP; /* try soft reset later. */
6907 /* Save the PCI command register */
6908 pci_read_config_word(pdev, 4, &command_register);
6909 pci_save_state(pdev);
6911 /* find the first memory BAR, so we can find the cfg table */
6912 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
6915 vaddr = remap_pci_mem(paddr, 0x250);
6919 /* find cfgtable in order to check if reset via doorbell is supported */
6920 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
6921 &cfg_base_addr_index, &cfg_offset);
6924 cfgtable = remap_pci_mem(pci_resource_start(pdev,
6925 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
6930 rc = write_driver_ver_to_cfgtable(cfgtable);
6932 goto unmap_cfgtable;
6934 /* If reset via doorbell register is supported, use that.
6935 * There are two such methods. Favor the newest method.
6937 misc_fw_support = readl(&cfgtable->misc_fw_support);
6938 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
6940 use_doorbell = DOORBELL_CTLR_RESET2;
6942 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
6944 dev_warn(&pdev->dev,
6945 "Soft reset not supported. Firmware update is required.\n");
6946 rc = -ENOTSUPP; /* try soft reset */
6947 goto unmap_cfgtable;
6951 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
6953 goto unmap_cfgtable;
6955 pci_restore_state(pdev);
6956 pci_write_config_word(pdev, 4, command_register);
6958 /* Some devices (notably the HP Smart Array 5i Controller)
6959 need a little pause here */
6960 msleep(HPSA_POST_RESET_PAUSE_MSECS);
6962 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
6964 dev_warn(&pdev->dev,
6965 "Failed waiting for board to become ready after hard reset\n");
6966 goto unmap_cfgtable;
6969 rc = controller_reset_failed(vaddr);
6971 goto unmap_cfgtable;
6973 dev_warn(&pdev->dev, "Unable to successfully reset "
6974 "controller. Will try soft reset.\n");
6977 dev_info(&pdev->dev, "board ready after hard reset.\n");
6989 * We cannot read the structure directly; for portability we must use
6991 * This is for debug only.
6993 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
6999 dev_info(dev, "Controller Configuration information\n");
7000 dev_info(dev, "------------------------------------\n");
7001 for (i = 0; i < 4; i++)
7002 temp_name[i] = readb(&(tb->Signature[i]));
7003 temp_name[4] = '\0';
7004 dev_info(dev, " Signature = %s\n", temp_name);
7005 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
7006 dev_info(dev, " Transport methods supported = 0x%x\n",
7007 readl(&(tb->TransportSupport)));
7008 dev_info(dev, " Transport methods active = 0x%x\n",
7009 readl(&(tb->TransportActive)));
7010 dev_info(dev, " Requested transport Method = 0x%x\n",
7011 readl(&(tb->HostWrite.TransportRequest)));
7012 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
7013 readl(&(tb->HostWrite.CoalIntDelay)));
7014 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
7015 readl(&(tb->HostWrite.CoalIntCount)));
7016 dev_info(dev, " Max outstanding commands = %d\n",
7017 readl(&(tb->CmdsOutMax)));
7018 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
7019 for (i = 0; i < 16; i++)
7020 temp_name[i] = readb(&(tb->ServerName[i]));
7021 temp_name[16] = '\0';
7022 dev_info(dev, " Server Name = %s\n", temp_name);
7023 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
7024 readl(&(tb->HeartBeat)));
7025 #endif /* HPSA_DEBUG */
7028 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
7030 int i, offset, mem_type, bar_type;
7032 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
7035 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
7036 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
7037 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
7040 mem_type = pci_resource_flags(pdev, i) &
7041 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
7043 case PCI_BASE_ADDRESS_MEM_TYPE_32:
7044 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
7045 offset += 4; /* 32 bit */
7047 case PCI_BASE_ADDRESS_MEM_TYPE_64:
7050 default: /* reserved in PCI 2.2 */
7051 dev_warn(&pdev->dev,
7052 "base address is invalid\n");
7057 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
7063 static void hpsa_disable_interrupt_mode(struct ctlr_info *h)
7065 if (h->msix_vector) {
7066 if (h->pdev->msix_enabled)
7067 pci_disable_msix(h->pdev);
7069 } else if (h->msi_vector) {
7070 if (h->pdev->msi_enabled)
7071 pci_disable_msi(h->pdev);
7076 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
7077 * controllers that are capable. If not, we use legacy INTx mode.
7079 static void hpsa_interrupt_mode(struct ctlr_info *h)
7081 #ifdef CONFIG_PCI_MSI
7083 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
7085 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
7086 hpsa_msix_entries[i].vector = 0;
7087 hpsa_msix_entries[i].entry = i;
7090 /* Some boards advertise MSI but don't really support it */
7091 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
7092 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
7093 goto default_int_mode;
7094 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
7095 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
7096 h->msix_vector = MAX_REPLY_QUEUES;
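/* One reply queue per MSI-X vector; don't request more vectors than
 * there are online CPUs. */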
7097 if (h->msix_vector > num_online_cpus())
7098 h->msix_vector = num_online_cpus();
7099 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
7102 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
7104 goto single_msi_mode;
7105 } else if (err < h->msix_vector) {
7106 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
7107 "available\n", err);
7109 h->msix_vector = err;
7110 for (i = 0; i < h->msix_vector; i++)
7111 h->intr[i] = hpsa_msix_entries[i].vector;
7115 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
7116 dev_info(&h->pdev->dev, "MSI capable controller\n");
7117 if (!pci_enable_msi(h->pdev))
7120 dev_warn(&h->pdev->dev, "MSI init failed\n");
7123 #endif /* CONFIG_PCI_MSI */
7124 /* if we get here we're going to use the default interrupt mode */
7125 h->intr[h->intr_mode] = h->pdev->irq;
7128 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
7131 u32 subsystem_vendor_id, subsystem_device_id;
7133 subsystem_vendor_id = pdev->subsystem_vendor;
7134 subsystem_device_id = pdev->subsystem_device;
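/* Compose board_id: PCI subsystem device ID in the upper 16 bits,
 * subsystem vendor ID in the lower 16 bits. */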
7135 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
7136 subsystem_vendor_id;
7138 for (i = 0; i < ARRAY_SIZE(products); i++)
7139 if (*board_id == products[i].board_id)
7142 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
7143 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
7145 dev_warn(&pdev->dev, "unrecognized board ID: "
7146 "0x%08x, ignoring.\n", *board_id);
7149 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
7152 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
7153 unsigned long *memory_bar)
7157 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
7158 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
7159 /* addressing mode bits already removed */
7160 *memory_bar = pci_resource_start(pdev, i);
7161 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
7165 dev_warn(&pdev->dev, "no memory BAR found\n");
7169 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
7175 iterations = HPSA_BOARD_READY_ITERATIONS;
7177 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
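/* Poll the scratchpad register; the firmware writes HPSA_FIRMWARE_READY
 * there once the board is up. */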
7179 for (i = 0; i < iterations; i++) {
7180 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
7181 if (wait_for_ready) {
7182 if (scratchpad == HPSA_FIRMWARE_READY)
7185 if (scratchpad != HPSA_FIRMWARE_READY)
7188 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
7190 dev_warn(&pdev->dev, "board not ready, timed out.\n");
7194 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
7195 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
7198 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
7199 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
7200 *cfg_base_addr &= (u32) 0x0000ffff;
7201 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
7202 if (*cfg_base_addr_index == -1) {
7203 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
7209 static void hpsa_free_cfgtables(struct ctlr_info *h)
7211 if (h->transtable) {
7212 iounmap(h->transtable);
7213 h->transtable = NULL;
7216 iounmap(h->cfgtable);
7221 /* Find and map CISS config table and transfer table
7222 * several items must be unmapped (freed) later
7224 static int hpsa_find_cfgtables(struct ctlr_info *h)
7228 u64 cfg_base_addr_index;
7232 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7233 &cfg_base_addr_index, &cfg_offset);
7236 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
7237 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
7239 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
7242 rc = write_driver_ver_to_cfgtable(h->cfgtable);
7245 /* Find performant mode table. */
7246 trans_offset = readl(&h->cfgtable->TransMethodOffset);
7247 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
7248 cfg_base_addr_index)+cfg_offset+trans_offset,
7249 sizeof(*h->transtable));
7250 if (!h->transtable) {
7251 dev_err(&h->pdev->dev, "Failed mapping transfer table\n");
7252 hpsa_free_cfgtables(h);
7258 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
7260 #define MIN_MAX_COMMANDS 16
7261 BUILD_BUG_ON(MIN_MAX_COMMANDS <= HPSA_NRESERVED_CMDS);
7263 h->max_commands = readl(&h->cfgtable->MaxPerformantModeCommands);
7265 /* Limit commands in memory limited kdump scenario. */
7266 if (reset_devices && h->max_commands > 32)
7267 h->max_commands = 32;
7269 if (h->max_commands < MIN_MAX_COMMANDS) {
7270 dev_warn(&h->pdev->dev,
7271 "Controller reports max supported commands of %d Using %d instead. Ensure that firmware is up to date.\n",
7274 h->max_commands = MIN_MAX_COMMANDS;
7278 /* If the controller reports that the total max sg entries is greater than 512,
7279 * then we know that chained SG blocks work. (Original smart arrays did not
7280 * support chained SG blocks and would return zero for max sg entries.)
7282 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
7284 return h->maxsgentries > 512;
7287 /* Interrogate the hardware for some limits:
7288 * max commands, max SG elements without chaining, and with chaining,
7289 * SG chain block size, etc.
7291 static void hpsa_find_board_params(struct ctlr_info *h)
7293 hpsa_get_max_perf_mode_cmds(h);
7294 h->nr_cmds = h->max_commands;
7295 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
7296 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
7297 if (hpsa_supports_chained_sg_blocks(h)) {
7298 /* Limit in-command s/g elements to 32 to save DMA'able memory. */
7299 h->max_cmd_sg_entries = 32;
7300 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
7301 h->maxsgentries--; /* save one for chain pointer */
7304 * Original smart arrays supported at most 31 s/g entries
7305 * embedded inline in the command (trying to use more
7306 * would lock up the controller)
7308 h->max_cmd_sg_entries = 31;
7309 h->maxsgentries = 31; /* default to traditional values */
7313 /* Find out what task management functions are supported and cache */
7314 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
7315 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
7316 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
7317 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
7318 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
7319 if (!(HPSATMF_IOACCEL_ENABLED & h->TMFSupportFlags))
7320 dev_warn(&h->pdev->dev, "HP SSD Smart Path aborts not supported\n");
7323 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
7325 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
7326 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
7332 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
7336 driver_support = readl(&(h->cfgtable->driver_support));
7337 /* Need to enable prefetch in the SCSI core for 6400 in x86 */
7339 driver_support |= ENABLE_SCSI_PREFETCH;
7341 driver_support |= ENABLE_UNIT_ATTN;
7342 writel(driver_support, &(h->cfgtable->driver_support));
7345 /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
7346 * in a prefetch beyond physical memory.
7348 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
7352 if (h->board_id != 0x3225103C)
7354 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
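/* Setting bit 15 of the I2O DMA1 config register turns off the
 * problematic prefetch described above. */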
7355 dma_prefetch |= 0x8000;
7356 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
7359 static int hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
7363 unsigned long flags;
7364 /* wait until the clear_event_notify bit 6 is cleared by controller. */
7365 for (i = 0; i < MAX_CLEAR_EVENT_WAIT; i++) {
7366 spin_lock_irqsave(&h->lock, flags);
7367 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7368 spin_unlock_irqrestore(&h->lock, flags);
7369 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
7371 /* delay and try again */
7372 msleep(CLEAR_EVENT_WAIT_INTERVAL);
7379 static int hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
7383 unsigned long flags;
7385 /* Under certain very rare conditions, this can take a while.
7386 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
7387 * as we enter this code.)
7389 for (i = 0; i < MAX_MODE_CHANGE_WAIT; i++) {
7390 if (h->remove_in_progress)
7392 spin_lock_irqsave(&h->lock, flags);
7393 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
7394 spin_unlock_irqrestore(&h->lock, flags);
7395 if (!(doorbell_value & CFGTBL_ChangeReq))
7397 /* delay and try again */
7398 msleep(MODE_CHANGE_WAIT_INTERVAL);
7405 /* return -ENODEV or other reason on error, 0 on success */
7406 static int hpsa_enter_simple_mode(struct ctlr_info *h)
7410 trans_support = readl(&(h->cfgtable->TransportSupport));
7411 if (!(trans_support & SIMPLE_MODE))
7414 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
7416 /* Update the field, and then ring the doorbell */
7417 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
7418 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7419 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7420 if (hpsa_wait_for_mode_change_ack(h))
7422 print_cfg_table(&h->pdev->dev, h->cfgtable);
7423 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
7425 h->transMethod = CFGTBL_Trans_Simple;
7428 dev_err(&h->pdev->dev, "failed to enter simple mode\n");
7432 /* free items allocated or mapped by hpsa_pci_init */
7433 static void hpsa_free_pci_init(struct ctlr_info *h)
7435 hpsa_free_cfgtables(h); /* pci_init 4 */
7436 iounmap(h->vaddr); /* pci_init 3 */
7438 hpsa_disable_interrupt_mode(h); /* pci_init 2 */
7440 * call pci_disable_device before pci_release_regions per
7441 * Documentation/PCI/pci.txt
7443 pci_disable_device(h->pdev); /* pci_init 1 */
7444 pci_release_regions(h->pdev); /* pci_init 2 */
7447 /* several items must be freed later */
7448 static int hpsa_pci_init(struct ctlr_info *h)
7450 int prod_index, err;
7452 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
7455 h->product_name = products[prod_index].product_name;
7456 h->access = *(products[prod_index].access);
7458 h->needs_abort_tags_swizzled =
7459 ctlr_needs_abort_tags_swizzled(h->board_id);
7461 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
7462 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
7464 err = pci_enable_device(h->pdev);
7466 dev_err(&h->pdev->dev, "failed to enable PCI device\n");
7467 pci_disable_device(h->pdev);
7471 err = pci_request_regions(h->pdev, HPSA);
7473 dev_err(&h->pdev->dev,
7474 "failed to obtain PCI resources\n");
7475 pci_disable_device(h->pdev);
7479 pci_set_master(h->pdev);
7481 hpsa_interrupt_mode(h);
7482 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
7484 goto clean2; /* intmode+region, pci */
7485 h->vaddr = remap_pci_mem(h->paddr, 0x250);
7487 dev_err(&h->pdev->dev, "failed to remap PCI mem\n");
7489 goto clean2; /* intmode+region, pci */
7491 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7493 goto clean3; /* vaddr, intmode+region, pci */
7494 err = hpsa_find_cfgtables(h);
7496 goto clean3; /* vaddr, intmode+region, pci */
7497 hpsa_find_board_params(h);
7499 if (!hpsa_CISS_signature_present(h)) {
7501 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
7503 hpsa_set_driver_support_bits(h);
7504 hpsa_p600_dma_prefetch_quirk(h);
7505 err = hpsa_enter_simple_mode(h);
7507 goto clean4; /* cfgtables, vaddr, intmode+region, pci */
7510 clean4: /* cfgtables, vaddr, intmode+region, pci */
7511 hpsa_free_cfgtables(h);
7512 clean3: /* vaddr, intmode+region, pci */
7515 clean2: /* intmode+region, pci */
7516 hpsa_disable_interrupt_mode(h);
7518 * call pci_disable_device before pci_release_regions per
7519 * Documentation/PCI/pci.txt
7521 pci_disable_device(h->pdev);
7522 pci_release_regions(h->pdev);
7526 static void hpsa_hba_inquiry(struct ctlr_info *h)
7530 #define HBA_INQUIRY_BYTE_COUNT 64
7531 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
7532 if (!h->hba_inquiry_data)
7534 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
7535 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
7537 kfree(h->hba_inquiry_data);
7538 h->hba_inquiry_data = NULL;
7542 static int hpsa_init_reset_devices(struct pci_dev *pdev, u32 board_id)
7545 void __iomem *vaddr;
7550 /* The kdump kernel is loading; we don't know what state the
7551 * PCI interface is in. dev->enable_cnt is zero, so we call
7552 * enable+disable, wait a while, and switch it on.
7554 rc = pci_enable_device(pdev);
7556 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
7559 pci_disable_device(pdev);
7560 msleep(260); /* a randomly chosen number */
7561 rc = pci_enable_device(pdev);
7563 dev_warn(&pdev->dev, "failed to enable device.\n");
7567 pci_set_master(pdev);
7569 vaddr = pci_ioremap_bar(pdev, 0);
7570 if (vaddr == NULL) {
7574 writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
7577 /* Reset the controller with a PCI power-cycle or via doorbell */
7578 rc = hpsa_kdump_hard_reset_controller(pdev, board_id);
7580 /* -ENOTSUPP here means we cannot reset the controller
7581 * but it's already (and still) up and running in
7582 * "performant mode". Or, it might be 640x, which can't reset
7583 * due to concerns about shared bbwc between 6402/6404 pair.
7588 /* Now try to get the controller to respond to a no-op */
7589 dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
7590 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
7591 if (hpsa_noop(pdev) == 0)
7594 dev_warn(&pdev->dev, "no-op failed%s\n",
7595 (i < 11 ? "; re-trying" : ""));
7600 pci_disable_device(pdev);
7604 static void hpsa_free_cmd_pool(struct ctlr_info *h)
7606 kfree(h->cmd_pool_bits);
7607 h->cmd_pool_bits = NULL;
7609 pci_free_consistent(h->pdev,
7610 h->nr_cmds * sizeof(struct CommandList),
7612 h->cmd_pool_dhandle);
7614 h->cmd_pool_dhandle = 0;
7616 if (h->errinfo_pool) {
7617 pci_free_consistent(h->pdev,
7618 h->nr_cmds * sizeof(struct ErrorInfo),
7620 h->errinfo_pool_dhandle);
7621 h->errinfo_pool = NULL;
7622 h->errinfo_pool_dhandle = 0;
7626 static int hpsa_alloc_cmd_pool(struct ctlr_info *h)
7628 h->cmd_pool_bits = kzalloc(
7629 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
7630 sizeof(unsigned long), GFP_KERNEL);
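/* The command list and error-info pools are allocated DMA-coherent so
 * the controller can access them directly. */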
7631 h->cmd_pool = pci_alloc_consistent(h->pdev,
7632 h->nr_cmds * sizeof(*h->cmd_pool),
7633 &(h->cmd_pool_dhandle));
7634 h->errinfo_pool = pci_alloc_consistent(h->pdev,
7635 h->nr_cmds * sizeof(*h->errinfo_pool),
7636 &(h->errinfo_pool_dhandle));
7637 if ((h->cmd_pool_bits == NULL)
7638 || (h->cmd_pool == NULL)
7639 || (h->errinfo_pool == NULL)) {
7640 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
7643 hpsa_preinitialize_commands(h);
7646 hpsa_free_cmd_pool(h);
7650 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
7654 cpu = cpumask_first(cpu_online_mask);
7655 for (i = 0; i < h->msix_vector; i++) {
7656 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
7657 cpu = cpumask_next(cpu, cpu_online_mask);
7661 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
7662 static void hpsa_free_irqs(struct ctlr_info *h)
7666 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
7667 /* Single reply queue, only one irq to free */
7669 irq_set_affinity_hint(h->intr[i], NULL);
7670 free_irq(h->intr[i], &h->q[i]);
7675 for (i = 0; i < h->msix_vector; i++) {
7676 irq_set_affinity_hint(h->intr[i], NULL);
7677 free_irq(h->intr[i], &h->q[i]);
7680 for (; i < MAX_REPLY_QUEUES; i++)
7684 /* returns 0 on success; cleans up and returns -Enn on error */
7685 static int hpsa_request_irqs(struct ctlr_info *h,
7686 irqreturn_t (*msixhandler)(int, void *),
7687 irqreturn_t (*intxhandler)(int, void *))
7692 * initialize h->q[x] = x so that interrupt handlers know which
7695 for (i = 0; i < MAX_REPLY_QUEUES; i++)
7698 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
7699 /* If performant mode and MSI-X, use multiple reply queues */
7700 for (i = 0; i < h->msix_vector; i++) {
7701 sprintf(h->intrname[i], "%s-msix%d", h->devname, i);
7702 rc = request_irq(h->intr[i], msixhandler,
7708 dev_err(&h->pdev->dev,
7709 "failed to get irq %d for %s\n",
7710 h->intr[i], h->devname);
7711 for (j = 0; j < i; j++) {
7712 free_irq(h->intr[j], &h->q[j]);
7715 for (; j < MAX_REPLY_QUEUES; j++)
7720 hpsa_irq_affinity_hints(h);
7722 /* Use single reply pool */
7723 if (h->msix_vector > 0 || h->msi_vector) {
7725 sprintf(h->intrname[h->intr_mode],
7726 "%s-msix", h->devname);
7728 sprintf(h->intrname[h->intr_mode],
7729 "%s-msi", h->devname);
7730 rc = request_irq(h->intr[h->intr_mode],
7732 h->intrname[h->intr_mode],
7733 &h->q[h->intr_mode]);
7735 sprintf(h->intrname[h->intr_mode],
7736 "%s-intx", h->devname);
7737 rc = request_irq(h->intr[h->intr_mode],
7738 intxhandler, IRQF_SHARED,
7739 h->intrname[h->intr_mode],
7740 &h->q[h->intr_mode]);
7742 irq_set_affinity_hint(h->intr[h->intr_mode], NULL);
7745 dev_err(&h->pdev->dev, "failed to get irq %d for %s\n",
7746 h->intr[h->intr_mode], h->devname);
7753 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
7756 hpsa_send_host_reset(h, RAID_CTLR_LUNID, HPSA_RESET_TYPE_CONTROLLER);
7758 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
7759 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY);
7761 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
7765 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
7766 rc = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
7768 dev_warn(&h->pdev->dev, "Board failed to become ready "
7769 "after soft reset.\n");
7776 static void hpsa_free_reply_queues(struct ctlr_info *h)
7780 for (i = 0; i < h->nreply_queues; i++) {
7781 if (!h->reply_queue[i].head)
7783 pci_free_consistent(h->pdev,
7784 h->reply_queue_size,
7785 h->reply_queue[i].head,
7786 h->reply_queue[i].busaddr);
7787 h->reply_queue[i].head = NULL;
7788 h->reply_queue[i].busaddr = 0;
7790 h->reply_queue_size = 0;
7793 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
7795 hpsa_free_performant_mode(h); /* init_one 7 */
7796 hpsa_free_sg_chain_blocks(h); /* init_one 6 */
7797 hpsa_free_cmd_pool(h); /* init_one 5 */
7798 hpsa_free_irqs(h); /* init_one 4 */
7799 scsi_host_put(h->scsi_host); /* init_one 3 */
7800 h->scsi_host = NULL; /* init_one 3 */
7801 hpsa_free_pci_init(h); /* init_one 2_5 */
7802 free_percpu(h->lockup_detected); /* init_one 2 */
7803 h->lockup_detected = NULL; /* init_one 2 */
7804 if (h->resubmit_wq) {
7805 destroy_workqueue(h->resubmit_wq); /* init_one 1 */
7806 h->resubmit_wq = NULL;
7808 if (h->rescan_ctlr_wq) {
7809 destroy_workqueue(h->rescan_ctlr_wq);
7810 h->rescan_ctlr_wq = NULL;
7812 kfree(h); /* init_one 1 */
7815 /* Called when controller lockup detected. */
7816 static void fail_all_outstanding_cmds(struct ctlr_info *h)
7819 struct CommandList *c;
7822 flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
7823 for (i = 0; i < h->nr_cmds; i++) {
7824 c = h->cmd_pool + i;
7825 refcount = atomic_inc_return(&c->refcount);
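/* A reference count above one means the command is still outstanding;
 * only such in-flight commands are completed with CMD_CTLR_LOCKUP status. */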
7827 c->err_info->CommandStatus = CMD_CTLR_LOCKUP;
7829 atomic_dec(&h->commands_outstanding);
7834 dev_warn(&h->pdev->dev,
7835 "failed %d commands in fail_all\n", failcount);
7838 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
7842 for_each_online_cpu(cpu) {
7843 u32 *lockup_detected;
7844 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
7845 *lockup_detected = value;
7847 wmb(); /* be sure the per-cpu variables are out to memory */
7850 static void controller_lockup_detected(struct ctlr_info *h)
7852 unsigned long flags;
7853 u32 lockup_detected;
7855 h->access.set_intr_mask(h, HPSA_INTR_OFF);
7856 spin_lock_irqsave(&h->lock, flags);
7857 lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
7858 if (!lockup_detected) {
7859 /* no heartbeat, but controller gave us a zero. */
7860 dev_warn(&h->pdev->dev,
7861 "lockup detected after %d but scratchpad register is zero\n",
7862 h->heartbeat_sample_interval / HZ);
7863 lockup_detected = 0xffffffff;
7865 set_lockup_detected_for_all_cpus(h, lockup_detected);
7866 spin_unlock_irqrestore(&h->lock, flags);
7867 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x after %d\n",
7868 lockup_detected, h->heartbeat_sample_interval / HZ);
7869 pci_disable_device(h->pdev);
7870 fail_all_outstanding_cmds(h);
7873 static int detect_controller_lockup(struct ctlr_info *h)
7877 unsigned long flags;
7879 now = get_jiffies_64();
7880 /* If we've received an interrupt recently, we're ok. */
7881 if (time_after64(h->last_intr_timestamp +
7882 (h->heartbeat_sample_interval), now))
7886 * If we've already checked the heartbeat recently, we're ok.
7887 * This could happen if someone sends us a signal. We
7888 * otherwise don't care about signals in this thread.
7890 if (time_after64(h->last_heartbeat_timestamp +
7891 (h->heartbeat_sample_interval), now))
7894 /* If heartbeat has not changed since we last looked, we're not ok. */
7895 spin_lock_irqsave(&h->lock, flags);
7896 heartbeat = readl(&h->cfgtable->HeartBeat);
7897 spin_unlock_irqrestore(&h->lock, flags);
7898 if (h->last_heartbeat == heartbeat) {
7899 controller_lockup_detected(h);
7904 h->last_heartbeat = heartbeat;
7905 h->last_heartbeat_timestamp = now;
7909 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
7914 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7917 /* Ask the controller to clear the events we're handling. */
7918 if ((h->transMethod & (CFGTBL_Trans_io_accel1
7919 | CFGTBL_Trans_io_accel2)) &&
7920 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
7921 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
7923 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
7924 event_type = "state change";
7925 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
7926 event_type = "configuration change";
7927 /* Stop sending new RAID offload reqs via the IO accelerator */
7928 scsi_block_requests(h->scsi_host);
7929 for (i = 0; i < h->ndevices; i++)
7930 h->dev[i]->offload_enabled = 0;
7931 hpsa_drain_accel_commands(h);
7932 /* Set 'accelerator path config change' bit */
7933 dev_warn(&h->pdev->dev,
7934 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
7935 h->events, event_type);
7936 writel(h->events, &(h->cfgtable->clear_event_notify));
7937 /* Set the "clear event notify field update" bit 6 */
7938 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7939 /* Wait until ctlr clears 'clear event notify field', bit 6 */
7940 hpsa_wait_for_clear_event_notify_ack(h);
7941 scsi_unblock_requests(h->scsi_host);
7943 /* Acknowledge controller notification events. */
7944 writel(h->events, &(h->cfgtable->clear_event_notify));
7945 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
7946 hpsa_wait_for_clear_event_notify_ack(h);
7948 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7949 hpsa_wait_for_mode_change_ack(h);
7955 /* Check a register on the controller to see if there are configuration
7956 * changes (added/changed/removed logical drives, etc.) which mean that
7957 * we should rescan the controller for devices.
7958 * Also check flag for driver-initiated rescan.
7960 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
7962 if (h->drv_req_rescan) {
7963 h->drv_req_rescan = 0;
7967 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
7970 h->events = readl(&(h->cfgtable->event_notify));
7971 return h->events & RESCAN_REQUIRED_EVENT_BITS;
7975 * Check if any of the offline devices have become ready
7977 static int hpsa_offline_devices_ready(struct ctlr_info *h)
7979 unsigned long flags;
7980 struct offline_device_entry *d;
7981 struct list_head *this, *tmp;
7983 spin_lock_irqsave(&h->offline_device_lock, flags);
7984 list_for_each_safe(this, tmp, &h->offline_device_list) {
7985 d = list_entry(this, struct offline_device_entry,
7987 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7988 if (!hpsa_volume_offline(h, d->scsi3addr)) {
7989 spin_lock_irqsave(&h->offline_device_lock, flags);
7990 list_del(&d->offline_list);
7991 spin_unlock_irqrestore(&h->offline_device_lock, flags);
7994 spin_lock_irqsave(&h->offline_device_lock, flags);
7996 spin_unlock_irqrestore(&h->offline_device_lock, flags);
8000 static void hpsa_rescan_ctlr_worker(struct work_struct *work)
8002 unsigned long flags;
8003 struct ctlr_info *h = container_of(to_delayed_work(work),
8004 struct ctlr_info, rescan_ctlr_work);
8007 if (h->remove_in_progress)
8010 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
8011 scsi_host_get(h->scsi_host);
8012 hpsa_ack_ctlr_events(h);
8013 hpsa_scan_start(h->scsi_host);
8014 scsi_host_put(h->scsi_host);
8016 spin_lock_irqsave(&h->lock, flags);
8017 if (!h->remove_in_progress)
8018 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8019 h->heartbeat_sample_interval);
8020 spin_unlock_irqrestore(&h->lock, flags);
8023 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
8025 unsigned long flags;
8026 struct ctlr_info *h = container_of(to_delayed_work(work),
8027 struct ctlr_info, monitor_ctlr_work);
8029 detect_controller_lockup(h);
8030 if (lockup_detected(h))
8033 spin_lock_irqsave(&h->lock, flags);
8034 if (!h->remove_in_progress)
8035 schedule_delayed_work(&h->monitor_ctlr_work,
8036 h->heartbeat_sample_interval);
8037 spin_unlock_irqrestore(&h->lock, flags);
8040 static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
8043 struct workqueue_struct *wq = NULL;
8045 wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
8047 dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
8052 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8055 struct ctlr_info *h;
8056 int try_soft_reset = 0;
8057 unsigned long flags;
8060 if (number_of_controllers == 0)
8061 printk(KERN_INFO DRIVER_NAME "\n");
8063 rc = hpsa_lookup_board_id(pdev, &board_id);
8065 dev_warn(&pdev->dev, "Board ID not found\n");
8069 rc = hpsa_init_reset_devices(pdev, board_id);
8071 if (rc != -ENOTSUPP)
8073 /* If the reset fails in a particular way (it has no way to do
8074 * a proper hard reset, so returns -ENOTSUPP) we can try to do
8075 * a soft reset once we get the controller configured up to the
8076 * point that it can accept a command.
8082 reinit_after_soft_reset:
8084 /* Command structures must be aligned on a 32-byte boundary because
8085 * the 5 lower bits of the address are used by the hardware and by
8086 * the driver. See comments in hpsa.h for more info.
8088 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
8089 h = kzalloc(sizeof(*h), GFP_KERNEL);
8091 dev_err(&pdev->dev, "Failed to allocate controller head\n");
8097 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
8098 INIT_LIST_HEAD(&h->offline_device_list);
8099 spin_lock_init(&h->lock);
8100 spin_lock_init(&h->offline_device_lock);
8101 spin_lock_init(&h->scan_lock);
8102 atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
8103 atomic_set(&h->abort_cmds_available, HPSA_CMDS_RESERVED_FOR_ABORTS);
8105 /* Allocate and clear per-cpu variable lockup_detected */
8106 h->lockup_detected = alloc_percpu(u32);
8107 if (!h->lockup_detected) {
8108 dev_err(&h->pdev->dev, "Failed to allocate lockup detector\n");
8110 goto clean1; /* aer/h */
8112 set_lockup_detected_for_all_cpus(h, 0);
8114 rc = hpsa_pci_init(h);
8116 goto clean2; /* lu, aer/h */
8118 /* relies on h-> settings made by hpsa_pci_init, including
8119 * interrupt_mode h->intr */
8120 rc = hpsa_scsi_host_alloc(h);
8122 goto clean2_5; /* pci, lu, aer/h */
8124 sprintf(h->devname, HPSA "%d", h->scsi_host->host_no);
8125 h->ctlr = number_of_controllers;
8126 number_of_controllers++;
8128 /* Configure DMA addressing: prefer a 64-bit mask, fall back to 32-bit */
8129 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8133 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8137 dev_err(&pdev->dev, "no suitable DMA available\n");
8138 goto clean3; /* shost, pci, lu, aer/h */
8142 /* make sure the board interrupts are off */
8143 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8145 rc = hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx);
8147 goto clean3; /* shost, pci, lu, aer/h */
8148 rc = hpsa_alloc_cmd_pool(h);
8150 goto clean4; /* irq, shost, pci, lu, aer/h */
8151 rc = hpsa_alloc_sg_chain_blocks(h);
8153 goto clean5; /* cmd, irq, shost, pci, lu, aer/h */
8154 init_waitqueue_head(&h->scan_wait_queue);
8155 init_waitqueue_head(&h->abort_cmd_wait_queue);
8156 init_waitqueue_head(&h->event_sync_wait_queue);
8157 mutex_init(&h->reset_mutex);
8158 h->scan_finished = 1; /* no scan currently in progress */
8160 pci_set_drvdata(pdev, h);
8163 spin_lock_init(&h->devlock);
8164 rc = hpsa_put_ctlr_into_performant_mode(h);
8166 goto clean6; /* sg, cmd, irq, shost, pci, lu, aer/h */
8168 /* hook into SCSI subsystem */
8169 rc = hpsa_scsi_add_host(h);
8171 goto clean7; /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
8173 /* create the resubmit workqueue */
8174 h->rescan_ctlr_wq = hpsa_create_controller_wq(h, "rescan");
8175 if (!h->rescan_ctlr_wq) {
8180 h->resubmit_wq = hpsa_create_controller_wq(h, "resubmit");
8181 if (!h->resubmit_wq) {
8183 goto clean7; /* aer/h */
8187 * At this point, the controller is ready to take commands.
8188 * Now, if reset_devices and the hard reset didn't work, try
8189 * the soft reset and see if that works.
8191 if (try_soft_reset) {
8193 /* This is kind of gross. We may or may not get a completion
8194 * from the soft reset command, and if we do, then the value
8195 * from the fifo may or may not be valid. So, we wait 10 secs
8196 * after the reset, throwing away any completions we get during
8197 * that time. Unregister the interrupt handler and register
8198 * fake ones to scoop up any residual completions.
8200 spin_lock_irqsave(&h->lock, flags);
8201 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8202 spin_unlock_irqrestore(&h->lock, flags);
8204 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
8205 hpsa_intx_discard_completions);
8207 dev_warn(&h->pdev->dev,
8208 "Failed to request_irq after soft reset.\n");
8210 * cannot goto clean7 or free_irqs will be called
8211 * again. Instead, do its work
8213 hpsa_free_performant_mode(h); /* clean7 */
8214 hpsa_free_sg_chain_blocks(h); /* clean6 */
8215 hpsa_free_cmd_pool(h); /* clean5 */
8217 * skip hpsa_free_irqs(h) clean4 since that
8218 * was just called before request_irqs failed
8223 rc = hpsa_kdump_soft_reset(h);
8225 /* Neither hard nor soft reset worked, we're hosed. */
8228 dev_info(&h->pdev->dev, "Board READY.\n");
8229 dev_info(&h->pdev->dev,
8230 "Waiting for stale completions to drain.\n");
8231 h->access.set_intr_mask(h, HPSA_INTR_ON);
8233 h->access.set_intr_mask(h, HPSA_INTR_OFF);
8235 rc = controller_reset_failed(h->cfgtable);
8237 dev_info(&h->pdev->dev,
8238 "Soft reset appears to have failed.\n");
8240 /* since the controller's reset, we have to go back and re-init
8241 * everything. Easiest to just forget what we've done and do it
8244 hpsa_undo_allocations_after_kdump_soft_reset(h);
8247 /* don't goto clean, we already unallocated */
8250 goto reinit_after_soft_reset;
8253 /* Enable Accelerated IO path at driver layer */
8254 h->acciopath_status = 1;
8257 /* Turn the interrupts on so we can service requests */
8258 h->access.set_intr_mask(h, HPSA_INTR_ON);
8260 hpsa_hba_inquiry(h);
8262 /* Monitor the controller for firmware lockups */
8263 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
8264 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
8265 schedule_delayed_work(&h->monitor_ctlr_work,
8266 h->heartbeat_sample_interval);
8267 INIT_DELAYED_WORK(&h->rescan_ctlr_work, hpsa_rescan_ctlr_worker);
8268 queue_delayed_work(h->rescan_ctlr_wq, &h->rescan_ctlr_work,
8269 h->heartbeat_sample_interval);
clean7: /* perf, sg, cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_performant_mode(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
clean6: /* sg, cmd, irq, pci, lockup, wq/aer/h */
	hpsa_free_sg_chain_blocks(h);
clean5: /* cmd, irq, shost, pci, lu, aer/h */
	hpsa_free_cmd_pool(h);
clean4: /* irq, shost, pci, lu, aer/h */
	hpsa_free_irqs(h);
clean3: /* shost, pci, lu, aer/h */
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
clean2_5: /* pci, lu, aer/h */
	hpsa_free_pci_init(h);
clean2: /* lu, aer/h */
	if (h->lockup_detected) {
		free_percpu(h->lockup_detected);
		h->lockup_detected = NULL;
	}
clean1:	/* wq/aer/h */
	if (h->resubmit_wq) {
		destroy_workqueue(h->resubmit_wq);
		h->resubmit_wq = NULL;
	}
	if (h->rescan_ctlr_wq) {
		destroy_workqueue(h->rescan_ctlr_wq);
		h->rescan_ctlr_wq = NULL;
	}
	kfree(h);
	return rc;
}
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	int rc;

	if (unlikely(lockup_detected(h)))
		return;
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;
	c = cmd_alloc(h);
	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD))
		goto out;
	rc = hpsa_scsi_do_simple_cmd_with_retry(h, c,
					PCI_DMA_TODEVICE, NO_TIMEOUT);
	if (rc)
		goto out;
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_free(h, c);
	kfree(flush_buf);
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command.
	 * sendcmd will turn off interrupt, and send the flush...
	 * To write all data in the battery backed cache to disks
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs(h);			/* init_one 4 */
	hpsa_disable_interrupt_mode(h);		/* pci_init 2 */
}
static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++) {
		kfree(h->dev[i]);
		h->dev[i] = NULL;
	}
}

static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	spin_unlock_irqrestore(&h->lock, flags);
	cancel_delayed_work_sync(&h->monitor_ctlr_work);
	cancel_delayed_work_sync(&h->rescan_ctlr_work);
	destroy_workqueue(h->rescan_ctlr_wq);
	destroy_workqueue(h->resubmit_wq);

	/*
	 * Call before disabling interrupts.
	 * scsi_remove_host can trigger I/O operations especially
	 * when multipath is enabled. There can be SYNCHRONIZE CACHE
	 * operations which cannot complete and will hang the system.
	 */
	scsi_remove_host(h->scsi_host);			/* init_one 8 */
	/* includes hpsa_free_irqs - init_one 4 */
	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_shutdown(pdev);

	hpsa_free_device_info(h);			/* scan */

	kfree(h->hba_inquiry_data);			/* init_one 10 */
	h->hba_inquiry_data = NULL;			/* init_one 10 */
	hpsa_free_ioaccel2_sg_chain_blocks(h);
	hpsa_free_performant_mode(h);			/* init_one 7 */
	hpsa_free_sg_chain_blocks(h);			/* init_one 6 */
	hpsa_free_cmd_pool(h);				/* init_one 5 */

	/* hpsa_free_irqs already called via hpsa_shutdown init_one 4 */

	scsi_host_put(h->scsi_host);			/* init_one 3 */
	h->scsi_host = NULL;				/* init_one 3 */

	/* includes hpsa_disable_interrupt_mode - pci_init 2 */
	hpsa_free_pci_init(h);				/* init_one 2.5 */

	free_percpu(h->lockup_detected);		/* init_one 2 */
	h->lockup_detected = NULL;			/* init_one 2 */
	/* (void) pci_disable_pcie_error_reporting(pdev); */	/* init_one 1 */
	kfree(h);					/* init_one 1 */
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};
/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
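
/*
 * Worked example (illustrative values, not read from any controller):
 * with bucket[] = {5, 6, 8, 10, 12, 20, 28, 36}, num_buckets = 8 and
 * min_blocks = 4, a command carrying 3 SG entries needs 3 + 4 = 7
 * sixteen-byte blocks; the smallest bucket that fits is bucket[2] = 8,
 * so bucket_map[3] = 2.  A command with 30 SG entries needs 34 blocks
 * and falls through to the last bucket, so bucket_map[30] = 7.
 */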
/*
 * return -ENODEV on err, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to, to tell it 8 different
	 * sizes of commands which there may be.  It's a way of
	 * reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16-byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
				 16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */
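	/*
	 * Illustration, assuming the bft[] values above: a command carrying
	 * 24 embedded SG entries needs 4 + 24 = 28 sixteen-byte blocks, so
	 * it maps to the 28-block bucket (index 6) and the controller only
	 * fetches 28 * 16 = 448 bytes for it rather than a full-sized
	 * command block.
	 */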
	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode;
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		access = SA5_ioaccel_mode2_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - doorbell timeout\n");
		return -ENODEV;
	}
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return -ENODEV;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return 0;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
			&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	if (hpsa_wait_for_mode_change_ack(h)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - enabling ioaccel mode\n");
		return -ENODEV;
	}
	return 0;
}
/* Free ioaccel1 mode command blocks and block fetch table */
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	if (h->ioaccel_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool,
			h->ioaccel_cmd_pool_dhandle);
		h->ioaccel_cmd_pool = NULL;
		h->ioaccel_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel1_blockFetchTable);
	h->ioaccel1_blockFetchTable = NULL;
}
/* Allocate ioaccel1 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
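	/*
	 * Since sizeof(struct io_accel1_cmd) is a multiple of that 128-byte
	 * alignment and the DMA pool below is at least that well aligned,
	 * every command in the pool starts at a bus address whose low 7 bits
	 * are zero, leaving those bits free for the hardware's use (e.g. the
	 * transfer-size encoding described above calc_bucket_map()).
	 */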
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel1_cmd_and_bft(h);
	return -ENOMEM;
}
/* Free ioaccel2 mode command blocks and block fetch table */
static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	hpsa_free_ioaccel2_sg_chain_blocks(h);

	if (h->ioaccel2_cmd_pool) {
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool,
			h->ioaccel2_cmd_pool_dhandle);
		h->ioaccel2_cmd_pool = NULL;
		h->ioaccel2_cmd_pool_dhandle = 0;
	}
	kfree(h->ioaccel2_blockFetchTable);
	h->ioaccel2_blockFetchTable = NULL;
}
/* Allocate ioaccel2 mode command blocks and block fetch table */
static int hpsa_alloc_ioaccel2_cmd_and_bft(struct ctlr_info *h)
{
	int rc;

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL)) {
		rc = -ENOMEM;
		goto clean_up;
	}

	rc = hpsa_allocate_ioaccel2_sg_chain_blocks(h);
	if (rc)
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
/* Free items allocated by hpsa_put_ctlr_into_performant_mode */
static void hpsa_free_performant_mode(struct ctlr_info *h)
{
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
}
/* return -ENODEV on error, 0 on success (or no action)
 * allocates numerous items that must be freed later
 */
static int hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i, rc;

	if (hpsa_simple_mode)
		return 0;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return 0;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel1_cmd_and_bft(h);
		if (rc)
			return rc;
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		transMethod |= CFGTBL_Trans_io_accel2 |
				CFGTBL_Trans_enable_directed_msix;
		rc = hpsa_alloc_ioaccel2_cmd_and_bft(h);
		if (rc)
			return rc;
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);
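	/*
	 * Example figures (illustrative only): with max_commands = 1024 the
	 * line above sizes each reply queue at 1024 * sizeof(u64) = 8 KiB of
	 * DMA-coherent memory, and the loop below allocates one such queue
	 * per reply queue (normally one per MSI-X vector).
	 */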
	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head) {
			rc = -ENOMEM;
			goto clean1;	/* rq, ioaccel */
		}
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable) {
		rc = -ENOMEM;
		goto clean1;	/* rq, ioaccel */
	}

	rc = hpsa_enter_performant_mode(h, trans_support);
	if (rc)
		goto clean2;	/* bft, rq, ioaccel */
	return 0;

clean2:	/* bft, rq, ioaccel */
	kfree(h->blockFetchTable);
	h->blockFetchTable = NULL;
clean1:	/* rq, ioaccel */
	hpsa_free_reply_queues(h);
	hpsa_free_ioaccel1_cmd_and_bft(h);
	hpsa_free_ioaccel2_cmd_and_bft(h);
	return rc;
}
static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;
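
	/*
	 * Sweep the whole command pool: taking a reference on a slot and
	 * seeing a refcount greater than 1 means that slot is owned by an
	 * outstanding command.  Count the ones that went down an ioaccel
	 * path and keep sleeping between sweeps until none remain.
	 */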
	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
/*
 * This is it.  Register the PCI driver information for the cards we control;
 * the OS will call our registered routines when it finds one of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
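	/*
	 * Each VERIFY_OFFSET() below expands to a compile-time assertion;
	 * for instance VERIFY_OFFSET(volume_blk_size, 4) becomes
	 * BUILD_BUG_ON(offsetof(struct raid_map_data, volume_blk_size) != 4),
	 * so any drift between these structure layouts and the offsets the
	 * controller interface assumes breaks the build instead of the I/O.
	 */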
	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48 */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4 */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}
module_init(hpsa_init);
module_exit(hpsa_cleanup);