/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be three byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP,     0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C6103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x21CA103C, "Smart Array", &SA5_access},
	{0x21CB103C, "Smart Array", &SA5_access},
	{0x21CC103C, "Smart Array", &SA5_access},
	{0x21CD103C, "Smart Array", &SA5_access},
	{0x21CE103C, "Smart Array", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
	void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
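/*
 * Note: flag OR'd into fill_cmd()'s u16 page_code argument; the low byte
 * selects the inquiry page, and bit 8 asks for an EVPD (vital product
 * data) page rather than standard inquiry data.
 */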
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);
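
/*
 * The Scsi_Host private area holds a single pointer back to our ctlr_info.
 * These helpers recover that pointer from either a scsi_device or a
 * Scsi_Host.
 */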
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: LUN failure detected\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev,
			HPSA "%d: report LUN data changed\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n",
			atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}
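
/*
 * Byte 3, bits 7:6 of an 8-byte CISS LUN address give the addressing mode;
 * mode 01b indicates a logical (RAID) volume, as opposed to a physical or
 * masked-peripheral device.
 */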
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
	"1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module			= THIS_MODULE,
	.name			= HPSA,
	.proc_name		= HPSA,
	.queuecommand		= hpsa_scsi_queue_command,
	.scan_start		= hpsa_scan_start,
	.scan_finished		= hpsa_scan_finished,
	.change_queue_depth	= hpsa_change_queue_depth,
	.this_id		= -1,
	.use_clustering		= ENABLE_CLUSTERING,
	.eh_abort_handler	= hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl			= hpsa_ioctl,
	.slave_alloc		= hpsa_slave_alloc,
	.slave_destroy		= hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};
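
/*
 * Pull the tag of the next completed command off reply queue q.  In
 * performant mode each reply queue entry carries a parity bit that flips
 * on every pass around the ring: an entry whose low bit matches
 * rq->wraparound is new; anything else means the queue is empty.
 */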
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_queue_buffer *rq = &h->reply_queue[q];

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		atomic_dec(&h->commands_outstanding);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
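
/*
 * Worked example (normal performant mode): a command whose SG count maps
 * to block fetch table entry 2 gets busaddr |= 1 | (2 << 1), so the low
 * bits become 0x05 -- performant mode selected, pull count 2, and command
 * type 0.
 */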

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}
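
/*
 * Firmware flash requests arrive as a BMIC write whose subcommand byte
 * (CDB[6]) selects the flash-firmware operation; spotting them lets the
 * driver relax its lockup detector below.
 */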
static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should, so we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
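/*
 * Note: firmware_flash_in_progress is a counter, not a flag.  Overlapping
 * flash commands each bump it, and the sample interval is only restored
 * when the last one completes (atomic_dec_and_test below).
 */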
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	atomic_inc(&h->commands_outstanding);
	h->access.submit_command(h, c);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}
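
/*
 * Target numbers already in use on the given bus are collected in a bitmap
 * so that find_first_zero_bit() can hand back the lowest free target.
 */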
static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
		return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed.  Careful about the ordering. */
	if (new_entry->offload_config && new_entry->offload_enabled) {
		/*
		 * if drive is newly offload_enabled, we want to copy the
		 * raid map data first.  If previously offload_enabled and
		 * offload_config were set, raid map data had better be
		 * the same as it was before.  if raid map data is changed
		 * then it had better be the case that
		 * h->dev[entry]->offload_enabled is currently 0.
		 */
		h->dev[entry]->raid_map = new_entry->raid_map;
		h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
		wmb(); /* ensure raid map updated prior to ->offload_enabled */
	}
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->queue_depth = new_entry->queue_depth;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
			new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	if (dev1->queue_depth != dev2->queue_depth)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
	unsigned char scsi3addr[])
{
	struct offline_device_entry *device;
	unsigned long flags;

	/* Check to see if device is already on the list */
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_entry(device, &h->offline_device_list, offline_list) {
		if (memcmp(device->scsi3addr, scsi3addr,
			sizeof(device->scsi3addr)) == 0) {
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return;
		}
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);

	/* Device is not on the list, add it. */
	device = kmalloc(sizeof(*device), GFP_KERNEL);
	if (!device) {
		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
		return;
	}
	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_add_tail(&device->offline_list, &h->offline_device_list);
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *sd)
{
	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
	switch (sd->volume_offline) {
	case HPSA_LV_OK:
		break;
	case HPSA_LV_UNDERGOING_ERASE:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_RPI:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_NO_KEY:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
		dev_info(&h->pdev->dev,
			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
			h->scsi_host->host_no,
			sd->bus, sd->target, sd->lun);
		break;
	}
}

/*
 * Figure the list of physical drive pointers for a logical drive with
 * raid offload configured.
 */
static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices,
				struct hpsa_scsi_dev_t *logical_drive)
{
	struct raid_map_data *map = &logical_drive->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int i, j;
	int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
				le16_to_cpu(map->metadata_disks_per_row);
	int nraid_map_entries = le16_to_cpu(map->row_cnt) *
				le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int nphys_disk = le16_to_cpu(map->layout_map_count) *
				total_disks_per_row;
	int qdepth;

	if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
		nraid_map_entries = RAID_MAP_MAX_ENTRIES;

	qdepth = 0;
	for (i = 0; i < nraid_map_entries; i++) {
		logical_drive->phys_disk[i] = NULL;
		if (!logical_drive->offload_config)
			continue;
		for (j = 0; j < ndevices; j++) {
			if (dev[j]->devtype != TYPE_DISK)
				continue;
			if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
				continue;
			if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
				continue;

			logical_drive->phys_disk[i] = dev[j];
			if (i < nphys_disk)
				qdepth = min(h->nr_cmds, qdepth +
				    logical_drive->phys_disk[i]->queue_depth);
			break;
		}

		/*
		 * This can happen if a physical drive is removed and
		 * the logical drive is degraded. In that case, the RAID
		 * map data will refer to a physical disk which isn't actually
		 * present. And in that case offload_enabled should already
		 * be 0, but we'll turn it off here just in case
		 */
		if (!logical_drive->phys_disk[i]) {
			logical_drive->offload_enabled = 0;
			logical_drive->queue_depth = h->nr_cmds;
		}
	}
	if (nraid_map_entries)
		/*
		 * This is correct for reads, too high for full stripe writes,
		 * way too high for partial stripe writes
		 */
		logical_drive->queue_depth = qdepth;
	else
		logical_drive->queue_depth = h->nr_cmds;
}

static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
				struct hpsa_scsi_dev_t *dev[], int ndevices)
{
	int i;

	for (i = 0; i < ndevices; i++) {
		if (dev[i]->devtype != TYPE_DISK)
			continue;
		if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
			continue;
		hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
	}
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 * already.
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;

		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS
		 * as the SCSI mid-layer does not handle such devices well.
		 * It relentlessly loops sending TUR at 3Hz, then READ(10)
		 * at 160Hz, and prevents the system from coming up.
		 */
		if (sd[i]->volume_offline) {
			hpsa_show_volume_status(h, sd[i]);
			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
				h->scsi_host->host_no,
				sd[i]->bus, sd[i]->target, sd[i]->lun);
			continue;
		}

		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Monitor devices which are in one of several NOT READY states to be
	 * brought online later. This must be done without holding h->devlock,
	 * so don't touch h->dev[]
	 */
	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		if (sd[i]->volume_offline)
			hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
	}

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL) {
		sdev->hostdata = sd;
		if (sd->queue_depth)
			scsi_change_queue_depth(sdev, sd->queue_depth);
		atomic_set(&sd->ioaccel_cmds_out, 0);
	}
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list) {
		dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
		return -ENOMEM;
	}
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i]) {
			dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
			goto clean;
		}
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}
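
/*
 * When a command needs more SG entries than fit in the CommandList itself,
 * the last on-board descriptor is converted into a chain pointer
 * (HPSA_SG_CHAIN) to one of the per-command blocks allocated above, which
 * holds the remaining descriptors and is DMA-mapped separately.
 */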
static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;
	u32 chain_len;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
	chain_len = sizeof(*chain_sg) *
		(le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
	chain_sg->Len = cpu_to_le32(chain_len);
	temp64 = pci_map_single(h->pdev, chain_block, chain_len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr = cpu_to_le64(0);
		return -1;
	}
	chain_sg->Addr = cpu_to_le64(temp64);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;

	if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
			le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
}

/* Decode the various types of errors on ioaccel2 path.
 * Return 1 for any error that should generate a RAID path retry.
 * Return 0 for errors that don't require a RAID path retry.
 */
static int handle_ioaccel_mode2_error(struct ctlr_info *h,
					struct CommandList *c,
					struct scsi_cmnd *cmd,
					struct io_accel2_cmd *c2)
{
	int data_len;
	int retry = 0;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			dev_warn(&h->pdev->dev,
				"%s: task complete with check condition.\n",
				"HP SSD Smart Path");
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT) {
				memset(cmd->sense_buffer, 0,
					SCSI_SENSE_BUFFERSIZE);
				break;
			}
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			dev_warn(&h->pdev->dev,
				"%s: task complete with BUSY status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			dev_warn(&h->pdev->dev,
				"%s: task complete with reservation conflict.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			/* Make scsi midlayer do unlimited retries */
			cmd->result = DID_IMM_RETRY << 16;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			dev_warn(&h->pdev->dev,
				"%s: task complete with aborted status.\n",
				"HP SSD Smart Path");
			retry = 1;
			break;
		default:
			dev_warn(&h->pdev->dev,
				"%s: task complete with unrecognized status: 0x%02x\n",
				"HP SSD Smart Path", c2->error_data.status);
			retry = 1;
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		/* don't expect to get here. */
		dev_warn(&h->pdev->dev,
			"unexpected delivery or target failure, status = 0x%02x\n",
			c2->error_data.status);
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		dev_warn(&h->pdev->dev, "task management function rejected.\n");
		retry = 1;
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Unrecognized server response: 0x%02x\n",
			"HP SSD Smart Path",
			c2->error_data.serv_response);
		retry = 1;
		break;
	}

	return retry;	/* retry on raid path? */
}

static void process_ioaccel2_completion(struct ctlr_info *h,
		struct CommandList *c, struct scsi_cmnd *cmd,
		struct hpsa_scsi_dev_t *dev)
{
	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* check for good status */
	if (likely(c2->error_data.serv_response == 0 &&
			c2->error_data.status == 0)) {
		cmd_free(h, c);
		cmd->scsi_done(cmd);
		return;
	}

	/* Any RAID offload error results in retry which will use
	 * the normal I/O path so the controller can handle whatever's
	 * wrong.
	 */
	if (is_logical_dev_addr_mode(dev->scsi3addr) &&
		c2->error_data.serv_response ==
			IOACCEL2_SERV_RESPONSE_FAILURE) {
		if (c2->error_data.status ==
			IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
			dev->offload_enabled = 0;
		goto retry_cmd;
	}

	if (handle_ioaccel_mode2_error(h, c, cmd, c2))
		goto retry_cmd;

	cmd_free(h, c);
	cmd->scsi_done(cmd);
	return;

retry_cmd:
	INIT_WORK(&c->work, hpsa_command_resubmit_worker);
	queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
}
1754 static void complete_scsi_command(struct CommandList *cp)
1756 struct scsi_cmnd *cmd;
1757 struct ctlr_info *h;
1758 struct ErrorInfo *ei;
1759 struct hpsa_scsi_dev_t *dev;
1761 unsigned char sense_key;
1762 unsigned char asc; /* additional sense code */
1763 unsigned char ascq; /* additional sense code qualifier */
1764 unsigned long sense_data_size;
1767 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1769 dev = cmd->device->hostdata;
1771 scsi_dma_unmap(cmd); /* undo the DMA mappings */
1772 if ((cp->cmd_type == CMD_SCSI) &&
1773 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
1774 hpsa_unmap_sg_chain_block(h, cp);
1776 cmd->result = (DID_OK << 16); /* host byte */
1777 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1779 if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
1780 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
1782 if (cp->cmd_type == CMD_IOACCEL2)
1783 return process_ioaccel2_completion(h, cp, cmd, dev);
1785 cmd->result |= ei->ScsiStatus;
1787 scsi_set_resid(cmd, ei->ResidualCnt);
1788 if (ei->CommandStatus == 0) {
1789 if (cp->cmd_type == CMD_IOACCEL1)
1790 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
1792 cmd->scsi_done(cmd);
1796 /* copy the sense data */
1797 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1798 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1800 sense_data_size = sizeof(ei->SenseInfo);
1801 if (ei->SenseLen < sense_data_size)
1802 sense_data_size = ei->SenseLen;
1804 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
1806 /* For I/O accelerator commands, copy over some fields to the normal
1807 * CISS header used below for error handling.
1809 if (cp->cmd_type == CMD_IOACCEL1) {
1810 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1811 cp->Header.SGList = scsi_sg_count(cmd);
1812 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
1813 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
1814 IOACCEL1_IOFLAGS_CDBLEN_MASK;
1815 cp->Header.tag = c->tag;
1816 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1817 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
1819 /* Any RAID offload error results in retry which will use
1820 * the normal I/O path so the controller can handle whatever's
1823 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1824 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1825 dev->offload_enabled = 0;
1826 INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
1827 queue_work_on(raw_smp_processor_id(),
1828 h->resubmit_wq, &cp->work);
1833 /* an error has occurred */
1834 switch (ei->CommandStatus) {
1836 case CMD_TARGET_STATUS:
1837 if (ei->ScsiStatus) {
1839 sense_key = 0xf & ei->SenseInfo[2];
1840 /* Get additional sense code */
1841 asc = ei->SenseInfo[12];
1842 /* Get addition sense code qualifier */
1843 ascq = ei->SenseInfo[13];
1845 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1846 if (sense_key == ABORTED_COMMAND) {
1847 cmd->result |= DID_SOFT_ERROR << 16;
1852 /* Problem was not a check condition
1853 * Pass it up to the upper layers...
1855 if (ei->ScsiStatus) {
1856 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1857 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1858 "Returning result: 0x%x\n",
1860 sense_key, asc, ascq,
1862 } else { /* scsi status is zero??? How??? */
1863 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1864 "Returning no connection.\n", cp),
1866 /* Ordinarily, this case should never happen,
1867 * but there is a bug in some released firmware
1868 * revisions that allows it to happen if, for
1869 * example, a 4100 backplane loses power and
1870 * the tape drive is in it. We assume that
1871 * it's a fatal error of some kind because we
1872 * can't show that it wasn't. We will make it
1873 * look like selection timeout since that is
1874 * the most common reason for this to occur,
1875 * and it's severe enough.
1878 cmd->result = DID_NO_CONNECT << 16;
1882 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1884 case CMD_DATA_OVERRUN:
1885 		dev_warn(&h->pdev->dev, "cp %p has"
1886 			" completed with data overrun "
1887 			"reported\n", cp);
1888 		break;
1889 	case CMD_INVALID: {
1890 		/* print_bytes(cp, sizeof(*cp), 1, 0);
1891 		print_cmd(cp); */
1892 /* We get CMD_INVALID if you address a non-existent device
1893 * instead of a selection timeout (no response). You will
1894 * see this if you yank out a drive, then try to access it.
1895 * This is kind of a shame because it means that any other
1896 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1897 * missing target. */
1898 cmd->result = DID_NO_CONNECT << 16;
1901 case CMD_PROTOCOL_ERR:
1902 cmd->result = DID_ERROR << 16;
1903 dev_warn(&h->pdev->dev, "cp %p has "
1904 "protocol error\n", cp);
1906 case CMD_HARDWARE_ERR:
1907 cmd->result = DID_ERROR << 16;
1908 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1910 case CMD_CONNECTION_LOST:
1911 cmd->result = DID_ERROR << 16;
1912 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1913 		break;
1914 	case CMD_ABORTED:
1915 		cmd->result = DID_ABORT << 16;
1916 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1917 cp, ei->ScsiStatus);
1919 case CMD_ABORT_FAILED:
1920 cmd->result = DID_ERROR << 16;
1921 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1923 case CMD_UNSOLICITED_ABORT:
1924 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1925 		dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1926 			"abort\n", cp);
1927 		break;
1928 	case CMD_TIMEOUT:
1929 		cmd->result = DID_TIME_OUT << 16;
1930 		dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1932 case CMD_UNABORTABLE:
1933 cmd->result = DID_ERROR << 16;
1934 dev_warn(&h->pdev->dev, "Command unabortable\n");
1936 case CMD_IOACCEL_DISABLED:
1937 /* This only handles the direct pass-through case since RAID
1938 * offload is handled above. Just attempt a retry.
1940 cmd->result = DID_SOFT_ERROR << 16;
1941 dev_warn(&h->pdev->dev,
1942 "cp %p had HP SSD Smart Path error\n", cp);
1943 		break;
1944 	default:
1945 		cmd->result = DID_ERROR << 16;
1946 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1947 cp, ei->CommandStatus);
1950 cmd->scsi_done(cmd);
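/* A minimal usage sketch (illustrative, not from the original source):
 * hpsa_pci_unmap() below undoes the streaming mappings that hpsa_map_one()
 * set up, using the same direction flag that was used for mapping:
 *
 *	if (hpsa_map_one(pdev, c, buf, buflen, PCI_DMA_FROMDEVICE) == 0) {
 *		... issue the command and wait for completion ...
 *		hpsa_pci_unmap(pdev, c, 1, PCI_DMA_FROMDEVICE);
 *	}
 */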
1953 static void hpsa_pci_unmap(struct pci_dev *pdev,
1954 struct CommandList *c, int sg_used, int data_direction)
1958 for (i = 0; i < sg_used; i++)
1959 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
1960 			le32_to_cpu(c->SG[i].Len),
1961 			data_direction);
1964 static int hpsa_map_one(struct pci_dev *pdev,
1965 	struct CommandList *cp,
1966 	unsigned char *buf,
1967 	size_t buflen,
1968 	int data_direction)
1972 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1973 cp->Header.SGList = 0;
1974 cp->Header.SGTotal = cpu_to_le16(0);
1978 addr64 = pci_map_single(pdev, buf, buflen, data_direction);
1979 if (dma_mapping_error(&pdev->dev, addr64)) {
1980 /* Prevent subsequent unmap of something never mapped */
1981 cp->Header.SGList = 0;
1982 cp->Header.SGTotal = cpu_to_le16(0);
1985 cp->SG[0].Addr = cpu_to_le64(addr64);
1986 cp->SG[0].Len = cpu_to_le32(buflen);
1987 cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
1988 cp->Header.SGList = 1; /* no. SGs contig in this cmd */
1989 cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
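/* The core helper below issues a command synchronously: it parks an on-stack
 * completion in c->waiting, starts the I/O, and sleeps until the interrupt
 * handler signals completion. Because it sleeps, it may only be called from
 * process context.
 */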
1993 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1994 struct CommandList *c)
1996 DECLARE_COMPLETION_ONSTACK(wait);
1997 	c->waiting = &wait;
1999 	enqueue_cmd_and_start_io(h, c);
2000 wait_for_completion(&wait);
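/* lockup_detected() below samples the lockup flag for the current CPU. The
 * flag is kept in per-cpu storage so the hot I/O paths can poll it without
 * bouncing a shared cache line between processors.
 */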
2003 static u32 lockup_detected(struct ctlr_info *h)
2005 	int cpu;
2006 	u32 rc, *lockup_detected;
2008 	cpu = get_cpu();
2009 	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2010 	rc = *lockup_detected;
2011 	put_cpu();
2012 	return rc;
2015 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
2016 struct CommandList *c)
2018 /* If controller lockup detected, fake a hardware error. */
2019 if (unlikely(lockup_detected(h)))
2020 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
2021 	else
2022 		hpsa_scsi_do_simple_cmd_core(h, c);
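/* The retry wrapper below reissues a command for as long as the target
 * reports unit attention or busy status. After the third attempt it sleeps
 * between tries, doubling the delay from 10 ms up to a 1000 ms ceiling, and
 * it gives up entirely after MAX_DRIVER_CMD_RETRIES attempts.
 */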
2025 #define MAX_DRIVER_CMD_RETRIES 25
2026 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2027 struct CommandList *c, int data_direction)
2029 int backoff_time = 10, retry_count = 0;
2031 	do {
2032 		memset(c->err_info, 0, sizeof(*c->err_info));
2033 		hpsa_scsi_do_simple_cmd_core(h, c);
2034 		retry_count++;
2035 		if (retry_count > 3) {
2036 			msleep(backoff_time);
2037 			if (backoff_time < 1000)
2038 				backoff_time *= 2;
2039 		}
2040 	} while ((check_for_unit_attention(h, c) ||
2041 check_for_busy(h, c)) &&
2042 retry_count <= MAX_DRIVER_CMD_RETRIES);
2043 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2046 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2047 struct CommandList *c)
2049 const u8 *cdb = c->Request.CDB;
2050 const u8 *lun = c->Header.LUN.LunAddrBytes;
2052 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2053 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2054 txt, lun[0], lun[1], lun[2], lun[3],
2055 lun[4], lun[5], lun[6], lun[7],
2056 cdb[0], cdb[1], cdb[2], cdb[3],
2057 cdb[4], cdb[5], cdb[6], cdb[7],
2058 cdb[8], cdb[9], cdb[10], cdb[11],
2059 cdb[12], cdb[13], cdb[14], cdb[15]);
2062 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2063 struct CommandList *cp)
2065 const struct ErrorInfo *ei = cp->err_info;
2066 struct device *d = &cp->h->pdev->dev;
2067 const u8 *sd = ei->SenseInfo;
2069 switch (ei->CommandStatus) {
2070 case CMD_TARGET_STATUS:
2071 hpsa_print_cmd(h, "SCSI status", cp);
2072 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2073 dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
2074 sd[2] & 0x0f, sd[12], sd[13]);
2076 dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
2077 if (ei->ScsiStatus == 0)
2078 dev_warn(d, "SCSI status is abnormally zero. "
2079 "(probably indicates selection timeout "
2080 "reported incorrectly due to a known "
2081 "firmware bug, circa July, 2001.)\n");
2083 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2085 case CMD_DATA_OVERRUN:
2086 hpsa_print_cmd(h, "overrun condition", cp);
2087 		break;
2088 	case CMD_INVALID:
2089 		/* controller unfortunately reports SCSI passthru's
2090 		 * to non-existent targets as invalid commands.
2091 		 */
2092 hpsa_print_cmd(h, "invalid command", cp);
2093 dev_warn(d, "probably means device no longer present\n");
2096 case CMD_PROTOCOL_ERR:
2097 hpsa_print_cmd(h, "protocol error", cp);
2099 case CMD_HARDWARE_ERR:
2100 hpsa_print_cmd(h, "hardware error", cp);
2102 case CMD_CONNECTION_LOST:
2103 hpsa_print_cmd(h, "connection lost", cp);
2106 hpsa_print_cmd(h, "aborted", cp);
2108 case CMD_ABORT_FAILED:
2109 hpsa_print_cmd(h, "abort failed", cp);
2111 case CMD_UNSOLICITED_ABORT:
2112 hpsa_print_cmd(h, "unsolicited abort", cp);
2115 hpsa_print_cmd(h, "timed out", cp);
2117 case CMD_UNABORTABLE:
2118 hpsa_print_cmd(h, "unabortable", cp);
2121 hpsa_print_cmd(h, "unknown status", cp);
2122 dev_warn(d, "Unknown command status %x\n",
2127 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2128 u16 page, unsigned char *buf,
2129 unsigned char bufsize)
2132 struct CommandList *c;
2133 struct ErrorInfo *ei;
2138 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2142 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2143 page, scsi3addr, TYPE_CMD)) {
2147 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2149 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2150 hpsa_scsi_interpret_error(h, c);
2158 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2159 unsigned char *scsi3addr, unsigned char page,
2160 struct bmic_controller_parameters *buf, size_t bufsize)
2163 struct CommandList *c;
2164 struct ErrorInfo *ei;
2167 if (c == NULL) { /* trouble... */
2168 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2172 if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2173 page, scsi3addr, TYPE_CMD)) {
2177 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2179 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2180 hpsa_scsi_interpret_error(h, c);
2188 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2189 	u8 reset_type)
2192 struct CommandList *c;
2193 struct ErrorInfo *ei;
2197 if (c == NULL) { /* trouble... */
2198 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2202 /* fill_cmd can't fail here, no data buffer to map. */
2203 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2204 scsi3addr, TYPE_MSG);
2205 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2206 hpsa_scsi_do_simple_cmd_core(h, c);
2207 /* no unmap needed here because no data xfer. */
2210 if (ei->CommandStatus != 0) {
2211 hpsa_scsi_interpret_error(h, c);
2218 static void hpsa_get_raid_level(struct ctlr_info *h,
2219 unsigned char *scsi3addr, unsigned char *raid_level)
2224 *raid_level = RAID_UNKNOWN;
2225 buf = kzalloc(64, GFP_KERNEL);
2228 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2229 	if (rc == 0)
2230 		*raid_level = buf[8];
2231 if (*raid_level > RAID_UNKNOWN)
2232 *raid_level = RAID_UNKNOWN;
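/* RAID-map debugging support: with HPSA_MAP_DEBUG defined, the
 * hpsa_debug_map_buff() below dumps the fetched raid_map_data layout
 * (strip size, rows, disks per row, per-disk ioaccel handles and XOR
 * multipliers) when h->raid_offload_debug >= 2; otherwise an empty stub is
 * compiled so that callers need no #ifdefs.
 */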
2237 #define HPSA_MAP_DEBUG
2238 #ifdef HPSA_MAP_DEBUG
2239 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2240 struct raid_map_data *map_buff)
2242 struct raid_map_disk_data *dd = &map_buff->data[0];
2244 u16 map_cnt, row_cnt, disks_per_row;
2249 /* Show details only if debugging has been activated. */
2250 	if (h->raid_offload_debug < 2)
2251 		return;
2253 dev_info(&h->pdev->dev, "structure_size = %u\n",
2254 le32_to_cpu(map_buff->structure_size));
2255 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2256 le32_to_cpu(map_buff->volume_blk_size));
2257 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2258 le64_to_cpu(map_buff->volume_blk_cnt));
2259 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2260 map_buff->phys_blk_shift);
2261 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2262 map_buff->parity_rotation_shift);
2263 dev_info(&h->pdev->dev, "strip_size = %u\n",
2264 le16_to_cpu(map_buff->strip_size));
2265 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2266 le64_to_cpu(map_buff->disk_starting_blk));
2267 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2268 le64_to_cpu(map_buff->disk_blk_cnt));
2269 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2270 le16_to_cpu(map_buff->data_disks_per_row));
2271 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2272 le16_to_cpu(map_buff->metadata_disks_per_row));
2273 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2274 le16_to_cpu(map_buff->row_cnt));
2275 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2276 le16_to_cpu(map_buff->layout_map_count));
2277 dev_info(&h->pdev->dev, "flags = 0x%x\n",
2278 le16_to_cpu(map_buff->flags));
2279 dev_info(&h->pdev->dev, "encrypytion = %s\n",
2280 le16_to_cpu(map_buff->flags) &
2281 RAID_MAP_FLAG_ENCRYPT_ON ? "ON" : "OFF");
2282 dev_info(&h->pdev->dev, "dekindex = %u\n",
2283 le16_to_cpu(map_buff->dekindex));
2284 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2285 for (map = 0; map < map_cnt; map++) {
2286 dev_info(&h->pdev->dev, "Map%u:\n", map);
2287 row_cnt = le16_to_cpu(map_buff->row_cnt);
2288 for (row = 0; row < row_cnt; row++) {
2289 dev_info(&h->pdev->dev, " Row%u:\n", row);
2290 			disks_per_row =
2291 				le16_to_cpu(map_buff->data_disks_per_row);
2292 for (col = 0; col < disks_per_row; col++, dd++)
2293 dev_info(&h->pdev->dev,
2294 " D%02u: h=0x%04x xor=%u,%u\n",
2295 col, dd->ioaccel_handle,
2296 dd->xor_mult[0], dd->xor_mult[1]);
2297 			disks_per_row =
2298 				le16_to_cpu(map_buff->metadata_disks_per_row);
2299 for (col = 0; col < disks_per_row; col++, dd++)
2300 dev_info(&h->pdev->dev,
2301 " M%02u: h=0x%04x xor=%u,%u\n",
2302 col, dd->ioaccel_handle,
2303 dd->xor_mult[0], dd->xor_mult[1]);
2306 #else /* HPSA_MAP_DEBUG */
2308 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2309 __attribute__((unused)) int rc,
2310 __attribute__((unused)) struct raid_map_data *map_buff)
2315 static int hpsa_get_raid_map(struct ctlr_info *h,
2316 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2319 struct CommandList *c;
2320 struct ErrorInfo *ei;
2324 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2327 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2328 sizeof(this_device->raid_map), 0,
2329 scsi3addr, TYPE_CMD)) {
2330 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
2334 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2336 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2337 hpsa_scsi_interpret_error(h, c);
2343 /* @todo in the future, dynamically allocate RAID map memory */
2344 if (le32_to_cpu(this_device->raid_map.structure_size) >
2345 sizeof(this_device->raid_map)) {
2346 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2349 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2353 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
2354 unsigned char scsi3addr[], u16 bmic_device_index,
2355 struct bmic_identify_physical_device *buf, size_t bufsize)
2358 struct CommandList *c;
2359 struct ErrorInfo *ei;
2362 rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
2363 0, RAID_CTLR_LUNID, TYPE_CMD);
2367 c->Request.CDB[2] = bmic_device_index & 0xff;
2368 c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
2370 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2372 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2373 hpsa_scsi_interpret_error(h, c);
2381 static int hpsa_vpd_page_supported(struct ctlr_info *h,
2382 unsigned char scsi3addr[], u8 page)
2387 unsigned char *buf, bufsize;
2389 buf = kzalloc(256, GFP_KERNEL);
2393 /* Get the size of the page list first */
2394 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2395 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2396 buf, HPSA_VPD_HEADER_SZ);
2397 	if (rc != 0)
2398 		goto exit_unsupported;
2399 	pages = buf[3];
2400 	if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2401 		bufsize = pages + HPSA_VPD_HEADER_SZ;
2402 	else
2403 		bufsize = 255;
2405 /* Get the whole VPD page list */
2406 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2407 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2408 		buf, bufsize);
2409 	if (rc != 0)
2410 		goto exit_unsupported;
2413 for (i = 1; i <= pages; i++)
2414 if (buf[3 + i] == page)
2415 goto exit_supported;
2424 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2425 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2431 this_device->offload_config = 0;
2432 this_device->offload_enabled = 0;
2434 buf = kzalloc(64, GFP_KERNEL);
2437 	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2438 		goto out;
2439 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2440 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2444 #define IOACCEL_STATUS_BYTE 4
2445 #define OFFLOAD_CONFIGURED_BIT 0x01
2446 #define OFFLOAD_ENABLED_BIT 0x02
2447 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2448 this_device->offload_config =
2449 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2450 if (this_device->offload_config) {
2451 this_device->offload_enabled =
2452 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2453 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2454 this_device->offload_enabled = 0;
2461 /* Get the device id from inquiry page 0x83 */
2462 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2463 unsigned char *device_id, int buflen)
2470 buf = kzalloc(64, GFP_KERNEL);
2473 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2474 	if (rc == 0)
2475 		memcpy(device_id, &buf[8], buflen);
2480 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2481 void *buf, int bufsize,
2482 int extended_response)
2485 struct CommandList *c;
2486 unsigned char scsi3addr[8];
2487 struct ErrorInfo *ei;
2490 if (c == NULL) { /* trouble... */
2491 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2494 /* address the controller */
2495 memset(scsi3addr, 0, sizeof(scsi3addr));
2496 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2497 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2501 if (extended_response)
2502 c->Request.CDB[1] = extended_response;
2503 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2505 if (ei->CommandStatus != 0 &&
2506 ei->CommandStatus != CMD_DATA_UNDERRUN) {
2507 hpsa_scsi_interpret_error(h, c);
2508 		rc = -1;
2509 	} else {
2510 		struct ReportLUNdata *rld = buf;
2512 if (rld->extended_response_flag != extended_response) {
2513 dev_err(&h->pdev->dev,
2514 "report luns requested format %u, got %u\n",
2515 				extended_response,
2516 				rld->extended_response_flag);
2525 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2526 struct ReportExtendedLUNdata *buf, int bufsize)
2528 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
2529 HPSA_REPORT_PHYS_EXTENDED);
2532 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2533 struct ReportLUNdata *buf, int bufsize)
2535 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2538 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2539 int bus, int target, int lun)
2541 	device->bus = bus;
2542 	device->target = target;
2543 	device->lun = lun;
2546 /* Use VPD inquiry to get details of volume status */
2547 static int hpsa_get_volume_status(struct ctlr_info *h,
2548 unsigned char scsi3addr[])
2555 buf = kzalloc(64, GFP_KERNEL);
2556 	if (!buf)
2557 		return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2559 /* Does controller have VPD for logical volume status? */
2560 	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
2561 		goto exit_failed;
2563 /* Get the size of the VPD return buffer */
2564 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2565 buf, HPSA_VPD_HEADER_SZ);
2570 /* Now get the whole VPD buffer */
2571 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2572 buf, size + HPSA_VPD_HEADER_SZ);
2575 status = buf[4]; /* status byte */
2581 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2584 /* Determine offline status of a volume.
2585  * Return either:
2586  *  0 (not offline)
2587  * 0xff (offline for unknown reasons)
2588 * # (integer code indicating one of several NOT READY states
2589 * describing why a volume is to be kept offline)
2591 static int hpsa_volume_offline(struct ctlr_info *h,
2592 unsigned char scsi3addr[])
2594 struct CommandList *c;
2595 	unsigned char *sense, sense_key, asc, ascq;
2596 	int ldstat = 0;
2597 	u16 cmd_status;
2598 	u8 scsi_status;
2599 #define ASC_LUN_NOT_READY 0x04
2600 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2601 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2606 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2607 hpsa_scsi_do_simple_cmd_core(h, c);
2608 sense = c->err_info->SenseInfo;
2609 	sense_key = sense[2];
2610 	asc = sense[12];
2611 	ascq = sense[13];
2612 cmd_status = c->err_info->CommandStatus;
2613 scsi_status = c->err_info->ScsiStatus;
2615 /* Is the volume 'not ready'? */
2616 if (cmd_status != CMD_TARGET_STATUS ||
2617 scsi_status != SAM_STAT_CHECK_CONDITION ||
2618 sense_key != NOT_READY ||
2619 asc != ASC_LUN_NOT_READY) {
2623 /* Determine the reason for not ready state */
2624 ldstat = hpsa_get_volume_status(h, scsi3addr);
2626 /* Keep volume offline in certain cases: */
2627 	switch (ldstat) {
2628 	case HPSA_LV_UNDERGOING_ERASE:
2629 case HPSA_LV_UNDERGOING_RPI:
2630 case HPSA_LV_PENDING_RPI:
2631 case HPSA_LV_ENCRYPTED_NO_KEY:
2632 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2633 case HPSA_LV_UNDERGOING_ENCRYPTION:
2634 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2635 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2636 		return ldstat;
2637 	case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2638 /* If VPD status page isn't available,
2639 * use ASC/ASCQ to determine state
2641 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2642 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2651 static int hpsa_update_device_info(struct ctlr_info *h,
2652 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2653 unsigned char *is_OBDR_device)
2656 #define OBDR_SIG_OFFSET 43
2657 #define OBDR_TAPE_SIG "$DR-10"
2658 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2659 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
2661 unsigned char *inq_buff;
2662 unsigned char *obdr_sig;
2664 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
2668 /* Do an inquiry to the device to see what it is. */
2669 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2670 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2671 /* Inquiry failed (msg printed already) */
2672 dev_err(&h->pdev->dev,
2673 "hpsa_update_device_info: inquiry failed\n");
2677 this_device->devtype = (inq_buff[0] & 0x1f);
2678 memcpy(this_device->scsi3addr, scsi3addr, 8);
2679 memcpy(this_device->vendor, &inq_buff[8],
2680 sizeof(this_device->vendor));
2681 memcpy(this_device->model, &inq_buff[16],
2682 sizeof(this_device->model));
2683 memset(this_device->device_id, 0,
2684 sizeof(this_device->device_id));
2685 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2686 sizeof(this_device->device_id));
2688 if (this_device->devtype == TYPE_DISK &&
2689 is_logical_dev_addr_mode(scsi3addr)) {
2692 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2693 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2694 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2695 volume_offline = hpsa_volume_offline(h, scsi3addr);
2696 if (volume_offline < 0 || volume_offline > 0xff)
2697 volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2698 this_device->volume_offline = volume_offline & 0xff;
2699 	} else {
2700 		this_device->raid_level = RAID_UNKNOWN;
2701 this_device->offload_config = 0;
2702 this_device->offload_enabled = 0;
2703 this_device->volume_offline = 0;
2704 this_device->queue_depth = h->nr_cmds;
2707 if (is_OBDR_device) {
2708 /* See if this is a One-Button-Disaster-Recovery device
2709 * by looking for "$DR-10" at offset 43 in inquiry data.
2711 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2712 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2713 strncmp(obdr_sig, OBDR_TAPE_SIG,
2714 OBDR_SIG_LEN) == 0);
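/* NULL-terminated table of inquiry model-name prefixes that identify
 * external target enclosures. is_ext_target() below walks the table until
 * the NULL sentinel, comparing each prefix against the device's reported
 * model string.
 */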
2725 static unsigned char *ext_target_model[] = {
2735 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
2739 for (i = 0; ext_target_model[i]; i++)
2740 if (strncmp(device->model, ext_target_model[i],
2741 strlen(ext_target_model[i])) == 0)
2746 /* Helper function to assign bus, target, lun mapping of devices.
2747 * Puts non-external target logical volumes on bus 0, external target logical
2748  * volumes on bus 1, physical devices on bus 2, and the hba on bus 3.
2749 * Logical drive target and lun are assigned at this time, but
2750 * physical device lun and target assignment are deferred (assigned
2751 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
2753 static void figure_bus_target_lun(struct ctlr_info *h,
2754 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
2756 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
2758 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
2759 /* physical device, target and lun filled in later */
2760 if (is_hba_lunid(lunaddrbytes))
2761 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
2762 		else
2763 			/* defer target, lun assignment for physical devices */
2764 hpsa_set_bus_target_lun(device, 2, -1, -1);
2767 /* It's a logical device */
2768 if (is_ext_target(h, device)) {
2769 		/* external target way: put logicals on bus 1
2770 		 * and match the target/lun numbers the box reports;
2771 		 * other smart arrays: bus 0, target 0, match lunid
2772 		 */
2773 hpsa_set_bus_target_lun(device,
2774 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
2775 		return;
2776 	}
2777 	hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
2780 /*
2781  * If there is no lun 0 on a target, linux won't find any devices.
2782 * For the external targets (arrays), we have to manually detect the enclosure
2783 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
2784 * it for some reason. *tmpdevice is the target we're adding,
2785 * this_device is a pointer into the current element of currentsd[]
2786 * that we're building up in update_scsi_devices(), below.
2787 * lunzerobits is a bitmap that tracks which targets already have a
2788  * lun 0 added.
2789  * Returns 1 if an enclosure was added, 0 if not.
2790  */
2791 static int add_ext_target_dev(struct ctlr_info *h,
2792 struct hpsa_scsi_dev_t *tmpdevice,
2793 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
2794 unsigned long lunzerobits[], int *n_ext_target_devs)
2796 unsigned char scsi3addr[8];
2798 if (test_bit(tmpdevice->target, lunzerobits))
2799 return 0; /* There is already a lun 0 on this target. */
2801 if (!is_logical_dev_addr_mode(lunaddrbytes))
2802 return 0; /* It's the logical targets that may lack lun 0. */
2804 if (!is_ext_target(h, tmpdevice))
2805 return 0; /* Only external target devices have this problem. */
2807 	if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
2808 		return 0;
2810 memset(scsi3addr, 0, 8);
2811 scsi3addr[3] = tmpdevice->target;
2812 if (is_hba_lunid(scsi3addr))
2813 return 0; /* Don't add the RAID controller here. */
2815 if (is_scsi_rev_5(h))
2816 return 0; /* p1210m doesn't need to do this. */
2818 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
2819 dev_warn(&h->pdev->dev, "Maximum number of external "
2820 "target devices exceeded. Check your hardware "
2825 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
2826 		return 0; /* skip it if we can't talk to it. */
2827 	(*n_ext_target_devs)++;
2828 hpsa_set_bus_target_lun(this_device,
2829 tmpdevice->bus, tmpdevice->target, 0);
2830 set_bit(tmpdevice->target, lunzerobits);
2835 * Get address of physical disk used for an ioaccel2 mode command:
2836 * 1. Extract ioaccel2 handle from the command.
2837  * 2. Find a matching ioaccel2 handle from list of physical disks.
2838  * 3. Return:
2839  *	1 and set scsi3addr to address of matching physical
2840  *	0 if no matching physical disk was found.
2841  */
2842 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2843 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2845 struct ReportExtendedLUNdata *physicals = NULL;
2846 int responsesize = 24; /* size of physical extended response */
2847 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
2848 u32 nphysicals = 0; /* number of reported physical devs */
2849 int found = 0; /* found match (1) or not (0) */
2850 u32 find; /* handle we need to match */
2851 	int i;
2852 	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
2853 struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2854 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2855 __le32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2856 __le32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2858 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2859 return 0; /* no match */
2861 /* point to the ioaccel2 device handle */
2862 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2863 	if (c2a == NULL)
2864 		return 0; /* no match */
2866 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2867 	if (scmd == NULL)
2868 		return 0; /* no match */
2870 d = scmd->device->hostdata;
2871 	if (d == NULL)
2872 		return 0; /* no match */
2874 it_nexus = cpu_to_le32(d->ioaccel_handle);
2875 scsi_nexus = c2a->scsi_nexus;
2876 find = le32_to_cpu(c2a->scsi_nexus);
2878 if (h->raid_offload_debug > 0)
2879 dev_info(&h->pdev->dev,
2880 "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
2881 __func__, scsi_nexus,
2882 d->device_id[0], d->device_id[1], d->device_id[2],
2883 d->device_id[3], d->device_id[4], d->device_id[5],
2884 d->device_id[6], d->device_id[7], d->device_id[8],
2885 d->device_id[9], d->device_id[10], d->device_id[11],
2886 			d->device_id[12], d->device_id[13], d->device_id[14],
2887 			d->device_id[15]);
2889 /* Get the list of physical devices */
2890 physicals = kzalloc(reportsize, GFP_KERNEL);
2891 if (physicals == NULL)
2892 		return 0;
2893 	if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) {
2894 dev_err(&h->pdev->dev,
2895 "Can't lookup %s device handle: report physical LUNs failed.\n",
2896 "HP SSD Smart Path");
2900 	nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2901 			responsesize;
2903 /* find ioaccel2 handle in list of physicals: */
2904 for (i = 0; i < nphysicals; i++) {
2905 struct ext_report_lun_entry *entry = &physicals->LUN[i];
2907 /* handle is in bytes 28-31 of each lun */
2908 if (entry->ioaccel_handle != find)
2909 continue; /* didn't match */
2910 		found = 1;
2911 		memcpy(scsi3addr, entry->lunid, 8);
2912 if (h->raid_offload_debug > 0)
2913 dev_info(&h->pdev->dev,
2914 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
2916 entry->ioaccel_handle, scsi3addr);
2917 break; /* found it */
2928 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
2929 * logdev. The number of luns in physdev and logdev are returned in
2930 * *nphysicals and *nlogicals, respectively.
2931 * Returns 0 on success, -1 otherwise.
2933 static int hpsa_gather_lun_info(struct ctlr_info *h,
2934 struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
2935 struct ReportLUNdata *logdev, u32 *nlogicals)
2937 if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
2938 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2941 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
2942 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
2943 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
2944 HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
2945 *nphysicals = HPSA_MAX_PHYS_LUN;
2947 if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
2948 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2951 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
2952 /* Reject Logicals in excess of our max capability. */
2953 if (*nlogicals > HPSA_MAX_LUN) {
2954 dev_warn(&h->pdev->dev,
2955 "maximum logical LUNs (%d) exceeded. "
2956 "%d LUNs ignored.\n", HPSA_MAX_LUN,
2957 *nlogicals - HPSA_MAX_LUN);
2958 *nlogicals = HPSA_MAX_LUN;
2960 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
2961 dev_warn(&h->pdev->dev,
2962 "maximum logical + physical LUNs (%d) exceeded. "
2963 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2964 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
2965 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
2970 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
2971 int i, int nphysicals, int nlogicals,
2972 struct ReportExtendedLUNdata *physdev_list,
2973 struct ReportLUNdata *logdev_list)
2975 /* Helper function, figure out where the LUN ID info is coming from
2976 * given index i, lists of physical and logical devices, where in
2977 	 * the list the raid controller is supposed to appear (first or last)
2978 	 */
2980 int logicals_start = nphysicals + (raid_ctlr_position == 0);
2981 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
2983 if (i == raid_ctlr_position)
2984 return RAID_CTLR_LUNID;
2986 if (i < logicals_start)
2987 return &physdev_list->LUN[i -
2988 (raid_ctlr_position == 0)].lunid[0];
2990 if (i < last_device)
2991 return &logdev_list->LUN[i - nphysicals -
2992 (raid_ctlr_position == 0)][0];
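/* Worked example (hypothetical counts, not from the original source): with
 * nphysicals = 4, nlogicals = 2 and raid_ctlr_position = 0, index 0 yields
 * RAID_CTLR_LUNID, indexes 1-4 map to physdev_list->LUN[0..3], and indexes
 * 5-6 map to logdev_list->LUN[0..1]. With raid_ctlr_position = 6, the
 * controller instead appears after the four physical and two logical LUNs.
 */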
2997 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
2999 	int rc;
3000 	int hba_mode_enabled;
3001 struct bmic_controller_parameters *ctlr_params;
3002 ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters),
3003 			GFP_KERNEL);
3004 	if (!ctlr_params)
3005 		return -ENOMEM;
3007 	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
3008 sizeof(struct bmic_controller_parameters));
3009 	if (rc) {
3010 		kfree(ctlr_params);
3011 		return rc;
3012 	}
3014 	hba_mode_enabled =
3015 		((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
3016 	kfree(ctlr_params);
3017 	return hba_mode_enabled;
3020 /* get physical drive ioaccel handle and queue depth */
3021 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3022 struct hpsa_scsi_dev_t *dev,
3023 		u8 *lunaddrbytes,
3024 		struct bmic_identify_physical_device *id_phys)
3027 struct ext_report_lun_entry *rle =
3028 (struct ext_report_lun_entry *) lunaddrbytes;
3030 dev->ioaccel_handle = rle->ioaccel_handle;
3031 memset(id_phys, 0, sizeof(*id_phys));
3032 rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
3033 GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
3034 		sizeof(*id_phys));
3035 	if (!rc)
3036 		/* Reserve space for FW operations */
3037 #define DRIVE_CMDS_RESERVED_FOR_FW 2
3038 #define DRIVE_QUEUE_DEPTH 7
3039 		dev->queue_depth =
3040 			le16_to_cpu(id_phys->current_queue_depth_limit) -
3041 			DRIVE_CMDS_RESERVED_FOR_FW;
3042 	else
3043 		dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3044 atomic_set(&dev->ioaccel_cmds_out, 0);
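/* Worked example (hypothetical values): if the identify data reports
 * current_queue_depth_limit = 32, the exposed queue depth becomes
 * 32 - DRIVE_CMDS_RESERVED_FOR_FW = 30; if the BMIC command failed, the
 * conservative DRIVE_QUEUE_DEPTH of 7 is used instead.
 */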
3047 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3049 /* the idea here is we could get notified
3050 * that some devices have changed, so we do a report
3051 * physical luns and report logical luns cmd, and adjust
3052 * our list of devices accordingly.
3054 * The scsi3addr's of devices won't change so long as the
3055 * adapter is not reset. That means we can rescan and
3056 * tell which devices we already know about, vs. new
3057 	 * devices, vs. disappearing devices.
3058 	 */
3059 struct ReportExtendedLUNdata *physdev_list = NULL;
3060 struct ReportLUNdata *logdev_list = NULL;
3061 struct bmic_identify_physical_device *id_phys = NULL;
3062 	u32 nphysicals = 0;
3063 	u32 nlogicals = 0;
3064 	u32 ndev_allocated = 0;
3065 	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3066 	int ncurrent = 0;
3067 	int i, n_ext_target_devs, ndevs_to_allocate;
3068 int raid_ctlr_position;
3069 int rescan_hba_mode;
3070 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3072 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3073 physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3074 logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3075 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3076 id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3078 if (!currentsd || !physdev_list || !logdev_list ||
3079 !tmpdevice || !id_phys) {
3080 dev_err(&h->pdev->dev, "out of memory\n");
3083 memset(lunzerobits, 0, sizeof(lunzerobits));
3085 rescan_hba_mode = hpsa_hba_mode_enabled(h);
3086 	if (rescan_hba_mode < 0)
3087 		goto out;
3089 if (!h->hba_mode_enabled && rescan_hba_mode)
3090 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3091 else if (h->hba_mode_enabled && !rescan_hba_mode)
3092 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3094 h->hba_mode_enabled = rescan_hba_mode;
3096 if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3097 		logdev_list, &nlogicals))
3098 		goto out;
3100 /* We might see up to the maximum number of logical and physical disks
3101 	 * plus external target devices, and a device for the local RAID
3102 	 * controller.
3103 	 */
3104 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3106 /* Allocate the per device structures */
3107 for (i = 0; i < ndevs_to_allocate; i++) {
3108 if (i >= HPSA_MAX_DEVICES) {
3109 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3110 " %d devices ignored.\n", HPSA_MAX_DEVICES,
3111 ndevs_to_allocate - HPSA_MAX_DEVICES);
3115 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3116 if (!currentsd[i]) {
3117 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3118 __FILE__, __LINE__);
3124 if (is_scsi_rev_5(h))
3125 raid_ctlr_position = 0;
3126 	else
3127 		raid_ctlr_position = nphysicals + nlogicals;
3129 /* adjust our table of devices */
3130 n_ext_target_devs = 0;
3131 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3132 u8 *lunaddrbytes, is_OBDR = 0;
3134 /* Figure out where the LUN ID info is coming from */
3135 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3136 i, nphysicals, nlogicals, physdev_list, logdev_list);
3137 /* skip masked physical devices. */
3138 if (lunaddrbytes[3] & 0xC0 &&
3139 			i < nphysicals + (raid_ctlr_position == 0))
3140 			continue;
3142 /* Get device type, vendor, model, device id */
3143 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3144 				&is_OBDR))
3145 			continue; /* skip it if we can't talk to it. */
3146 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3147 this_device = currentsd[ncurrent];
3150 * For external target devices, we have to insert a LUN 0 which
3151 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3152 * is nonetheless an enclosure device there. We have to
3153 * present that otherwise linux won't find anything if
3154 * there is no lun 0.
3156 if (add_ext_target_dev(h, tmpdevice, this_device,
3157 lunaddrbytes, lunzerobits,
3158 &n_ext_target_devs)) {
3159 			ncurrent++;
3160 			this_device = currentsd[ncurrent];
3163 *this_device = *tmpdevice;
3165 switch (this_device->devtype) {
3166 		case TYPE_ROM:
3167 			/* We don't *really* support actual CD-ROM devices,
3168 			 * just "One Button Disaster Recovery" tape drive
3169 			 * which temporarily pretends to be a CD-ROM drive.
3170 			 * So we check that the device is really an OBDR tape
3171 			 * device by checking for "$DR-10" in bytes 43-48 of
3172 			 * the inquiry data.
3173 			 */
3174 			if (is_OBDR)
3175 				ncurrent++;
3176 			break;
3177 		case TYPE_DISK:
3178 if (h->hba_mode_enabled) {
3179 /* never use raid mapper in HBA mode */
3180 this_device->offload_enabled = 0;
3183 } else if (h->acciopath_status) {
3184 if (i >= nphysicals) {
3194 if (h->transMethod & CFGTBL_Trans_io_accel1 ||
3195 h->transMethod & CFGTBL_Trans_io_accel2) {
3196 hpsa_get_ioaccel_drive_info(h, this_device,
3197 lunaddrbytes, id_phys);
3198 atomic_set(&this_device->ioaccel_cmds_out, 0);
3202 		case TYPE_TAPE:
3203 		case TYPE_MEDIUM_CHANGER:
3204 			ncurrent++;
3205 			break;
3206 		case TYPE_RAID:
3207 /* Only present the Smartarray HBA as a RAID controller.
3208 * If it's a RAID controller other than the HBA itself
3209 			 * (an external RAID controller, MSA500 or similar)
3210 			 * don't present it.
3211 			 */
3212 			if (!is_hba_lunid(lunaddrbytes))
3213 				break;
3214 			ncurrent++;
3215 			break;
3216 		default:
3217 			break;
3218 		}
3219 		if (ncurrent >= HPSA_MAX_DEVICES)
3220 			break;
3222 hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent);
3223 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3224 out:
3225 	kfree(tmpdevice);
3226 	for (i = 0; i < ndev_allocated; i++)
3227 		kfree(currentsd[i]);
3228 	kfree(currentsd);
3229 	kfree(physdev_list);
3235 * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
3236  * dma mapping and fills in the scatter gather entries of the
3237  * hpsa command, cp.
3238  */
3239 static int hpsa_scatter_gather(struct ctlr_info *h,
3240 struct CommandList *cp,
3241 struct scsi_cmnd *cmd)
3243 	unsigned int len;
3244 	struct scatterlist *sg;
3245 	u64 addr64;
3246 	int use_sg, i, sg_index, chained;
3247 	struct SGDescriptor *curr_sg;
3249 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3251 use_sg = scsi_dma_map(cmd);
3252 	if (use_sg < 0)
3253 		return use_sg;
3255 	if (!use_sg)
3256 		goto sglist_finished;
3258 	curr_sg = cp->SG;
3259 	chained = 0;
3260 	sg_index = 0;
3261 scsi_for_each_sg(cmd, sg, use_sg, i) {
3262 if (i == h->max_cmd_sg_entries - 1 &&
3263 use_sg > h->max_cmd_sg_entries) {
3264 			chained = 1;
3265 			curr_sg = h->cmd_sg_list[cp->cmdindex];
3266 			sg_index = 0;
3267 		}
3268 addr64 = (u64) sg_dma_address(sg);
3269 len = sg_dma_len(sg);
3270 curr_sg->Addr = cpu_to_le64(addr64);
3271 curr_sg->Len = cpu_to_le32(len);
3272 curr_sg->Ext = cpu_to_le32(0);
3273 		curr_sg++;
3274 	}
3275 	(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3277 if (use_sg + chained > h->maxSG)
3278 h->maxSG = use_sg + chained;
3280 	if (chained) {
3281 		cp->Header.SGList = h->max_cmd_sg_entries;
3282 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
3283 if (hpsa_map_sg_chain_block(h, cp)) {
3284 			scsi_dma_unmap(cmd);
3285 			return -1;
3286 		}
3287 		return 0;
3288 	}
3291 sglist_finished:
3292 	cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
3293 cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
3297 #define IO_ACCEL_INELIGIBLE (1)
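/* fixup_ioaccel_cdb() below rewrites 6- and 12-byte READ/WRITE CDBs into
 * their 10-byte READ_10/WRITE_10 equivalents; other opcodes pass through
 * unchanged. A block count that cannot fit READ_10's 16-bit count field
 * makes the request IO_ACCEL_INELIGIBLE, which sends it down the normal
 * RAID path instead.
 */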
3298 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3300 	int is_write = 0;
3301 	u32 block;
3302 	u32 block_cnt;
3304 	/* Perform some CDB fixups if needed using 10 byte reads/writes only */
3305 	switch (cdb[0]) {
3306 	case WRITE_6:
3307 	case WRITE_12:
3308 		is_write = 1;
3309 	case READ_6:
3310 	case READ_12:
3311 		if (*cdb_len == 6) {
3312 block = (((u32) cdb[2]) << 8) | cdb[3];
3313 			block_cnt = cdb[4];
3314 		} else {
3315 			BUG_ON(*cdb_len != 12);
3316 block = (((u32) cdb[2]) << 24) |
3317 (((u32) cdb[3]) << 16) |
3318 (((u32) cdb[4]) << 8) |
3319 				cdb[5];
3320 			block_cnt =
3321 				(((u32) cdb[6]) << 24) |
3322 (((u32) cdb[7]) << 16) |
3323 (((u32) cdb[8]) << 8) |
3324 				cdb[9];
3325 		}
3326 		if (block_cnt > 0xffff)
3327 return IO_ACCEL_INELIGIBLE;
3329 		cdb[0] = is_write ? WRITE_10 : READ_10;
3330 		cdb[1] = 0;
3331 		cdb[2] = (u8) (block >> 24);
3332 		cdb[3] = (u8) (block >> 16);
3333 		cdb[4] = (u8) (block >> 8);
3334 		cdb[5] = (u8) (block);
3335 		cdb[6] = 0;
3336 		cdb[7] = (u8) (block_cnt >> 8);
3337 		cdb[8] = (u8) (block_cnt);
3338 		cdb[9] = 0;
3339 		*cdb_len = 10;
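/* The ioaccel1 submission path below bypasses the CISS header: the request
 * is built directly in the preallocated io_accel1_cmd pool slot for this
 * command index, and c->busaddr is rebased onto that slot. The
 * BUG_ON(c->busaddr & 0x0000007F) below documents the hardware requirement
 * that the accelerated command buffer be 128-byte aligned.
 */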
3345 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3346 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3347 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3349 struct scsi_cmnd *cmd = c->scsi_cmd;
3350 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3351 	unsigned int len;
3352 	unsigned int total_len = 0;
3353 	struct scatterlist *sg;
3354 	u64 addr64;
3355 	int use_sg, i;
3356 	struct SGDescriptor *curr_sg;
3357 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3359 /* TODO: implement chaining support */
3360 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3361 atomic_dec(&phys_disk->ioaccel_cmds_out);
3362 return IO_ACCEL_INELIGIBLE;
3365 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3367 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3368 atomic_dec(&phys_disk->ioaccel_cmds_out);
3369 return IO_ACCEL_INELIGIBLE;
3372 c->cmd_type = CMD_IOACCEL1;
3374 /* Adjust the DMA address to point to the accelerated command buffer */
3375 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3376 (c->cmdindex * sizeof(*cp));
3377 BUG_ON(c->busaddr & 0x0000007F);
3379 use_sg = scsi_dma_map(cmd);
3380 	if (use_sg < 0) {
3381 		atomic_dec(&phys_disk->ioaccel_cmds_out);
3382 		return use_sg;
3383 	}
3385 	if (use_sg) {
3386 		curr_sg = cp->SG;
3387 scsi_for_each_sg(cmd, sg, use_sg, i) {
3388 addr64 = (u64) sg_dma_address(sg);
3389 len = sg_dma_len(sg);
3390 			total_len += len;
3391 			curr_sg->Addr = cpu_to_le64(addr64);
3392 curr_sg->Len = cpu_to_le32(len);
3393 curr_sg->Ext = cpu_to_le32(0);
3394 			curr_sg++;
3395 		}
3396 		(--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3398 		switch (cmd->sc_data_direction) {
3399 		case DMA_TO_DEVICE:
3400 			control |= IOACCEL1_CONTROL_DATA_OUT;
3401 			break;
3402 		case DMA_FROM_DEVICE:
3403 			control |= IOACCEL1_CONTROL_DATA_IN;
3404 			break;
3405 		case DMA_NONE:
3406 			control |= IOACCEL1_CONTROL_NODATAXFER;
3407 			break;
3408 		default:
3409 			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3410 				cmd->sc_data_direction);
3411 			BUG();
3412 			break;
3413 		}
3414 	} else {
3415 		control |= IOACCEL1_CONTROL_NODATAXFER;
3416 	}
3418 c->Header.SGList = use_sg;
3419 /* Fill out the command structure to submit */
3420 cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
3421 cp->transfer_len = cpu_to_le32(total_len);
3422 cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
3423 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
3424 cp->control = cpu_to_le32(control);
3425 memcpy(cp->CDB, cdb, cdb_len);
3426 memcpy(cp->CISS_LUN, scsi3addr, 8);
3427 /* Tag was already set at init time. */
3428 enqueue_cmd_and_start_io(h, c);
3433 * Queue a command directly to a device behind the controller using the
3434  * I/O accelerator path.
3435  */
3436 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3437 struct CommandList *c)
3439 struct scsi_cmnd *cmd = c->scsi_cmd;
3440 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3442 	c->phys_disk = dev;
3444 	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3445 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
3449  * Set encryption parameters for the ioaccel2 request
3450  */
3451 static void set_encrypt_ioaccel2(struct ctlr_info *h,
3452 struct CommandList *c, struct io_accel2_cmd *cp)
3454 struct scsi_cmnd *cmd = c->scsi_cmd;
3455 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3456 	struct raid_map_data *map = &dev->raid_map;
3457 	u64 first_block;
3459 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3461 /* Are we doing encryption on this device */
3462 	if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
3463 		return;
3464 /* Set the data encryption key index. */
3465 cp->dekindex = map->dekindex;
3467 /* Set the encryption enable flag, encoded into direction field. */
3468 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3470 	/* Set encryption tweak values based on logical block address.
3471 	 * If block size is 512, tweak value is LBA.
3472 	 * For other block sizes, tweak is (LBA * block size) / 512.
3473 	 */
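	/* Worked example (hypothetical values): for a volume with 4096-byte
	 * blocks, an I/O starting at LBA 100 gets a tweak of
	 * 100 * 4096 / 512 = 800; with 512-byte blocks the tweak is simply
	 * the LBA itself. The 64-bit result is split across tweak_lower and
	 * tweak_upper below.
	 */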
3474 switch (cmd->cmnd[0]) {
3475 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3476 	case WRITE_6:
3477 	case READ_6:
3478 		first_block = get_unaligned_be16(&cmd->cmnd[2]);
3479 		break;
3480 	case WRITE_10:
3481 	case READ_10:
3482 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3483 	case WRITE_12:
3484 	case READ_12:
3485 		first_block = get_unaligned_be32(&cmd->cmnd[2]);
3486 		break;
3487 	case WRITE_16:
3488 	case READ_16:
3489 first_block = get_unaligned_be64(&cmd->cmnd[2]);
3490 		break;
3491 	default:
3492 		dev_err(&h->pdev->dev,
3493 "ERROR: %s: size (0x%x) not supported for encryption\n",
3494 __func__, cmd->cmnd[0]);
3495 		BUG();
3496 		break;
3497 	}
3499 	if (le32_to_cpu(map->volume_blk_size) != 512)
3500 first_block = first_block *
3501 le32_to_cpu(map->volume_blk_size)/512;
3503 cp->tweak_lower = cpu_to_le32(first_block);
3504 cp->tweak_upper = cpu_to_le32(first_block >> 32);
3507 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3508 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3509 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3511 struct scsi_cmnd *cmd = c->scsi_cmd;
3512 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3513 struct ioaccel2_sg_element *curr_sg;
3514 	int use_sg, i;
3515 	struct scatterlist *sg;
3516 	u64 addr64;
3517 	u32 len;
3518 	u32 total_len = 0;
3520 if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3521 atomic_dec(&phys_disk->ioaccel_cmds_out);
3522 return IO_ACCEL_INELIGIBLE;
3525 if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3526 atomic_dec(&phys_disk->ioaccel_cmds_out);
3527 return IO_ACCEL_INELIGIBLE;
3530 c->cmd_type = CMD_IOACCEL2;
3531 /* Adjust the DMA address to point to the accelerated command buffer */
3532 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3533 (c->cmdindex * sizeof(*cp));
3534 BUG_ON(c->busaddr & 0x0000007F);
3536 memset(cp, 0, sizeof(*cp));
3537 cp->IU_type = IOACCEL2_IU_TYPE;
3539 use_sg = scsi_dma_map(cmd);
3540 	if (use_sg < 0) {
3541 		atomic_dec(&phys_disk->ioaccel_cmds_out);
3542 		return use_sg;
3543 	}
3545 	if (use_sg) {
3546 		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3547 		curr_sg = cp->sg;
3548 scsi_for_each_sg(cmd, sg, use_sg, i) {
3549 addr64 = (u64) sg_dma_address(sg);
3550 len = sg_dma_len(sg);
3551 			total_len += len;
3552 			curr_sg->address = cpu_to_le64(addr64);
3553 curr_sg->length = cpu_to_le32(len);
3554 curr_sg->reserved[0] = 0;
3555 curr_sg->reserved[1] = 0;
3556 curr_sg->reserved[2] = 0;
3557 			curr_sg->chain_indicator = 0;
3558 			curr_sg++;
3559 		}
3561 		switch (cmd->sc_data_direction) {
3562 		case DMA_TO_DEVICE:
3563 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3564 			cp->direction |= IOACCEL2_DIR_DATA_OUT;
3565 			break;
3566 		case DMA_FROM_DEVICE:
3567 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3568 			cp->direction |= IOACCEL2_DIR_DATA_IN;
3569 			break;
3570 		case DMA_NONE:
3571 			cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3572 			cp->direction |= IOACCEL2_DIR_NO_DATA;
3573 			break;
3574 		default:
3575 			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3576 				cmd->sc_data_direction);
3577 			BUG();
3578 			break;
3579 		}
3580 	} else {
3581 		cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3582 		cp->direction |= IOACCEL2_DIR_NO_DATA;
3583 	}
3585 /* Set encryption parameters, if necessary */
3586 set_encrypt_ioaccel2(h, c, cp);
3588 cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
3589 cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
3590 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3592 /* fill in sg elements */
3593 cp->sg_count = (u8) use_sg;
3595 cp->data_len = cpu_to_le32(total_len);
3596 cp->err_ptr = cpu_to_le64(c->busaddr +
3597 offsetof(struct io_accel2_cmd, error_data));
3598 cp->err_len = cpu_to_le32(sizeof(cp->error_data));
3600 enqueue_cmd_and_start_io(h, c);
3604 /*
3605  * Queue a command to the correct I/O accelerator path.
3606  */
3607 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3608 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3609 u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3611 /* Try to honor the device's queue depth */
3612 if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
3613 phys_disk->queue_depth) {
3614 atomic_dec(&phys_disk->ioaccel_cmds_out);
3615 return IO_ACCEL_INELIGIBLE;
3617 if (h->transMethod & CFGTBL_Trans_io_accel1)
3618 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3619 						cdb, cdb_len, scsi3addr,
3620 						phys_disk);
3621 	else
3622 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3623 						cdb, cdb_len, scsi3addr,
3624 						phys_disk);
3625 }
3627 static void raid_map_helper(struct raid_map_data *map,
3628 int offload_to_mirror, u32 *map_index, u32 *current_group)
3630 if (offload_to_mirror == 0) {
3631 /* use physical disk in the first mirrored group. */
3632 *map_index %= le16_to_cpu(map->data_disks_per_row);
3633 		return;
3634 	}
3635 	do {
3636 		/* determine mirror group that *map_index indicates */
3637 		*current_group = *map_index /
3638 			le16_to_cpu(map->data_disks_per_row);
3639 		if (offload_to_mirror == *current_group)
3640 			continue;
3641 		if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
3642 			/* select map index from next group */
3643 			*map_index += le16_to_cpu(map->data_disks_per_row);
3644 			(*current_group)++;
3645 		} else {
3646 			/* select map index from first group */
3647 			*map_index %= le16_to_cpu(map->data_disks_per_row);
3648 			*current_group = 0;
3649 		}
3650 	} while (offload_to_mirror != *current_group);
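/* Worked example (hypothetical values): with data_disks_per_row = 3 and
 * layout_map_count = 3 (a three-way mirror), map_index = 4 denotes mirror
 * group 1. For offload_to_mirror = 2 the loop advances map_index to 7
 * (group 2) and stops, steering the read to the third mirror copy.
 */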
3653 /*
3654  * Attempt to perform offload RAID mapping for a logical volume I/O.
3655  */
3656 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3657 struct CommandList *c)
3659 struct scsi_cmnd *cmd = c->scsi_cmd;
3660 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3661 struct raid_map_data *map = &dev->raid_map;
3662 struct raid_map_disk_data *dd = &map->data[0];
3663 	int is_write = 0;
3664 	u32 map_index;
3665 	u64 first_block, last_block;
3666 	u32 block_cnt;
3667 	u32 blocks_per_row;
3668 u64 first_row, last_row;
3669 u32 first_row_offset, last_row_offset;
3670 u32 first_column, last_column;
3671 u64 r0_first_row, r0_last_row;
3672 u32 r5or6_blocks_per_row;
3673 u64 r5or6_first_row, r5or6_last_row;
3674 u32 r5or6_first_row_offset, r5or6_last_row_offset;
3675 u32 r5or6_first_column, r5or6_last_column;
3676 u32 total_disks_per_row;
3678 	u32 first_group, last_group, current_group;
3679 	u32 map_row;
3680 	u32 disk_handle;
3681 	u64 disk_block;
3682 	u32 disk_block_cnt;
3683 	u8 cdb[16];
3684 	u8 cdb_len;
3685 	u16 strip_size;
3686 #if BITS_PER_LONG == 32
3687 	u64 tmpdiv;
3688 #endif
3689 	int offload_to_mirror;
3691 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3693 /* check for valid opcode, get LBA and block count */
3694 switch (cmd->cmnd[0]) {
3695 	case WRITE_6:
3696 		is_write = 1;
3697 	case READ_6:
3698 		first_block =
3699 			(((u64) cmd->cmnd[2]) << 8) |
3700 			cmd->cmnd[3];
3701 		block_cnt = cmd->cmnd[4];
3704 		break;
3705 	case WRITE_10:
3706 		is_write = 1;
3707 	case READ_10:
3708 		first_block =
3709 			(((u64) cmd->cmnd[2]) << 24) |
3710 (((u64) cmd->cmnd[3]) << 16) |
3711 (((u64) cmd->cmnd[4]) << 8) |
3713 		block_cnt =
3714 			(((u32) cmd->cmnd[7]) << 8) |
3715 			cmd->cmnd[8];
3716 		break;
3717 	case WRITE_12:
3718 		is_write = 1;
3719 	case READ_12:
3720 		first_block =
3721 (((u64) cmd->cmnd[2]) << 24) |
3722 (((u64) cmd->cmnd[3]) << 16) |
3723 (((u64) cmd->cmnd[4]) << 8) |
3725 		block_cnt =
3726 			(((u32) cmd->cmnd[6]) << 24) |
3727 (((u32) cmd->cmnd[7]) << 16) |
3728 			(((u32) cmd->cmnd[8]) << 8) |
3729 			cmd->cmnd[9];
3730 		break;
3731 	case WRITE_16:
3732 		is_write = 1;
3733 	case READ_16:
3734 		first_block =
3735 (((u64) cmd->cmnd[2]) << 56) |
3736 (((u64) cmd->cmnd[3]) << 48) |
3737 (((u64) cmd->cmnd[4]) << 40) |
3738 (((u64) cmd->cmnd[5]) << 32) |
3739 (((u64) cmd->cmnd[6]) << 24) |
3740 (((u64) cmd->cmnd[7]) << 16) |
3741 (((u64) cmd->cmnd[8]) << 8) |
3742 			cmd->cmnd[9];
3743 		block_cnt =
3744 			(((u32) cmd->cmnd[10]) << 24) |
3745 (((u32) cmd->cmnd[11]) << 16) |
3746 (((u32) cmd->cmnd[12]) << 8) |
3747 			cmd->cmnd[13];
3748 		break;
3749 	default:
3750 		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
3751 	}
3752 last_block = first_block + block_cnt - 1;
3754 /* check for write to non-RAID-0 */
3755 if (is_write && dev->raid_level != 0)
3756 return IO_ACCEL_INELIGIBLE;
3758 /* check for invalid block or wraparound */
3759 if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
3760 last_block < first_block)
3761 return IO_ACCEL_INELIGIBLE;
3763 /* calculate stripe information for the request */
3764 blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
3765 le16_to_cpu(map->strip_size);
3766 strip_size = le16_to_cpu(map->strip_size);
3767 #if BITS_PER_LONG == 32
3768 tmpdiv = first_block;
3769 	(void) do_div(tmpdiv, blocks_per_row);
3770 	first_row = tmpdiv;
3771 tmpdiv = last_block;
3772 	(void) do_div(tmpdiv, blocks_per_row);
3773 	last_row = tmpdiv;
3774 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3775 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3776 tmpdiv = first_row_offset;
3777 (void) do_div(tmpdiv, strip_size);
3778 first_column = tmpdiv;
3779 tmpdiv = last_row_offset;
3780 (void) do_div(tmpdiv, strip_size);
3781 last_column = tmpdiv;
3782 #else
3783 	first_row = first_block / blocks_per_row;
3784 last_row = last_block / blocks_per_row;
3785 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3786 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3787 first_column = first_row_offset / strip_size;
3788 	last_column = last_row_offset / strip_size;
3789 #endif
3791 /* if this isn't a single row/column then give to the controller */
3792 if ((first_row != last_row) || (first_column != last_column))
3793 return IO_ACCEL_INELIGIBLE;
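	/* Worked example (hypothetical values): with 4 data disks and a strip
	 * size of 128 blocks, blocks_per_row = 512. An 8-block read at LBA
	 * 1000 falls in row 1 (1000 / 512) at row offset 488, column 3
	 * (488 / 128); since LBA 1007 maps to the same row and column, the
	 * request remains eligible for the accelerated path.
	 */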
3795 /* proceeding with driver mapping */
3796 total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
3797 le16_to_cpu(map->metadata_disks_per_row);
3798 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3799 le16_to_cpu(map->row_cnt);
3800 map_index = (map_row * total_disks_per_row) + first_column;
3802 switch (dev->raid_level) {
3803 	case HPSA_RAID_0:
3804 		break; /* nothing special to do */
3805 	case HPSA_RAID_1:
3806 		/* Handles load balance across RAID 1 members.
3807 		 * (2-drive R1 and R10 with even # of drives.)
3808 		 * Appropriate for SSDs, not optimal for HDDs
3809 		 */
3810 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
3811 if (dev->offload_to_mirror)
3812 map_index += le16_to_cpu(map->data_disks_per_row);
3813 dev->offload_to_mirror = !dev->offload_to_mirror;
3814 		break;
3815 	case HPSA_RAID_ADM:
3816 		/* Handles N-way mirrors (R1-ADM)
3817 		 * and R10 with # of drives divisible by 3.
3818 		 */
3819 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
3821 offload_to_mirror = dev->offload_to_mirror;
3822 raid_map_helper(map, offload_to_mirror,
3823 				&map_index, &current_group);
3824 /* set mirror group to use next time */
3825 		offload_to_mirror =
3826 			(offload_to_mirror >=
3827 le16_to_cpu(map->layout_map_count) - 1)
3828 ? 0 : offload_to_mirror + 1;
3829 dev->offload_to_mirror = offload_to_mirror;
3830 /* Avoid direct use of dev->offload_to_mirror within this
3831 * function since multiple threads might simultaneously
3832 		 * increment it beyond the range of dev->layout_map_count -1.
3833 		 */
3834 		break;
3835 	case HPSA_RAID_5:
3836 	case HPSA_RAID_6:
3837 		if (le16_to_cpu(map->layout_map_count) <= 1)
3838 			break;
3840 /* Verify first and last block are in same RAID group */
3841 r5or6_blocks_per_row =
3842 le16_to_cpu(map->strip_size) *
3843 le16_to_cpu(map->data_disks_per_row);
3844 BUG_ON(r5or6_blocks_per_row == 0);
3845 stripesize = r5or6_blocks_per_row *
3846 le16_to_cpu(map->layout_map_count);
3847 #if BITS_PER_LONG == 32
3848 tmpdiv = first_block;
3849 first_group = do_div(tmpdiv, stripesize);
3850 tmpdiv = first_group;
3851 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3852 first_group = tmpdiv;
3853 tmpdiv = last_block;
3854 last_group = do_div(tmpdiv, stripesize);
3855 tmpdiv = last_group;
3856 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3857 last_group = tmpdiv;
3858 #else
3859 	first_group = (first_block % stripesize) / r5or6_blocks_per_row;
3860 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
3862 if (first_group != last_group)
3863 return IO_ACCEL_INELIGIBLE;
3865 /* Verify request is in a single row of RAID 5/6 */
3866 #if BITS_PER_LONG == 32
3867 tmpdiv = first_block;
3868 (void) do_div(tmpdiv, stripesize);
3869 first_row = r5or6_first_row = r0_first_row = tmpdiv;
3870 tmpdiv = last_block;
3871 (void) do_div(tmpdiv, stripesize);
3872 r5or6_last_row = r0_last_row = tmpdiv;
3873 #else
3874 	first_row = r5or6_first_row = r0_first_row =
3875 first_block / stripesize;
3876 r5or6_last_row = r0_last_row = last_block / stripesize;
3877 #endif
3878 	if (r5or6_first_row != r5or6_last_row)
3879 return IO_ACCEL_INELIGIBLE;
3882 /* Verify request is in a single column */
3883 #if BITS_PER_LONG == 32
3884 tmpdiv = first_block;
3885 first_row_offset = do_div(tmpdiv, stripesize);
3886 tmpdiv = first_row_offset;
3887 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
3888 r5or6_first_row_offset = first_row_offset;
3889 tmpdiv = last_block;
3890 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
3891 tmpdiv = r5or6_last_row_offset;
3892 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
3893 tmpdiv = r5or6_first_row_offset;
3894 (void) do_div(tmpdiv, map->strip_size);
3895 first_column = r5or6_first_column = tmpdiv;
3896 tmpdiv = r5or6_last_row_offset;
3897 (void) do_div(tmpdiv, map->strip_size);
3898 r5or6_last_column = tmpdiv;
3899 #else
3900 	first_row_offset = r5or6_first_row_offset =
3901 (u32)((first_block % stripesize) %
3902 r5or6_blocks_per_row);
3904 r5or6_last_row_offset =
3905 (u32)((last_block % stripesize) %
3906 r5or6_blocks_per_row);
3908 first_column = r5or6_first_column =
3909 r5or6_first_row_offset / le16_to_cpu(map->strip_size);
3910 	r5or6_last_column =
3911 		r5or6_last_row_offset / le16_to_cpu(map->strip_size);
3912 #endif
3913 if (r5or6_first_column != r5or6_last_column)
3914 return IO_ACCEL_INELIGIBLE;
3916 /* Request is eligible */
3917 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3918 le16_to_cpu(map->row_cnt);
3920 map_index = (first_group *
3921 (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
3922 (map_row * total_disks_per_row) + first_column;
3923 		break;
3924 	default:
3925 		return IO_ACCEL_INELIGIBLE;
3926 	}
3928 c->phys_disk = dev->phys_disk[map_index];
3930 disk_handle = dd[map_index].ioaccel_handle;
3931 disk_block = le64_to_cpu(map->disk_starting_blk) +
3932 first_row * le16_to_cpu(map->strip_size) +
3933 (first_row_offset - first_column *
3934 le16_to_cpu(map->strip_size));
3935 disk_block_cnt = block_cnt;
3937 /* handle differing logical/physical block sizes */
3938 if (map->phys_blk_shift) {
3939 disk_block <<= map->phys_blk_shift;
3940 disk_block_cnt <<= map->phys_blk_shift;
3942 BUG_ON(disk_block_cnt > 0xffff);
3944 /* build the new CDB for the physical disk I/O */
3945 if (disk_block > 0xffffffff) {
3946 cdb[0] = is_write ? WRITE_16 : READ_16;
3947 		cdb[1] = 0;
3948 		cdb[2] = (u8) (disk_block >> 56);
3949 cdb[3] = (u8) (disk_block >> 48);
3950 cdb[4] = (u8) (disk_block >> 40);
3951 cdb[5] = (u8) (disk_block >> 32);
3952 cdb[6] = (u8) (disk_block >> 24);
3953 cdb[7] = (u8) (disk_block >> 16);
3954 cdb[8] = (u8) (disk_block >> 8);
3955 cdb[9] = (u8) (disk_block);
3956 cdb[10] = (u8) (disk_block_cnt >> 24);
3957 cdb[11] = (u8) (disk_block_cnt >> 16);
3958 cdb[12] = (u8) (disk_block_cnt >> 8);
3959 		cdb[13] = (u8) (disk_block_cnt);
3960 		cdb[14] = 0;
3961 		cdb[15] = 0;
3962 		cdb_len = 16;
3963 	} else {
3964 		cdb[0] = is_write ? WRITE_10 : READ_10;
3965 		cdb[1] = 0;
3966 		cdb[2] = (u8) (disk_block >> 24);
3967 cdb[3] = (u8) (disk_block >> 16);
3968 cdb[4] = (u8) (disk_block >> 8);
3969 cdb[5] = (u8) (disk_block);
3971 cdb[7] = (u8) (disk_block_cnt >> 8);
3972 		cdb[8] = (u8) (disk_block_cnt);
3973 		cdb[9] = 0;
3974 		cdb_len = 10;
3975 	}
3976 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
3977 						dev->scsi3addr,
3978 						dev->phys_disk[map_index]);
3981 /* Submit commands down the "normal" RAID stack path */
3982 static int hpsa_ciss_submit(struct ctlr_info *h,
3983 struct CommandList *c, struct scsi_cmnd *cmd,
3984 unsigned char scsi3addr[])
3986 cmd->host_scribble = (unsigned char *) c;
3987 c->cmd_type = CMD_SCSI;
3989 c->Header.ReplyQueue = 0; /* unused in simple mode */
3990 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
3991 c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
3993 /* Fill in the request block... */
3995 c->Request.Timeout = 0;
3996 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
3997 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
3998 c->Request.CDBLen = cmd->cmd_len;
3999 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
4000 switch (cmd->sc_data_direction) {
4001 case DMA_TO_DEVICE:
4002 c->Request.type_attr_dir =
4003 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
4004 break;
4005 case DMA_FROM_DEVICE:
4006 c->Request.type_attr_dir =
4007 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
4008 break;
4009 case DMA_NONE:
4010 c->Request.type_attr_dir =
4011 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
4012 break;
4013 case DMA_BIDIRECTIONAL:
4014 /* This can happen if a buggy application does a scsi passthru
4015 * and sets both inlen and outlen to non-zero. ( see
4016 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
4017 */
4019 c->Request.type_attr_dir =
4020 TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
4021 /* This is technically wrong, and hpsa controllers should
4022 * reject it with CMD_INVALID, which is the most correct
4023 * response, but non-fibre backends appear to let it
4024 * slide by, and give the same results as if this field
4025 * were set correctly. Either way is acceptable for
4026 * our purposes here.
4027 */
4028 break;
4030 default:
4032 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4033 cmd->sc_data_direction);
4034 BUG();
4035 break;
4036 }
4038 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4039 cmd_free(h, c);
4040 return SCSI_MLQUEUE_HOST_BUSY;
4041 }
4042 enqueue_cmd_and_start_io(h, c);
4043 /* the cmd'll come back via intr handler in complete_scsi_command() */
4044 return 0;
4047 static void hpsa_command_resubmit_worker(struct work_struct *work)
4049 struct scsi_cmnd *cmd;
4050 struct hpsa_scsi_dev_t *dev;
4051 struct CommandList *c =
4052 container_of(work, struct CommandList, work);
4054 cmd = c->scsi_cmd;
4055 dev = cmd->device->hostdata;
4056 if (!dev) {
4057 cmd->result = DID_NO_CONNECT << 16;
4058 cmd->scsi_done(cmd);
4059 return;
4060 }
4061 if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4063 * If we get here, it means dma mapping failed. Try
4064 * again via scsi mid layer, which will then get
4065 * SCSI_MLQUEUE_HOST_BUSY.
4067 cmd->result = DID_IMM_RETRY << 16;
4068 cmd->scsi_done(cmd);
4072 /* Running in host_lock-less mode (struct Scsi_Host->host_lock is not held) */
4073 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4075 struct ctlr_info *h;
4076 struct hpsa_scsi_dev_t *dev;
4077 unsigned char scsi3addr[8];
4078 struct CommandList *c;
4081 /* Get the ptr to our adapter structure out of cmd->host. */
4082 h = sdev_to_hba(cmd->device);
4083 dev = cmd->device->hostdata;
4084 if (!dev) {
4085 cmd->result = DID_NO_CONNECT << 16;
4086 cmd->scsi_done(cmd);
4087 return 0;
4088 }
4089 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
4091 if (unlikely(lockup_detected(h))) {
4092 cmd->result = DID_ERROR << 16;
4093 cmd->scsi_done(cmd);
4094 return 0;
4095 }
4096 c = cmd_alloc(h);
4097 if (c == NULL) { /* trouble... */
4098 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4099 return SCSI_MLQUEUE_HOST_BUSY;
4102 /* Call alternate submit routine for I/O accelerated commands.
4103 * Retries always go down the normal I/O path.
4105 if (likely(cmd->retries == 0 &&
4106 cmd->request->cmd_type == REQ_TYPE_FS &&
4107 h->acciopath_status)) {
4109 cmd->host_scribble = (unsigned char *) c;
4110 c->cmd_type = CMD_SCSI;
4111 c->scsi_cmd = cmd;
4113 if (dev->offload_enabled) {
4114 rc = hpsa_scsi_ioaccel_raid_map(h, c);
4115 if (rc == 0)
4116 return 0; /* Sent on ioaccel path */
4117 if (rc < 0) { /* scsi_dma_map failed. */
4118 cmd_free(h, c);
4119 return SCSI_MLQUEUE_HOST_BUSY;
4120 }
4121 } else if (dev->ioaccel_handle) {
4122 rc = hpsa_scsi_ioaccel_direct_map(h, c);
4123 if (rc == 0)
4124 return 0; /* Sent on direct map path */
4125 if (rc < 0) { /* scsi_dma_map failed. */
4126 cmd_free(h, c);
4127 return SCSI_MLQUEUE_HOST_BUSY;
4128 }
4129 }
4130 }
4131 return hpsa_ciss_submit(h, c, cmd, scsi3addr);
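/*
 * Editor's summary (illustrative, not from the original source): the
 * dispatch policy above is
 *
 *	first try + REQ_TYPE_FS + acciopath enabled
 *		-> try HP SSD Smart Path (RAID map, else direct map);
 *	anything else, or an ioaccel soft failure
 *		-> hpsa_ciss_submit(), the normal RAID stack path.
 *
 * Retries deliberately bypass the accelerated path so a command that
 * already failed there is re-driven through the full CISS path.
 */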
4134 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
4136 unsigned long flags;
4139 * Don't let rescans be initiated on a controller known
4140 * to be locked up. If the controller locks up *during*
4141 * a rescan, that thread is probably hosed, but at least
4142 * we can prevent new rescan threads from piling up on a
4143 * locked up controller.
4145 if (unlikely(lockup_detected(h))) {
4146 spin_lock_irqsave(&h->scan_lock, flags);
4147 h->scan_finished = 1;
4148 wake_up_all(&h->scan_wait_queue);
4149 spin_unlock_irqrestore(&h->scan_lock, flags);
4150 return 1;
4151 }
4153 return 0;
4155 static void hpsa_scan_start(struct Scsi_Host *sh)
4157 struct ctlr_info *h = shost_to_hba(sh);
4158 unsigned long flags;
4160 if (do_not_scan_if_controller_locked_up(h))
4161 return;
4163 /* wait until any scan already in progress is finished. */
4164 while (1) {
4165 spin_lock_irqsave(&h->scan_lock, flags);
4166 if (h->scan_finished)
4167 break;
4168 spin_unlock_irqrestore(&h->scan_lock, flags);
4169 wait_event(h->scan_wait_queue, h->scan_finished);
4170 /* Note: We don't need to worry about a race between this
4171 * thread and driver unload because the midlayer will
4172 * have incremented the reference count, so unload won't
4173 * happen if we're in here.
4174 */
4175 }
4176 h->scan_finished = 0; /* mark scan as in progress */
4177 spin_unlock_irqrestore(&h->scan_lock, flags);
4179 if (do_not_scan_if_controller_locked_up(h))
4180 return;
4182 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4184 spin_lock_irqsave(&h->scan_lock, flags);
4185 h->scan_finished = 1; /* mark scan as finished. */
4186 wake_up_all(&h->scan_wait_queue);
4187 spin_unlock_irqrestore(&h->scan_lock, flags);
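/*
 * Editor's note on the handshake above: scan_finished is the single
 * source of truth, protected by scan_lock.  A new scan waits (via
 * scan_wait_queue) for the previous one to set scan_finished = 1,
 * claims the scanner by writing 0 under the lock, runs
 * hpsa_update_scsi_devices(), and finally republishes 1 and wakes all
 * waiters.  hpsa_scan_finished() below merely samples the flag.
 */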
4190 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
4192 struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
4194 if (!logical_drive)
4195 return -ENODEV;
4197 if (qdepth < 1)
4198 qdepth = 1;
4199 else if (qdepth > logical_drive->queue_depth)
4200 qdepth = logical_drive->queue_depth;
4202 return scsi_change_queue_depth(sdev, qdepth);
4205 static int hpsa_scan_finished(struct Scsi_Host *sh,
4206 unsigned long elapsed_time)
4208 struct ctlr_info *h = shost_to_hba(sh);
4209 unsigned long flags;
4210 int finished;
4212 spin_lock_irqsave(&h->scan_lock, flags);
4213 finished = h->scan_finished;
4214 spin_unlock_irqrestore(&h->scan_lock, flags);
4215 return finished;
4218 static void hpsa_unregister_scsi(struct ctlr_info *h)
4220 /* we are being forcibly unloaded, and may not refuse. */
4221 scsi_remove_host(h->scsi_host);
4222 scsi_host_put(h->scsi_host);
4223 h->scsi_host = NULL;
4226 static int hpsa_register_scsi(struct ctlr_info *h)
4228 struct Scsi_Host *sh;
4231 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4232 if (sh == NULL)
4233 goto fail;
4238 sh->max_channel = 3;
4239 sh->max_cmd_len = MAX_COMMAND_SIZE;
4240 sh->max_lun = HPSA_MAX_LUN;
4241 sh->max_id = HPSA_MAX_LUN;
4242 sh->can_queue = h->nr_cmds -
4243 HPSA_CMDS_RESERVED_FOR_ABORTS -
4244 HPSA_CMDS_RESERVED_FOR_DRIVER -
4245 HPSA_MAX_CONCURRENT_PASSTHRUS;
4246 sh->cmd_per_lun = sh->can_queue;
4247 sh->sg_tablesize = h->maxsgentries;
4249 sh->hostdata[0] = (unsigned long) h;
4250 sh->irq = h->intr[h->intr_mode];
4251 sh->unique_id = sh->irq;
4252 error = scsi_add_host(sh, &h->pdev->dev);
4253 if (error)
4254 goto fail_host_put;
4255 scsi_scan_host(sh);
4256 return 0;
4258 fail_host_put:
4259 dev_err(&h->pdev->dev, "%s: scsi_add_host"
4260 " failed for controller %d\n", __func__, h->ctlr);
4261 scsi_host_put(sh);
4262 return error;
4263 fail:
4264 dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
4265 " failed for controller %d\n", __func__, h->ctlr);
4266 return -ENOMEM;
4269 static int wait_for_device_to_become_ready(struct ctlr_info *h,
4270 unsigned char lunaddr[])
4274 int waittime = 1; /* seconds */
4275 struct CommandList *c;
4279 dev_warn(&h->pdev->dev, "out of memory in "
4280 "wait_for_device_to_become_ready.\n");
4284 /* Send test unit ready until device ready, or give up. */
4285 while (count < HPSA_TUR_RETRY_LIMIT) {
4287 /* Wait for a bit. do this first, because if we send
4288 * the TUR right away, the reset will just abort it.
4290 msleep(1000 * waittime);
4291 count++;
4292 rc = 0; /* Device ready. */
4294 /* Increase wait time with each try, up to a point. */
4295 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4296 waittime = waittime * 2;
4298 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4299 (void) fill_cmd(c, TEST_UNIT_READY, h,
4300 NULL, 0, 0, lunaddr, TYPE_CMD);
4301 hpsa_scsi_do_simple_cmd_core(h, c);
4302 /* no unmap needed here because no data xfer. */
4304 if (c->err_info->CommandStatus == CMD_SUCCESS)
4305 break;
4307 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4308 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4309 (c->err_info->SenseInfo[2] == NO_SENSE ||
4310 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4311 break;
4313 dev_warn(&h->pdev->dev, "waiting %d secs "
4314 "for device to become ready.\n", waittime);
4315 rc = 1; /* device not ready. */
4316 }
4318 if (rc)
4319 dev_warn(&h->pdev->dev, "giving up on device.\n");
4320 else
4321 dev_warn(&h->pdev->dev, "device is ready.\n");
4323 cmd_free(h, c);
4324 return rc;
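/*
 * Editor's note: the retry delay above doubles on each pass
 * (1 s, 2 s, 4 s, ...), capped at HPSA_MAX_WAIT_INTERVAL_SECS, so the
 * total wait is bounded by HPSA_TUR_RETRY_LIMIT iterations of the
 * capped interval.
 */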
4327 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
4328 * complaining. Doing a host- or bus-reset can't do anything good here.
4330 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4333 struct ctlr_info *h;
4334 struct hpsa_scsi_dev_t *dev;
4336 /* find the controller to which the command to be aborted was sent */
4337 h = sdev_to_hba(scsicmd->device);
4338 if (h == NULL) /* paranoia */
4339 return FAILED;
4340 dev = scsicmd->device->hostdata;
4341 if (dev == NULL) {
4342 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
4343 "device lookup failed.\n");
4344 return FAILED;
4345 }
4346 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
4347 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4348 /* send a reset to the SCSI LUN which the command was sent to */
4349 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
4350 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4351 return SUCCESS;
4353 dev_warn(&h->pdev->dev, "resetting device failed.\n");
4354 return FAILED;
4357 static void swizzle_abort_tag(u8 *tag)
4359 u8 original_tag[8];
4361 memcpy(original_tag, tag, 8);
4362 tag[0] = original_tag[3];
4363 tag[1] = original_tag[2];
4364 tag[2] = original_tag[1];
4365 tag[3] = original_tag[0];
4366 tag[4] = original_tag[7];
4367 tag[5] = original_tag[6];
4368 tag[6] = original_tag[5];
4369 tag[7] = original_tag[4];
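/*
 * Worked example (editor's illustration): swizzle_abort_tag() reverses
 * the bytes within each 32-bit half of the 8-byte tag, so a tag of
 * 00 11 22 33 44 55 66 77 becomes 33 22 11 00 77 66 55 44.
 */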
4372 static void hpsa_get_tag(struct ctlr_info *h,
4373 struct CommandList *c, __le32 *taglower, __le32 *tagupper)
4376 if (c->cmd_type == CMD_IOACCEL1) {
4377 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4378 &h->ioaccel_cmd_pool[c->cmdindex];
4379 tag = le64_to_cpu(cm1->tag);
4380 *tagupper = cpu_to_le32(tag >> 32);
4381 *taglower = cpu_to_le32(tag);
4384 if (c->cmd_type == CMD_IOACCEL2) {
4385 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4386 &h->ioaccel2_cmd_pool[c->cmdindex];
4387 /* upper tag not used in ioaccel2 mode */
4388 memset(tagupper, 0, sizeof(*tagupper));
4389 *taglower = cm2->Tag;
4392 tag = le64_to_cpu(c->Header.tag);
4393 *tagupper = cpu_to_le32(tag >> 32);
4394 *taglower = cpu_to_le32(tag);
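/*
 * Editor's illustration: for a hypothetical CISS tag of
 * 0x1122334455667788, the code above yields *tagupper = 0x11223344 and
 * *taglower = 0x55667788; ioaccel2 commands carry only a 32-bit tag,
 * so tagupper is zeroed for them.
 */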
4397 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4398 struct CommandList *abort, int swizzle)
4401 struct CommandList *c;
4402 struct ErrorInfo *ei;
4403 __le32 tagupper, taglower;
4406 if (c == NULL) { /* trouble... */
4407 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4411 /* fill_cmd can't fail here, no buffer to map */
4412 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
4413 0, 0, scsi3addr, TYPE_MSG);
4415 swizzle_abort_tag(&c->Request.CDB[4]);
4416 hpsa_scsi_do_simple_cmd_core(h, c);
4417 hpsa_get_tag(h, abort, &taglower, &tagupper);
4418 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
4419 __func__, tagupper, taglower);
4420 /* no unmap needed here because no data xfer. */
4423 switch (ei->CommandStatus) {
4426 case CMD_UNABORTABLE: /* Very common, don't make noise. */
4430 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
4431 __func__, tagupper, taglower);
4432 hpsa_scsi_interpret_error(h, c);
4437 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4438 __func__, tagupper, taglower);
4442 /* ioaccel2 path firmware cannot handle abort task requests.
4443 * Change abort requests to physical target reset, and send to the
4444 * address of the physical disk used for the ioaccel 2 command.
4445 * Return 0 on success (IO_OK)
4449 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4450 unsigned char *scsi3addr, struct CommandList *abort)
4453 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4454 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4455 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4456 unsigned char *psa = &phys_scsi3addr[0];
4458 /* Get a pointer to the hpsa logical device. */
4459 scmd = (struct scsi_cmnd *) abort->scsi_cmd;
4460 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4462 dev_warn(&h->pdev->dev,
4463 "Cannot abort: no device pointer for command.\n");
4464 return -1; /* not abortable */
4467 if (h->raid_offload_debug > 0)
4468 dev_info(&h->pdev->dev,
4469 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4470 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4471 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4472 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4474 if (!dev->offload_enabled) {
4475 dev_warn(&h->pdev->dev,
4476 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4477 return -1; /* not abortable */
4480 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4481 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4482 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4483 return -1; /* not abortable */
4486 /* send the reset */
4487 if (h->raid_offload_debug > 0)
4488 dev_info(&h->pdev->dev,
4489 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4490 psa[0], psa[1], psa[2], psa[3],
4491 psa[4], psa[5], psa[6], psa[7]);
4492 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
4494 dev_warn(&h->pdev->dev,
4495 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4496 psa[0], psa[1], psa[2], psa[3],
4497 psa[4], psa[5], psa[6], psa[7]);
4498 return rc; /* failed to reset */
4501 /* wait for device to recover */
4502 if (wait_for_device_to_become_ready(h, psa) != 0) {
4503 dev_warn(&h->pdev->dev,
4504 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4505 psa[0], psa[1], psa[2], psa[3],
4506 psa[4], psa[5], psa[6], psa[7]);
4507 return -1; /* failed to recover */
4510 /* device recovered */
4511 dev_info(&h->pdev->dev,
4512 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4513 psa[0], psa[1], psa[2], psa[3],
4514 psa[4], psa[5], psa[6], psa[7]);
4516 return rc; /* success */
4519 /* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
4520 * tell which kind we're dealing with, so we send the abort both ways. There
4521 * shouldn't be any collisions between swizzled and unswizzled tags due to the
4522 * way we construct our tags, but we check anyway in case the assumptions which
4523 * make this true someday become false.
4525 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4526 unsigned char *scsi3addr, struct CommandList *abort)
4528 /* ioaccelerator mode 2 commands should be aborted via the
4529 * accelerated path, since RAID path is unaware of these commands,
4530 * but underlying firmware can't handle abort TMF.
4531 * Change abort to physical device reset.
4533 if (abort->cmd_type == CMD_IOACCEL2)
4534 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
4536 return hpsa_send_abort(h, scsi3addr, abort, 0) &&
4537 hpsa_send_abort(h, scsi3addr, abort, 1);
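/*
 * Editor's note on the expression above: hpsa_send_abort() returns 0
 * on success, so the && short-circuits; the swizzled abort is sent
 * only if the unswizzled one fails, and the function reports failure
 * (non-zero) only when both attempts fail.
 */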
4540 /* Send an abort for the specified command.
4541 * If the device and controller support it,
4542 * send a task abort request.
4544 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4548 struct ctlr_info *h;
4549 struct hpsa_scsi_dev_t *dev;
4550 struct CommandList *abort; /* pointer to command to be aborted */
4551 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
4552 char msg[256]; /* For debug messaging. */
4554 __le32 tagupper, taglower;
4557 /* Find the controller of the command to be aborted */
4558 h = sdev_to_hba(sc->device);
4559 if (WARN(h == NULL,
4560 "ABORT REQUEST FAILED, Controller lookup failed.\n"))
4561 return FAILED;
4563 /* Check that controller supports some kind of task abort */
4564 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
4565 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
4568 memset(msg, 0, sizeof(msg));
4569 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ",
4570 h->scsi_host->host_no, sc->device->channel,
4571 sc->device->id, sc->device->lun);
4573 /* Find the device of the command to be aborted */
4574 dev = sc->device->hostdata;
4576 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
4581 /* Get SCSI command to be aborted */
4582 abort = (struct CommandList *) sc->host_scribble;
4583 if (abort == NULL) {
4584 /* This can happen if the command already completed. */
4587 refcount = atomic_inc_return(&abort->refcount);
4588 if (refcount == 1) { /* Command is done already. */
4592 hpsa_get_tag(h, abort, &taglower, &tagupper);
4593 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
4594 as = (struct scsi_cmnd *) abort->scsi_cmd;
4596 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
4597 as->cmnd[0], as->serial_number);
4598 dev_dbg(&h->pdev->dev, "%s\n", msg);
4599 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
4600 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4602 * Command is in flight, or possibly already completed
4603 * by the firmware (but not to the scsi mid layer) but we can't
4604 * distinguish which. Send the abort down.
4606 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
4608 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
4609 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
4610 h->scsi_host->host_no,
4611 dev->bus, dev->target, dev->lun);
4615 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
4617 /* If the abort(s) above completed and actually aborted the
4618 * command, then the command to be aborted should already be
4619 * completed. If not, wait around a bit more to see if they
4620 * manage to complete normally.
4622 #define ABORT_COMPLETE_WAIT_SECS 30
4623 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
4624 refcount = atomic_read(&abort->refcount);
4632 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
4633 msg, ABORT_COMPLETE_WAIT_SECS);
4639 * For operations that cannot sleep, a command block is allocated at init,
4640 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4641 * which ones are free or in use. Lock must be held when calling this.
4642 * cmd_free() is the complement.
4645 static struct CommandList *cmd_alloc(struct ctlr_info *h)
4647 struct CommandList *c;
4648 int i;
4649 union u64bit temp64;
4650 dma_addr_t cmd_dma_handle, err_dma_handle;
4651 int refcount;
4652 unsigned long offset;
4655 * There is some *extremely* small but non-zero chance that
4656 * multiple threads could get in here, and one thread could
4657 * be scanning through the list of bits looking for a free
4658 * one, but the free ones are always behind him, and other
4659 * threads sneak in behind him and eat them before he can
4660 * get to them, so that while there is always a free one, a
4661 * very unlucky thread might be starved anyway, never able to
4662 * beat the other threads. In reality, this happens so
4663 * infrequently as to be indistinguishable from never.
4666 offset = h->last_allocation; /* benignly racy */
4667 for (;;) {
4668 i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
4669 if (unlikely(i == h->nr_cmds)) {
4670 offset = 0;
4671 continue;
4672 }
4673 c = h->cmd_pool + i;
4674 refcount = atomic_inc_return(&c->refcount);
4675 if (unlikely(refcount > 1)) {
4676 cmd_free(h, c); /* already in use */
4677 offset = (i + 1) % h->nr_cmds;
4678 continue;
4679 }
4680 set_bit(i & (BITS_PER_LONG - 1),
4681 h->cmd_pool_bits + (i / BITS_PER_LONG));
4682 break; /* it's ours now. */
4683 }
4684 h->last_allocation = i; /* benignly racy */
4686 /* Zero out all of commandlist except the last field, refcount */
4687 memset(c, 0, offsetof(struct CommandList, refcount));
4688 c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT));
4689 cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
4690 c->err_info = h->errinfo_pool + i;
4691 memset(c->err_info, 0, sizeof(*c->err_info));
4692 err_dma_handle = h->errinfo_pool_dhandle
4693 + i * sizeof(*c->err_info);
4697 c->busaddr = (u32) cmd_dma_handle;
4698 temp64.val = (u64) err_dma_handle;
4699 c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4700 c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
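/*
 * Editor's note: in the scheme above the bitmap is only a search hint;
 * the per-command refcount taken with atomic_inc_return() is the real
 * claim of ownership, which is why a bit found clear can still lose
 * the race and bounce to the next index.
 */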
4706 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4708 if (atomic_dec_and_test(&c->refcount)) {
4711 i = c - h->cmd_pool;
4712 clear_bit(i & (BITS_PER_LONG - 1),
4713 h->cmd_pool_bits + (i / BITS_PER_LONG));
4717 #ifdef CONFIG_COMPAT
4719 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
4722 IOCTL32_Command_struct __user *arg32 =
4723 (IOCTL32_Command_struct __user *) arg;
4724 IOCTL_Command_struct arg64;
4725 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
4729 memset(&arg64, 0, sizeof(arg64));
4731 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4732 sizeof(arg64.LUN_info));
4733 err |= copy_from_user(&arg64.Request, &arg32->Request,
4734 sizeof(arg64.Request));
4735 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4736 sizeof(arg64.error_info));
4737 err |= get_user(arg64.buf_size, &arg32->buf_size);
4738 err |= get_user(cp, &arg32->buf);
4739 arg64.buf = compat_ptr(cp);
4740 err |= copy_to_user(p, &arg64, sizeof(arg64));
4745 err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
4748 err |= copy_in_user(&arg32->error_info, &p->error_info,
4749 sizeof(arg32->error_info));
4755 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4756 int cmd, void __user *arg)
4758 BIG_IOCTL32_Command_struct __user *arg32 =
4759 (BIG_IOCTL32_Command_struct __user *) arg;
4760 BIG_IOCTL_Command_struct arg64;
4761 BIG_IOCTL_Command_struct __user *p =
4762 compat_alloc_user_space(sizeof(arg64));
4766 memset(&arg64, 0, sizeof(arg64));
4768 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4769 sizeof(arg64.LUN_info));
4770 err |= copy_from_user(&arg64.Request, &arg32->Request,
4771 sizeof(arg64.Request));
4772 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4773 sizeof(arg64.error_info));
4774 err |= get_user(arg64.buf_size, &arg32->buf_size);
4775 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
4776 err |= get_user(cp, &arg32->buf);
4777 arg64.buf = compat_ptr(cp);
4778 err |= copy_to_user(p, &arg64, sizeof(arg64));
4783 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
4786 err |= copy_in_user(&arg32->error_info, &p->error_info,
4787 sizeof(arg32->error_info));
4793 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
4796 case CCISS_GETPCIINFO:
4797 case CCISS_GETINTINFO:
4798 case CCISS_SETINTINFO:
4799 case CCISS_GETNODENAME:
4800 case CCISS_SETNODENAME:
4801 case CCISS_GETHEARTBEAT:
4802 case CCISS_GETBUSTYPES:
4803 case CCISS_GETFIRMVER:
4804 case CCISS_GETDRIVVER:
4805 case CCISS_REVALIDVOLS:
4806 case CCISS_DEREGDISK:
4807 case CCISS_REGNEWDISK:
4809 case CCISS_RESCANDISK:
4810 case CCISS_GETLUNINFO:
4811 return hpsa_ioctl(dev, cmd, arg);
4813 case CCISS_PASSTHRU32:
4814 return hpsa_ioctl32_passthru(dev, cmd, arg);
4815 case CCISS_BIG_PASSTHRU32:
4816 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
4819 return -ENOIOCTLCMD;
4824 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
4826 struct hpsa_pci_info pciinfo;
4828 if (!argp)
4829 return -EINVAL;
4830 pciinfo.domain = pci_domain_nr(h->pdev->bus);
4831 pciinfo.bus = h->pdev->bus->number;
4832 pciinfo.dev_fn = h->pdev->devfn;
4833 pciinfo.board_id = h->board_id;
4834 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
4835 return -EFAULT;
4836 return 0;
4839 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
4841 DriverVer_type DriverVer;
4842 unsigned char vmaj, vmin, vsubmin;
4845 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
4846 &vmaj, &vmin, &vsubmin);
4848 dev_info(&h->pdev->dev, "driver version string '%s' "
4849 "unrecognized.", HPSA_DRIVER_VERSION);
4854 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
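/*
 * Worked example (editor's note): for HPSA_DRIVER_VERSION "3.4.4-1",
 * sscanf() yields vmaj = 3, vmin = 4, vsubmin = 4, so
 * DriverVer = (3 << 16) | (4 << 8) | 4 = 0x030404.
 */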
4857 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
4858 return -EFAULT;
4859 return 0;
4862 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4864 IOCTL_Command_struct iocommand;
4865 struct CommandList *c;
4872 if (!capable(CAP_SYS_RAWIO))
4874 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
4876 if ((iocommand.buf_size < 1) &&
4877 (iocommand.Request.Type.Direction != XFER_NONE)) {
4878 return -EINVAL;
4879 }
4880 if (iocommand.buf_size > 0) {
4881 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4884 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4885 /* Copy the data into the buffer we created */
4886 if (copy_from_user(buff, iocommand.buf,
4887 iocommand.buf_size)) {
4892 memset(buff, 0, iocommand.buf_size);
4900 /* Fill in the command type */
4901 c->cmd_type = CMD_IOCTL_PEND;
4902 /* Fill in Command Header */
4903 c->Header.ReplyQueue = 0; /* unused in simple mode */
4904 if (iocommand.buf_size > 0) { /* buffer to fill */
4905 c->Header.SGList = 1;
4906 c->Header.SGTotal = cpu_to_le16(1);
4907 } else { /* no buffers to fill */
4908 c->Header.SGList = 0;
4909 c->Header.SGTotal = cpu_to_le16(0);
4911 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
4913 /* Fill in Request block */
4914 memcpy(&c->Request, &iocommand.Request,
4915 sizeof(c->Request));
4917 /* Fill in the scatter gather information */
4918 if (iocommand.buf_size > 0) {
4919 temp64 = pci_map_single(h->pdev, buff,
4920 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4921 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
4922 c->SG[0].Addr = cpu_to_le64(0);
4923 c->SG[0].Len = cpu_to_le32(0);
4927 c->SG[0].Addr = cpu_to_le64(temp64);
4928 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
4929 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
4931 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
4932 if (iocommand.buf_size > 0)
4933 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
4934 check_ioctl_unit_attention(h, c);
4936 /* Copy the error information out */
4937 memcpy(&iocommand.error_info, c->err_info,
4938 sizeof(iocommand.error_info));
4939 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
4943 if ((iocommand.Request.Type.Direction & XFER_READ) &&
4944 iocommand.buf_size > 0) {
4945 /* Copy the data out of the buffer we created */
4946 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
4958 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4960 BIG_IOCTL_Command_struct *ioc;
4961 struct CommandList *c;
4962 unsigned char **buff = NULL;
4963 int *buff_size = NULL;
4969 BYTE __user *data_ptr;
4973 if (!capable(CAP_SYS_RAWIO))
4975 ioc = (BIG_IOCTL_Command_struct *)
4976 kmalloc(sizeof(*ioc), GFP_KERNEL);
4981 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
4985 if ((ioc->buf_size < 1) &&
4986 (ioc->Request.Type.Direction != XFER_NONE)) {
4987 status = -EINVAL;
4988 goto cleanup1;
4989 }
4990 /* Check kmalloc limits using all SGs */
4991 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
4995 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
4999 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
5004 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
5009 left = ioc->buf_size;
5010 data_ptr = ioc->buf;
5012 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5013 buff_size[sg_used] = sz;
5014 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5015 if (buff[sg_used] == NULL) {
5019 if (ioc->Request.Type.Direction & XFER_WRITE) {
5020 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5025 memset(buff[sg_used], 0, sz);
5035 c->cmd_type = CMD_IOCTL_PEND;
5036 c->Header.ReplyQueue = 0;
5037 c->Header.SGList = (u8) sg_used;
5038 c->Header.SGTotal = cpu_to_le16(sg_used);
5039 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5040 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5041 if (ioc->buf_size > 0) {
5043 for (i = 0; i < sg_used; i++) {
5044 temp64 = pci_map_single(h->pdev, buff[i],
5045 buff_size[i], PCI_DMA_BIDIRECTIONAL);
5046 if (dma_mapping_error(&h->pdev->dev,
5047 (dma_addr_t) temp64)) {
5048 c->SG[i].Addr = cpu_to_le64(0);
5049 c->SG[i].Len = cpu_to_le32(0);
5050 hpsa_pci_unmap(h->pdev, c, i,
5051 PCI_DMA_BIDIRECTIONAL);
5055 c->SG[i].Addr = cpu_to_le64(temp64);
5056 c->SG[i].Len = cpu_to_le32(buff_size[i]);
5057 c->SG[i].Ext = cpu_to_le32(0);
5059 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
5061 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
5063 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
5064 check_ioctl_unit_attention(h, c);
5065 /* Copy the error information out */
5066 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5067 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
5071 if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
5074 /* Copy the data out of the buffer we created */
5075 BYTE __user *ptr = ioc->buf;
5076 for (i = 0; i < sg_used; i++) {
5077 if (copy_to_user(ptr, buff[i], buff_size[i])) {
5081 ptr += buff_size[i];
5091 for (i = 0; i < sg_used; i++)
5092 kfree(buff[i]);
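/*
 * Editor's illustration of the chunking used by this ioctl: a
 * hypothetical 10240-byte user buffer with ioc->malloc_size = 4096 is
 * split into kernel buffers of 4096 + 4096 + 2048 bytes (sg_used = 3),
 * each becoming one SG descriptor of the built command.
 */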
5100 static void check_ioctl_unit_attention(struct ctlr_info *h,
5101 struct CommandList *c)
5103 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5104 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5105 (void) check_for_unit_attention(h, c);
5111 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5113 struct ctlr_info *h;
5114 void __user *argp = (void __user *)arg;
5117 h = sdev_to_hba(dev);
5120 case CCISS_DEREGDISK:
5121 case CCISS_REGNEWDISK:
5123 hpsa_scan_start(h->scsi_host);
5125 case CCISS_GETPCIINFO:
5126 return hpsa_getpciinfo_ioctl(h, argp);
5127 case CCISS_GETDRIVVER:
5128 return hpsa_getdrivver_ioctl(h, argp);
5129 case CCISS_PASSTHRU:
5130 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5131 return -EAGAIN;
5132 rc = hpsa_passthru_ioctl(h, argp);
5133 atomic_inc(&h->passthru_cmds_avail);
5134 return rc;
5135 case CCISS_BIG_PASSTHRU:
5136 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5137 return -EAGAIN;
5138 rc = hpsa_big_passthru_ioctl(h, argp);
5139 atomic_inc(&h->passthru_cmds_avail);
5140 return rc;
5141 default:
5142 return -ENOTTY;
5143 }
5146 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5147 u8 reset_type)
5149 struct CommandList *c;
5154 /* fill_cmd can't fail here, no data buffer to map */
5155 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
5156 RAID_CTLR_LUNID, TYPE_MSG);
5157 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
5159 enqueue_cmd_and_start_io(h, c);
5160 /* Don't wait for completion, the reset won't complete. Don't free
5161 * the command either. This is the last command we will send before
5162 * re-initializing everything, so it doesn't matter and won't leak.
5163 */
5164 return 0;
5167 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5168 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
5169 int cmd_type)
5171 int pci_dir = XFER_NONE;
5172 struct CommandList *a; /* for commands to be aborted */
5174 c->cmd_type = CMD_IOCTL_PEND;
5175 c->Header.ReplyQueue = 0;
5176 if (buff != NULL && size > 0) {
5177 c->Header.SGList = 1;
5178 c->Header.SGTotal = cpu_to_le16(1);
5180 c->Header.SGList = 0;
5181 c->Header.SGTotal = cpu_to_le16(0);
5183 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5185 if (cmd_type == TYPE_CMD) {
5186 switch (cmd) {
5187 case HPSA_INQUIRY:
5188 /* are we trying to read a vital product page */
5189 if (page_code & VPD_PAGE) {
5190 c->Request.CDB[1] = 0x01;
5191 c->Request.CDB[2] = (page_code & 0xff);
5192 }
5193 c->Request.CDBLen = 6;
5194 c->Request.type_attr_dir =
5195 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5196 c->Request.Timeout = 0;
5197 c->Request.CDB[0] = HPSA_INQUIRY;
5198 c->Request.CDB[4] = size & 0xFF;
5199 break;
5200 case HPSA_REPORT_LOG:
5201 case HPSA_REPORT_PHYS:
5202 /* Talking to controller, so it's a physical command:
5203 mode = 00, target = 0. Nothing to write.
5205 c->Request.CDBLen = 12;
5206 c->Request.type_attr_dir =
5207 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5208 c->Request.Timeout = 0;
5209 c->Request.CDB[0] = cmd;
5210 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5211 c->Request.CDB[7] = (size >> 16) & 0xFF;
5212 c->Request.CDB[8] = (size >> 8) & 0xFF;
5213 c->Request.CDB[9] = size & 0xFF;
5214 break;
5215 case HPSA_CACHE_FLUSH:
5216 c->Request.CDBLen = 12;
5217 c->Request.type_attr_dir =
5218 TYPE_ATTR_DIR(cmd_type,
5219 ATTR_SIMPLE, XFER_WRITE);
5220 c->Request.Timeout = 0;
5221 c->Request.CDB[0] = BMIC_WRITE;
5222 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
5223 c->Request.CDB[7] = (size >> 8) & 0xFF;
5224 c->Request.CDB[8] = size & 0xFF;
5225 break;
5226 case TEST_UNIT_READY:
5227 c->Request.CDBLen = 6;
5228 c->Request.type_attr_dir =
5229 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5230 c->Request.Timeout = 0;
5231 break;
5232 case HPSA_GET_RAID_MAP:
5233 c->Request.CDBLen = 12;
5234 c->Request.type_attr_dir =
5235 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5236 c->Request.Timeout = 0;
5237 c->Request.CDB[0] = HPSA_CISS_READ;
5238 c->Request.CDB[1] = cmd;
5239 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5240 c->Request.CDB[7] = (size >> 16) & 0xFF;
5241 c->Request.CDB[8] = (size >> 8) & 0xFF;
5242 c->Request.CDB[9] = size & 0xFF;
5243 break;
5244 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5245 c->Request.CDBLen = 10;
5246 c->Request.type_attr_dir =
5247 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5248 c->Request.Timeout = 0;
5249 c->Request.CDB[0] = BMIC_READ;
5250 c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5251 c->Request.CDB[7] = (size >> 16) & 0xFF;
5252 c->Request.CDB[8] = (size >> 8) & 0xFF;
5253 break;
5254 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
5255 c->Request.CDBLen = 10;
5256 c->Request.type_attr_dir =
5257 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5258 c->Request.Timeout = 0;
5259 c->Request.CDB[0] = BMIC_READ;
5260 c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
5261 c->Request.CDB[7] = (size >> 16) & 0xFF;
5262 c->Request.CDB[8] = (size >> 8) & 0xFF;
5263 break;
5264 default:
5265 dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
5266 BUG();
5267 return -1;
5268 }
5269 } else if (cmd_type == TYPE_MSG) {
5272 case HPSA_DEVICE_RESET_MSG:
5273 c->Request.CDBLen = 16;
5274 c->Request.type_attr_dir =
5275 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5276 c->Request.Timeout = 0; /* Don't time out */
5277 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5278 c->Request.CDB[0] = cmd;
5279 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
5280 /* If bytes 4-7 are zero, it means reset the */
5281 /* LunID device */
5282 c->Request.CDB[4] = 0x00;
5283 c->Request.CDB[5] = 0x00;
5284 c->Request.CDB[6] = 0x00;
5285 c->Request.CDB[7] = 0x00;
5286 break;
5287 case HPSA_ABORT_MSG:
5288 a = buff; /* point to command to be aborted */
5289 dev_dbg(&h->pdev->dev,
5290 "Abort Tag:0x%016llx request Tag:0x%016llx",
5291 a->Header.tag, c->Header.tag);
5292 c->Request.CDBLen = 16;
5293 c->Request.type_attr_dir =
5294 TYPE_ATTR_DIR(cmd_type,
5295 ATTR_SIMPLE, XFER_WRITE);
5296 c->Request.Timeout = 0; /* Don't time out */
5297 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5298 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5299 c->Request.CDB[2] = 0x00; /* reserved */
5300 c->Request.CDB[3] = 0x00; /* reserved */
5301 /* Tag to abort goes in CDB[4]-CDB[11] */
5302 memcpy(&c->Request.CDB[4], &a->Header.tag,
5303 sizeof(a->Header.tag));
5304 c->Request.CDB[12] = 0x00; /* reserved */
5305 c->Request.CDB[13] = 0x00; /* reserved */
5306 c->Request.CDB[14] = 0x00; /* reserved */
5307 c->Request.CDB[15] = 0x00; /* reserved */
5308 break;
5309 default:
5310 dev_warn(&h->pdev->dev, "unknown message type %d\n",
5311 cmd);
5312 BUG();
5313 }
5314 } else {
5315 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5316 BUG();
5317 }
5319 switch (GET_DIR(c->Request.type_attr_dir)) {
5320 case XFER_READ:
5321 pci_dir = PCI_DMA_FROMDEVICE;
5322 break;
5323 case XFER_WRITE:
5324 pci_dir = PCI_DMA_TODEVICE;
5325 break;
5326 case XFER_NONE:
5327 pci_dir = PCI_DMA_NONE;
5328 break;
5329 default:
5330 pci_dir = PCI_DMA_BIDIRECTIONAL;
5331 }
5332 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5333 return -1;
5334 return 0;
5335 }
5337 /*
5338 * Map (physical) PCI mem into (virtual) kernel space
5339 */
5340 static void __iomem *remap_pci_mem(ulong base, ulong size)
5342 ulong page_base = ((ulong) base) & PAGE_MASK;
5343 ulong page_offs = ((ulong) base) - page_base;
5344 void __iomem *page_remapped = ioremap_nocache(page_base,
5345 page_offs + size);
5347 return page_remapped ? (page_remapped + page_offs) : NULL;
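/*
 * Worked example (editor's note): with 4 KiB pages, base = 0xfe001040
 * gives page_base = 0xfe001000 and page_offs = 0x40; the caller gets
 * the remapping of the whole page plus 0x40, so BARs need not be
 * page-aligned.
 */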
5350 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
5352 return h->access.command_completed(h, q);
5355 static inline bool interrupt_pending(struct ctlr_info *h)
5357 return h->access.intr_pending(h);
5360 static inline long interrupt_not_for_us(struct ctlr_info *h)
5362 return (h->access.intr_pending(h) == 0) ||
5363 (h->interrupts_enabled == 0);
5366 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5369 if (unlikely(tag_index >= h->nr_cmds)) {
5370 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5376 static inline void finish_cmd(struct CommandList *c)
5378 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
5379 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5380 || c->cmd_type == CMD_IOACCEL2))
5381 complete_scsi_command(c);
5382 else if (c->cmd_type == CMD_IOCTL_PEND)
5383 complete(c->waiting);
5387 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
5389 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5390 #define HPSA_SIMPLE_ERROR_BITS 0x03
5391 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
5392 return tag & ~HPSA_SIMPLE_ERROR_BITS;
5393 return tag & ~HPSA_PERF_ERROR_BITS;
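/*
 * Editor's illustration (hypothetical value for DIRECT_LOOKUP_SHIFT,
 * which is defined in hpsa.h): if the shift were 3, performant mode
 * would reserve the low 3 bits of a raw tag for error status, so a
 * raw tag of 0x125 masks to 0x120; simple mode always clears just the
 * low two bits (0x125 -> 0x124).
 */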
5396 /* process completion of an indexed ("direct lookup") command */
5397 static inline void process_indexed_cmd(struct ctlr_info *h,
5401 struct CommandList *c;
5403 tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
5404 if (!bad_tag(h, tag_index, raw_tag)) {
5405 c = h->cmd_pool + tag_index;
5406 finish_cmd(c);
5407 }
5410 /* Some controllers, like p400, will give us one interrupt
5411 * after a soft reset, even if we turned interrupts off.
5412 * Only need to check for this in the hpsa_xxx_discard_completions
5413 * functions.
5415 static int ignore_bogus_interrupt(struct ctlr_info *h)
5417 if (likely(!reset_devices))
5420 if (likely(h->interrupts_enabled))
5423 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
5424 "(known firmware bug.) Ignoring.\n");
5430 * Convert &h->q[x] (passed to interrupt handlers) back to h.
5431 * Relies on (h->q[x] == x) being true for x such that
5432 * 0 <= x < MAX_REPLY_QUEUES.
5434 static struct ctlr_info *queue_to_hba(u8 *queue)
5436 return container_of((queue - *queue), struct ctlr_info, q[0]);
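/*
 * Editor's note: h->q[] is filled so that h->q[x] == x.  The handler
 * receives &h->q[x]; subtracting the stored value *queue steps the
 * pointer back to &h->q[0], and container_of() then recovers the
 * enclosing ctlr_info.
 */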
5439 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5441 struct ctlr_info *h = queue_to_hba(queue);
5442 u8 q = *(u8 *) queue;
5445 if (ignore_bogus_interrupt(h))
5446 return IRQ_NONE;
5448 if (interrupt_not_for_us(h))
5449 return IRQ_NONE;
5450 h->last_intr_timestamp = get_jiffies_64();
5451 while (interrupt_pending(h)) {
5452 raw_tag = get_next_completion(h, q);
5453 while (raw_tag != FIFO_EMPTY)
5454 raw_tag = next_command(h, q);
5455 }
5456 return IRQ_HANDLED;
5459 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
5461 struct ctlr_info *h = queue_to_hba(queue);
5463 u8 q = *(u8 *) queue;
5465 if (ignore_bogus_interrupt(h))
5466 return IRQ_NONE;
5468 h->last_intr_timestamp = get_jiffies_64();
5469 raw_tag = get_next_completion(h, q);
5470 while (raw_tag != FIFO_EMPTY)
5471 raw_tag = next_command(h, q);
5472 return IRQ_HANDLED;
5475 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
5477 struct ctlr_info *h = queue_to_hba((u8 *) queue);
5479 u8 q = *(u8 *) queue;
5481 if (interrupt_not_for_us(h))
5482 return IRQ_NONE;
5483 h->last_intr_timestamp = get_jiffies_64();
5484 while (interrupt_pending(h)) {
5485 raw_tag = get_next_completion(h, q);
5486 while (raw_tag != FIFO_EMPTY) {
5487 process_indexed_cmd(h, raw_tag);
5488 raw_tag = next_command(h, q);
5489 }
5490 }
5491 return IRQ_HANDLED;
5494 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
5496 struct ctlr_info *h = queue_to_hba(queue);
5498 u8 q = *(u8 *) queue;
5500 h->last_intr_timestamp = get_jiffies_64();
5501 raw_tag = get_next_completion(h, q);
5502 while (raw_tag != FIFO_EMPTY) {
5503 process_indexed_cmd(h, raw_tag);
5504 raw_tag = next_command(h, q);
5505 }
5506 return IRQ_HANDLED;
5509 /* Send a message CDB to the firmware. Careful, this only works
5510 * in simple mode, not performant mode due to the tag lookup.
5511 * We only ever use this immediately after a controller reset.
5513 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5517 struct CommandListHeader CommandHeader;
5518 struct RequestBlock Request;
5519 struct ErrDescriptor ErrorDescriptor;
5521 struct Command *cmd;
5522 static const size_t cmd_sz = sizeof(*cmd) +
5523 sizeof(cmd->ErrorDescriptor);
5527 void __iomem *vaddr;
5530 vaddr = pci_ioremap_bar(pdev, 0);
5534 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
5535 * CCISS commands, so they must be allocated from the lower 4GiB of
5536 * memory.
5538 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5544 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
5550 /* This must fit, because of the 32-bit consistent DMA mask. Also,
5551 * although there's no guarantee, we assume that the address is at
5552 * least 4-byte aligned (most likely, it's page-aligned).
5554 paddr32 = cpu_to_le32(paddr64);
5556 cmd->CommandHeader.ReplyQueue = 0;
5557 cmd->CommandHeader.SGList = 0;
5558 cmd->CommandHeader.SGTotal = cpu_to_le16(0);
5559 cmd->CommandHeader.tag = cpu_to_le64(paddr64);
5560 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5562 cmd->Request.CDBLen = 16;
5563 cmd->Request.type_attr_dir =
5564 TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
5565 cmd->Request.Timeout = 0; /* Don't time out */
5566 cmd->Request.CDB[0] = opcode;
5567 cmd->Request.CDB[1] = type;
5568 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
5569 cmd->ErrorDescriptor.Addr =
5570 cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
5571 cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
5573 writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
5575 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
5576 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
5577 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
5579 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
5584 /* we leak the DMA buffer here ... no choice since the controller could
5585 * still complete the command.
5587 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
5588 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
5593 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
5595 if (tag & HPSA_ERROR_BIT) {
5596 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
5601 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
5606 #define hpsa_noop(p) hpsa_message(p, 3, 0)
5608 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
5609 void __iomem *vaddr, u32 use_doorbell)
5613 /* For everything after the P600, the PCI power state method
5614 * of resetting the controller doesn't work, so we have this
5615 * other way using the doorbell register.
5617 dev_info(&pdev->dev, "using doorbell to reset controller\n");
5618 writel(use_doorbell, vaddr + SA5_DOORBELL);
5620 /* PMC hardware guys tell us we need a 10 second delay after
5621 * doorbell reset and before any attempt to talk to the board
5622 * at all to ensure that this actually works and doesn't fall
5623 * over in some weird corner cases.
5626 } else { /* Try to do it the PCI power state way */
5628 /* Quoting from the Open CISS Specification: "The Power
5629 * Management Control/Status Register (CSR) controls the power
5630 * state of the device. The normal operating state is D0,
5631 * CSR=00h. The software off state is D3, CSR=03h. To reset
5632 * the controller, place the interface device in D3 then to D0,
5633 * this causes a secondary PCI reset which will reset the
5638 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
5640 /* enter the D3hot power management state */
5641 rc = pci_set_power_state(pdev, PCI_D3hot);
5647 /* enter the D0 power management state */
5648 rc = pci_set_power_state(pdev, PCI_D0);
5653 * The P600 requires a small delay when changing states.
5654 * Otherwise we may think the board did not reset and we bail.
5655 * This for kdump only and is particular to the P600.
5662 static void init_driver_version(char *driver_version, int len)
5664 memset(driver_version, 0, len);
5665 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
5668 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
5670 char *driver_version;
5671 int i, size = sizeof(cfgtable->driver_version);
5673 driver_version = kmalloc(size, GFP_KERNEL);
5674 if (!driver_version)
5677 init_driver_version(driver_version, size);
5678 for (i = 0; i < size; i++)
5679 writeb(driver_version[i], &cfgtable->driver_version[i]);
5680 kfree(driver_version);
5684 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
5685 unsigned char *driver_ver)
5689 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
5690 driver_ver[i] = readb(&cfgtable->driver_version[i]);
5693 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
5696 char *driver_ver, *old_driver_ver;
5697 int rc, size = sizeof(cfgtable->driver_version);
5699 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
5700 if (!old_driver_ver)
5702 driver_ver = old_driver_ver + size;
5704 /* After a reset, the 32 bytes of "driver version" in the cfgtable
5705 * should have been changed, otherwise we know the reset failed.
5707 init_driver_version(old_driver_ver, size);
5708 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
5709 rc = !memcmp(driver_ver, old_driver_ver, size);
5710 kfree(old_driver_ver);
5711 return rc;
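/*
 * Editor's note: rc above is 1 ("reset failed") when the cfgtable
 * still contains the driver-version string written before the reset,
 * i.e. the firmware never cleared it.
 */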
5713 /* This does a hard reset of the controller using PCI power management
5714 * states or the doorbell register.
5716 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
5720 u64 cfg_base_addr_index;
5721 void __iomem *vaddr;
5722 unsigned long paddr;
5723 u32 misc_fw_support;
5725 struct CfgTable __iomem *cfgtable;
5728 u16 command_register;
5730 /* For controllers as old as the P600, this is very nearly
5733 * pci_save_state(pci_dev);
5734 * pci_set_power_state(pci_dev, PCI_D3hot);
5735 * pci_set_power_state(pci_dev, PCI_D0);
5736 * pci_restore_state(pci_dev);
5738 * For controllers newer than the P600, the pci power state
5739 * method of resetting doesn't work so we have another way
5740 * using the doorbell register.
5743 rc = hpsa_lookup_board_id(pdev, &board_id);
5745 dev_warn(&pdev->dev, "Board ID not found\n");
5748 if (!ctlr_is_resettable(board_id)) {
5749 dev_warn(&pdev->dev, "Controller not resettable\n");
5753 /* if controller is soft- but not hard resettable... */
5754 if (!ctlr_is_hard_resettable(board_id))
5755 return -ENOTSUPP; /* try soft reset later. */
5757 /* Save the PCI command register */
5758 pci_read_config_word(pdev, 4, &command_register);
5759 pci_save_state(pdev);
5761 /* find the first memory BAR, so we can find the cfg table */
5762 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
5765 vaddr = remap_pci_mem(paddr, 0x250);
5769 /* find cfgtable in order to check if reset via doorbell is supported */
5770 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
5771 &cfg_base_addr_index, &cfg_offset);
5774 cfgtable = remap_pci_mem(pci_resource_start(pdev,
5775 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
5780 rc = write_driver_ver_to_cfgtable(cfgtable);
5782 goto unmap_cfgtable;
5784 /* If reset via doorbell register is supported, use that.
5785 * There are two such methods. Favor the newest method.
5787 misc_fw_support = readl(&cfgtable->misc_fw_support);
5788 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
5790 use_doorbell = DOORBELL_CTLR_RESET2;
5792 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
5794 dev_warn(&pdev->dev,
5795 "Soft reset not supported. Firmware update is required.\n");
5796 rc = -ENOTSUPP; /* try soft reset */
5797 goto unmap_cfgtable;
5801 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
5803 goto unmap_cfgtable;
5805 pci_restore_state(pdev);
5806 pci_write_config_word(pdev, 4, command_register);
5808 /* Some devices (notably the HP Smart Array 5i Controller)
5809 need a little pause here */
5810 msleep(HPSA_POST_RESET_PAUSE_MSECS);
5812 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
5814 dev_warn(&pdev->dev,
5815 "Failed waiting for board to become ready after hard reset\n");
5816 goto unmap_cfgtable;
5819 rc = controller_reset_failed(vaddr);
5821 goto unmap_cfgtable;
5823 dev_warn(&pdev->dev, "Unable to successfully reset "
5824 "controller. Will try soft reset.\n");
5827 dev_info(&pdev->dev, "board ready after hard reset.\n");
5839 * We cannot read the structure directly, for portability we must use
5840 * the io functions.
5841 * This is for debug only.
5843 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
5849 dev_info(dev, "Controller Configuration information\n");
5850 dev_info(dev, "------------------------------------\n");
5851 for (i = 0; i < 4; i++)
5852 temp_name[i] = readb(&(tb->Signature[i]));
5853 temp_name[4] = '\0';
5854 dev_info(dev, " Signature = %s\n", temp_name);
5855 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
5856 dev_info(dev, " Transport methods supported = 0x%x\n",
5857 readl(&(tb->TransportSupport)));
5858 dev_info(dev, " Transport methods active = 0x%x\n",
5859 readl(&(tb->TransportActive)));
5860 dev_info(dev, " Requested transport Method = 0x%x\n",
5861 readl(&(tb->HostWrite.TransportRequest)));
5862 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
5863 readl(&(tb->HostWrite.CoalIntDelay)));
5864 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
5865 readl(&(tb->HostWrite.CoalIntCount)));
5866 dev_info(dev, " Max outstanding commands = %d\n",
5867 readl(&(tb->CmdsOutMax)));
5868 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
5869 for (i = 0; i < 16; i++)
5870 temp_name[i] = readb(&(tb->ServerName[i]));
5871 temp_name[16] = '\0';
5872 dev_info(dev, " Server Name = %s\n", temp_name);
5873 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
5874 readl(&(tb->HeartBeat)));
5875 #endif /* HPSA_DEBUG */
5878 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
5880 int i, offset, mem_type, bar_type;
5882 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
5885 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5886 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
5887 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
5890 mem_type = pci_resource_flags(pdev, i) &
5891 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
5893 case PCI_BASE_ADDRESS_MEM_TYPE_32:
5894 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
5895 offset += 4; /* 32 bit */
5896 break;
5897 case PCI_BASE_ADDRESS_MEM_TYPE_64:
5898 offset += 8; /* 64 bit */
5899 break;
5900 default: /* reserved in PCI 2.2 */
5901 dev_warn(&pdev->dev,
5902 "base address is invalid\n");
5907 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
5913 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
5914 * controllers that are capable. If not, we use legacy INTx mode.
5917 static void hpsa_interrupt_mode(struct ctlr_info *h)
5919 #ifdef CONFIG_PCI_MSI
5921 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
5923 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
5924 hpsa_msix_entries[i].vector = 0;
5925 hpsa_msix_entries[i].entry = i;
5928 /* Some boards advertise MSI but don't really support it */
5929 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
5930 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
5931 goto default_int_mode;
5932 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
5933 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
5934 h->msix_vector = MAX_REPLY_QUEUES;
5935 if (h->msix_vector > num_online_cpus())
5936 h->msix_vector = num_online_cpus();
5937 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
5940 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
5942 goto single_msi_mode;
5943 } else if (err < h->msix_vector) {
5944 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
5945 "available\n", err);
5947 h->msix_vector = err;
5948 for (i = 0; i < h->msix_vector; i++)
5949 h->intr[i] = hpsa_msix_entries[i].vector;
5953 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
5954 dev_info(&h->pdev->dev, "MSI capable controller\n");
5955 if (!pci_enable_msi(h->pdev))
5958 dev_warn(&h->pdev->dev, "MSI init failed\n");
5961 #endif /* CONFIG_PCI_MSI */
5962 /* if we get here we're going to use the default interrupt mode */
5963 h->intr[h->intr_mode] = h->pdev->irq;
5966 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
5969 u32 subsystem_vendor_id, subsystem_device_id;
5971 subsystem_vendor_id = pdev->subsystem_vendor;
5972 subsystem_device_id = pdev->subsystem_device;
5973 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
5974 subsystem_vendor_id;
5976 for (i = 0; i < ARRAY_SIZE(products); i++)
5977 if (*board_id == products[i].board_id)
5978 return i;
5980 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
5981 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
5982 !hpsa_allow_any) {
5983 dev_warn(&pdev->dev, "unrecognized board ID: "
5984 "0x%08x, ignoring.\n", *board_id);
5985 return -ENODEV;
5986 }
5987 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
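/*
 * Worked example (editor's note): subsystem vendor 0x103C with
 * subsystem device 0x3241 packs to *board_id = 0x3241103C, matching
 * the corresponding entry in hpsa_pci_device_id[].
 */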
5990 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
5991 unsigned long *memory_bar)
5995 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
5996 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
5997 /* addressing mode bits already removed */
5998 *memory_bar = pci_resource_start(pdev, i);
5999 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
6003 dev_warn(&pdev->dev, "no memory BAR found\n");
6007 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6013 iterations = HPSA_BOARD_READY_ITERATIONS;
6015 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
6017 for (i = 0; i < iterations; i++) {
6018 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6019 if (wait_for_ready) {
6020 if (scratchpad == HPSA_FIRMWARE_READY)
6021 return 0;
6022 } else {
6023 if (scratchpad != HPSA_FIRMWARE_READY)
6024 return 0;
6025 }
6026 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6028 dev_warn(&pdev->dev, "board not ready, timed out.\n");
6029 return -ENODEV;
6032 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6033 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6036 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6037 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6038 *cfg_base_addr &= (u32) 0x0000ffff;
6039 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6040 if (*cfg_base_addr_index == -1) {
6041 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6047 static int hpsa_find_cfgtables(struct ctlr_info *h)
6051 u64 cfg_base_addr_index;
6055 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6056 &cfg_base_addr_index, &cfg_offset);
6059 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
6060 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
6062 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
6065 rc = write_driver_ver_to_cfgtable(h->cfgtable);
6068 /* Find performant mode table. */
6069 trans_offset = readl(&h->cfgtable->TransMethodOffset);
6070 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6071 cfg_base_addr_index)+cfg_offset+trans_offset,
6072 sizeof(*h->transtable));
6078 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
6080 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
6082 /* Limit commands in memory limited kdump scenario. */
6083 if (reset_devices && h->max_commands > 32)
6084 h->max_commands = 32;
6086 if (h->max_commands < 16) {
6087 dev_warn(&h->pdev->dev, "Controller reports "
6088 "max supported commands of %d, an obvious lie. "
6089 "Using 16. Ensure that firmware is up to date.\n",
6091 h->max_commands = 16;
6095 /* If the controller reports that the total max sg entries is greater than 512,
6096 * then we know that chained SG blocks work. (Original smart arrays did not
6097 * support chained SG blocks and would return zero for max sg entries.)
6099 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
6101 return h->maxsgentries > 512;
6104 /* Interrogate the hardware for some limits:
6105 * max commands, max SG elements without chaining, and with chaining,
6106 * SG chain block size, etc.
6108 static void hpsa_find_board_params(struct ctlr_info *h)
6110 hpsa_get_max_perf_mode_cmds(h);
6111 h->nr_cmds = h->max_commands;
6112 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
6113 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
6114 if (hpsa_supports_chained_sg_blocks(h)) {
6115 /* Limit in-command s/g elements to 32 to save dma'able memory. */
6116 h->max_cmd_sg_entries = 32;
6117 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
6118 h->maxsgentries--; /* save one for chain pointer */
6121 * Original smart arrays supported at most 31 s/g entries
6122 * embedded inline in the command (trying to use more
6123 * would lock up the controller)
6125 h->max_cmd_sg_entries = 31;
6126 h->maxsgentries = 31; /* default to traditional values */
6130 /* Find out what task management functions are supported and cache */
6131 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
6132 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6133 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6134 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6135 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
6138 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
6140 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
6141 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	driver_support = readl(&(h->cfgtable->driver_support));
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
#ifdef CONFIG_X86
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}
/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			break;
		/* delay and try again */
		msleep(20);
	}
}
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* under certain very rare conditions, this can take awhile.
	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
	 * as we enter this code.)
	 */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & CFGTBL_ChangeReq))
			break;
		/* delay and try again */
		usleep_range(10000, 20000);
	}
}
static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
	u32 trans_support;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & SIMPLE_MODE))
		return -ENOTSUPP;

	h->max_commands = readl(&(h->cfgtable->CmdsOutMax));

	/* Update the field, and then ring the doorbell */
	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	print_cfg_table(&h->pdev->dev, h->cfgtable);
	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
		goto error;
	h->transMethod = CFGTBL_Trans_Simple;
	return 0;
error:
	dev_err(&h->pdev->dev, "failed to enter simple mode\n");
	return -ENODEV;
}
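/*
 * hpsa_enter_simple_mode() above is one instance of the config table
 * handshake this controller family uses for every transport change.
 * A minimal sketch of the pattern, using only registers this driver
 * already defines:
 *
 *	writel(<method>, &h->cfgtable->HostWrite.TransportRequest);
 *	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 *	hpsa_wait_for_mode_change_ack(h);     (poll doorbell bit to clear)
 *	readl(&h->cfgtable->TransportActive); (verify the method took hold)
 *
 * hpsa_enter_performant_mode() later in this file performs the same
 * sequence with a different transport method word.
 */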
static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"cannot obtain PCI resources, aborting\n");
		return err;
	}

	pci_set_master(h->pdev);

	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto err_out_free_res;
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto err_out_free_res;
	err = hpsa_find_cfgtables(h);
	if (err)
		goto err_out_free_res;
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto err_out_free_res;
	}
	hpsa_set_driver_support_bits(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto err_out_free_res;
	return 0;

err_out_free_res:
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	if (h->vaddr)
		iounmap(h->vaddr);
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}
static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}
static int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;
	void __iomem *vaddr;

	if (!reset_devices)
		return 0;

	/* kdump kernel is loading, we don't know in which state is
	 * the pci interface. The dev->enable_cnt is equal zero
	 * so we call enable+disable, wait a while and switch it on.
	 */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
		return -ENODEV;
	}
	pci_disable_device(pdev);
	msleep(260);			/* a randomly chosen number */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		return -ENODEV;
	}

	pci_set_master(pdev);

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL) {
		rc = -ENOMEM;
		goto out_disable;
	}
	writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
	iounmap(vaddr);

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode".  Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between 6402/6404 pair.
	 */
	if (rc)
		goto out_disable;

	/* Now try to get the controller to respond to a no-op */
	dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;

		dev_warn(&pdev->dev, "no-op failed%s\n",
				(i < 11 ? "; re-trying" : ""));
	}

out_disable:

	pci_disable_device(pdev);
	return rc;
}
static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
		goto clean_up;
	}
	return 0;
clean_up:
	hpsa_free_cmd_pool(h);
	return -ENOMEM;
}
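/*
 * Sizing note for the allocation above: cmd_pool_bits is a bitmap with
 * one bit per command slot.  With h->nr_cmds = 1024 on a 64-bit kernel,
 * for example, DIV_ROUND_UP(1024, 64) = 16 unsigned longs (128 bytes)
 * track which CommandList entries in the DMA-coherent pool are in use.
 */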
static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct CommandList),
			    h->cmd_pool, h->cmd_pool_dhandle);
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct ErrorInfo),
			    h->errinfo_pool,
			    h->errinfo_pool_dhandle);
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct io_accel1_cmd),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
}
static void hpsa_irq_affinity_hints(struct ctlr_info *h)
{
	int i, cpu;

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < h->msix_vector; i++) {
		irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
	}
}
/* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
static void hpsa_free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		i = h->intr_mode;
		irq_set_affinity_hint(h->intr[i], NULL);
		free_irq(h->intr[i], &h->q[i]);
		return;
	}

	for (i = 0; i < h->msix_vector; i++) {
		irq_set_affinity_hint(h->intr[i], NULL);
		free_irq(h->intr[i], &h->q[i]);
	}
	for (; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = 0;
}
/* returns 0 on success; cleans up and returns -Enn on error */
static int hpsa_request_irqs(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i, j;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vector; i++) {
			rc = request_irq(h->intr[i], msixhandler,
					0, h->devname,
					&h->q[i]);
			if (rc) {
				dev_err(&h->pdev->dev,
					"failed to get irq %d for %s\n",
				       h->intr[i], h->devname);
				for (j = 0; j < i; j++) {
					free_irq(h->intr[j], &h->q[j]);
					h->q[j] = 0;
				}
				for (; j < MAX_REPLY_QUEUES; j++)
					h->q[j] = 0;
				return rc;
			}
		}
		hpsa_irq_affinity_hints(h);
	} else {
		/* Use single reply pool */
		if (h->msix_vector > 0 || h->msi_vector) {
			rc = request_irq(h->intr[h->intr_mode],
				msixhandler, 0, h->devname,
				&h->q[h->intr_mode]);
		} else {
			rc = request_irq(h->intr[h->intr_mode],
				intxhandler, IRQF_SHARED, h->devname,
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
		       h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}
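/*
 * Two wiring modes are handled above: in performant mode with MSI-X,
 * each of the h->msix_vector vectors gets its own reply queue, and the
 * &h->q[i] cookie passed to request_irq() tells the handler which queue
 * to service; MSI and legacy INTx fall back to a single reply queue,
 * with INTx registered IRQF_SHARED since the interrupt line may be
 * shared with other devices.
 */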
static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
		HPSA_RESET_TYPE_CONTROLLER)) {
		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
		return -EIO;
	}

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return -1;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev,
			"Board failed to become ready after soft reset.\n");
		return -1;
	}

	return 0;
}
static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
{
	hpsa_free_irqs(h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector) {
		if (h->pdev->msix_enabled)
			pci_disable_msix(h->pdev);
	} else if (h->msi_vector) {
		if (h->pdev->msi_enabled)
			pci_disable_msi(h->pdev);
	}
#endif /* CONFIG_PCI_MSI */
}
static void hpsa_free_reply_queues(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->nreply_queues; i++) {
		if (!h->reply_queue[i].head)
			continue;
		pci_free_consistent(h->pdev, h->reply_queue_size,
			h->reply_queue[i].head, h->reply_queue[i].busaddr);
		h->reply_queue[i].head = NULL;
		h->reply_queue[i].busaddr = 0;
	}
}
static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_irqs_and_disable_msix(h);
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->blockFetchTable);
	hpsa_free_reply_queues(h);
	if (h->vaddr)
		iounmap(h->vaddr);
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	kfree(h);
}
/* Called when controller lockup detected. */
static void fail_all_outstanding_cmds(struct ctlr_info *h)
{
	int i, refcount;
	struct CommandList *c;

	flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
	for (i = 0; i < h->nr_cmds; i++) {
		c = h->cmd_pool + i;
		refcount = atomic_inc_return(&c->refcount);
		if (refcount > 1) {
			c->err_info->CommandStatus = CMD_HARDWARE_ERR;
			finish_cmd(c);
		}
		cmd_free(h, c);
	}
}
static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
{
	int i, cpu;

	cpu = cpumask_first(cpu_online_mask);
	for (i = 0; i < num_online_cpus(); i++) {
		u32 *lockup_detected;
		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
		*lockup_detected = value;
		cpu = cpumask_next(cpu, cpu_online_mask);
	}
	wmb(); /* be sure the per-cpu variables are out to memory */
}
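/*
 * The matching read side (the lockup_detected() helper defined earlier
 * in this file) fetches only the current CPU's copy via per_cpu_ptr().
 * Keeping one copy per CPU lets the hot I/O paths poll for lockup
 * without bouncing a shared cache line between CPUs; this function is
 * the single writer that updates every copy, hence the wmb() above.
 */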
static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;
	u32 lockup_detected;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	if (!lockup_detected) {
		/* no heartbeat, but controller gave us a zero. */
		dev_warn(&h->pdev->dev,
			"lockup detected but scratchpad register is zero\n");
		lockup_detected = 0xffffffff;
	}
	set_lockup_detected_for_all_cpus(h, lockup_detected);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
			lockup_detected);
	pci_disable_device(h->pdev);
	fail_all_outstanding_cmds(h);
}
static void detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
}
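/*
 * Example of the resulting cadence, assuming HEARTBEAT_SAMPLE_INTERVAL
 * is 30 seconds (its value in hpsa.h): the firmware HeartBeat counter
 * is read at most once per interval, and a lockup is declared only when
 * a full interval passes with no interrupt and two consecutive reads
 * return the same heartbeat value, so detection latency is roughly one
 * to two sample intervals.
 */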
static void hpsa_ack_ctlr_events(struct ctlr_info *h)
{
	int i;
	char *event_type;

	/* Ask the controller to clear the events we're handling. */
	if ((h->transMethod & (CFGTBL_Trans_io_accel1
			| CFGTBL_Trans_io_accel2)) &&
		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {

		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
			event_type = "state change";
		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
			event_type = "configuration change";
		/* Stop sending new RAID offload reqs via the IO accelerator */
		scsi_block_requests(h->scsi_host);
		for (i = 0; i < h->ndevices; i++)
			h->dev[i]->offload_enabled = 0;
		hpsa_drain_accel_commands(h);
		/* Set 'accelerator path config change' bit */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
		writel(h->events, &(h->cfgtable->clear_event_notify));
		/* Set the "clear event notify field update" bit 6 */
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		/* Wait until ctlr clears 'clear event notify field', bit 6 */
		hpsa_wait_for_clear_event_notify_ack(h);
		scsi_unblock_requests(h->scsi_host);
	} else {
		/* Acknowledge controller notification events. */
		writel(h->events, &(h->cfgtable->clear_event_notify));
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_clear_event_notify_ack(h);
#if 0
		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_mode_change_ack(h);
#endif
	}
	return;
}
/* Check a register on the controller to see if there are configuration
 * changes (added/changed/removed logical drives, etc.) which mean that
 * we should rescan the controller for devices.
 * Also check flag for driver-initiated rescan.
 */
static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return 0;

	h->events = readl(&(h->cfgtable->event_notify));
	return h->events & RESCAN_REQUIRED_EVENT_BITS;
}
/*
 * Check if any of the offline devices have become ready
 */
static int hpsa_offline_devices_ready(struct ctlr_info *h)
{
	unsigned long flags;
	struct offline_device_entry *d;
	struct list_head *this, *tmp;

	spin_lock_irqsave(&h->offline_device_lock, flags);
	list_for_each_safe(this, tmp, &h->offline_device_list) {
		d = list_entry(this, struct offline_device_entry,
				offline_list);
		spin_unlock_irqrestore(&h->offline_device_lock, flags);
		if (!hpsa_volume_offline(h, d->scsi3addr)) {
			spin_lock_irqsave(&h->offline_device_lock, flags);
			list_del(&d->offline_list);
			spin_unlock_irqrestore(&h->offline_device_lock, flags);
			return 1;
		}
		spin_lock_irqsave(&h->offline_device_lock, flags);
	}
	spin_unlock_irqrestore(&h->offline_device_lock, flags);
	return 0;
}
static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);
	detect_controller_lockup(h);
	if (lockup_detected(h))
		return;

	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
		scsi_host_get(h->scsi_host);
		hpsa_ack_ctlr_events(h);
		hpsa_scan_start(h->scsi_host);
		scsi_host_put(h->scsi_host);
	}

	spin_lock_irqsave(&h->lock, flags);
	if (h->remove_in_progress) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}
static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 32-byte boundary because
	 * the 5 lower bits of the address are used by the hardware and by
	 * the driver.  See comments in hpsa.h for more info.
	 */
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->offline_device_list);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->offline_device_lock);
	spin_lock_init(&h->scan_lock);
	atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);

	h->resubmit_wq = alloc_workqueue("hpsa", WQ_MEM_RECLAIM, 0);
	if (!h->resubmit_wq) {
		dev_err(&h->pdev->dev, "Failed to allocate work queue\n");
		rc = -ENOMEM;
		goto clean1;
	}
	/* Allocate and clear per-cpu variable lockup_detected */
	h->lockup_detected = alloc_percpu(u32);
	if (!h->lockup_detected) {
		rc = -ENOMEM;
		goto clean1;
	}
	set_lockup_detected_for_all_cpus(h, 0);

	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, HPSA "%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
	       h->devname, pdev->device,
	       h->intr[h->intr_mode], dac ? "" : " not");
	rc = hpsa_allocate_cmd_pool(h);
	if (rc != 0)
		goto clean2_and_free_irqs;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->hba_mode_enabled = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross.  We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid.  So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time.  Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		hpsa_free_irqs(h);
		rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Failed to request_irq after soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything.  Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't go to clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Enable Accelerated IO path at driver layer */
	h->acciopath_status = 1;

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	return 0;

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
clean2_and_free_irqs:
	hpsa_free_irqs(h);
clean2:
clean1:
	if (h->resubmit_wq)
		destroy_workqueue(h->resubmit_wq);
	if (h->lockup_detected)
		free_percpu(h->lockup_detected);
	kfree(h);
	return rc;
}
static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;

	/* Don't bother trying to flush the cache if locked up */
	if (unlikely(lockup_detected(h)))
		return;
	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		goto out_of_memory;
	}
	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD)) {
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_free(h, c);
out_of_memory:
	kfree(flush_buf);
}
static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Flush the write cache first, so that all data held in the
	 * battery-backed cache is written to disk, then turn the board
	 * interrupts off and release the interrupt resources.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs_and_disable_msix(h);
}
static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++)
		kfree(h->dev[i]);
}
static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	cancel_delayed_work(&h->monitor_ctlr_work);
	spin_unlock_irqrestore(&h->lock, flags);
	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	destroy_workqueue(h->resubmit_wq);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_device_info(h);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	hpsa_free_reply_queues(h);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->ioaccel2_blockFetchTable);
	kfree(h->hba_inquiry_data);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	free_percpu(h->lockup_detected);
	kfree(h);
}
static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}
static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};
/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers.  The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands.  This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes.  The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, u32 *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
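/*
 * Worked example of calc_bucket_map(): with the performant-mode table
 * bft[] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4} and
 * min_blocks = 4, a command with i = 3 SG entries needs
 * size = 3 + 4 = 7 sixteen-byte blocks.  The smallest bucket with
 * bucket[j] >= 7 is bucket[2] = 8, so bucket_map[3] = 2 and the
 * controller fetches only 8 * 16 = 128 bytes for such a command.
 */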
static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated.  There are 8 registers on
	 * the controller which we write to in order to tell it the
	 * 8 different sizes of commands which there may be.  It's a
	 * way of reducing the DMA done to fetch each command.  Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within.  The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes.  The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16 byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks.  Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements.  bft[] contains the eight values we write to
	 * the registers.  They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
				 16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/*  5 = 1 s/g entry or 4k
	 *  6 = 2 s/g entry or 8k
	 *  8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */

	/* If the controller supports either ioaccel method then
	 * we can also use the RAID stack submit path that does not
	 * perform the superfluous readl() after each command submission.
	 */
	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
		access = SA5_performant_access_no_read;

	/* Controller spec: zero out this buffer. */
	for (i = 0; i < h->nreply_queues; i++)
		memset(h->reply_queue[i].head, 0, h->reply_queue_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_queue[i].busaddr,
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * enable outbound interrupt coalescing in accelerator mode;
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			access = SA5_ioaccel_mode2_access;
			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
		}
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_err(&h->pdev->dev,
			"performant mode problem - transport not active\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		for (i = 0; i < h->nreply_queues; i++)
			memset(h->reply_queue[i].head,
				(u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_queue_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags =
				cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->tag =
				cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
			cp->host_addr =
				cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
			&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
}
static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
	kfree(h->ioaccel1_blockFetchTable);
	return 1;
}
static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
{
	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	kfree(h->ioaccel2_blockFetchTable);
	return 1;
}
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
			goto clean_up;
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			transMethod |= CFGTBL_Trans_io_accel2 |
					CFGTBL_Trans_enable_directed_msix;
			if (ioaccel2_alloc_cmds_and_bft(h))
				goto clean_up;
		}
	}

	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_queue_size = h->max_commands * sizeof(u64);

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
						h->reply_queue_size,
						&(h->reply_queue[i].busaddr));
		if (!h->reply_queue[i].head)
			goto clean_up;
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);
	if (!h->blockFetchTable)
		goto clean_up;

	hpsa_enter_performant_mode(h, trans_support);
	return;

clean_up:
	hpsa_free_reply_queues(h);
	kfree(h->blockFetchTable);
}
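/*
 * Sizing note: each reply queue entry is a u64, so with, for example,
 * h->max_commands = 1024 and four MSI-X vectors, the code above
 * allocates four DMA-coherent rings of 1024 * 8 = 8192 bytes each, one
 * per reply queue, plus a block fetch table of SG_ENTRIES_IN_CMD + 1
 * u32 entries.
 */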
static int is_accelerated_cmd(struct CommandList *c)
{
	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
}

static void hpsa_drain_accel_commands(struct ctlr_info *h)
{
	struct CommandList *c = NULL;
	int i, accel_cmds_out;
	int refcount;

	do { /* wait for all outstanding ioaccel commands to drain out */
		accel_cmds_out = 0;
		for (i = 0; i < h->nr_cmds; i++) {
			c = h->cmd_pool + i;
			refcount = atomic_inc_return(&c->refcount);
			if (refcount > 1) /* Command is allocated */
				accel_cmds_out += is_accelerated_cmd(c);
			cmd_free(h, c);
		}
		if (accel_cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}
/*
 * This is it.  Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}
static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
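	/*
	 * Each VERIFY_OFFSET() below is a compile-time assertion:
	 * BUILD_BUG_ON() fails the build if its condition is true, so
	 * compilation breaks immediately if a structure member drifts
	 * from the byte offset the controller firmware expects.  Nothing
	 * executes at runtime; verify_offsets() is never actually called.
	 */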
	VERIFY_OFFSET(structure_size, 0);
	VERIFY_OFFSET(volume_blk_size, 4);
	VERIFY_OFFSET(volume_blk_cnt, 8);
	VERIFY_OFFSET(phys_blk_shift, 16);
	VERIFY_OFFSET(parity_rotation_shift, 17);
	VERIFY_OFFSET(strip_size, 18);
	VERIFY_OFFSET(disk_starting_blk, 20);
	VERIFY_OFFSET(disk_blk_cnt, 28);
	VERIFY_OFFSET(data_disks_per_row, 36);
	VERIFY_OFFSET(metadata_disks_per_row, 38);
	VERIFY_OFFSET(row_cnt, 40);
	VERIFY_OFFSET(layout_map_count, 42);
	VERIFY_OFFSET(flags, 44);
	VERIFY_OFFSET(dekindex, 46);
	/* VERIFY_OFFSET(reserved, 48); */
	VERIFY_OFFSET(data, 64);

#undef VERIFY_OFFSET
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3); */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4); */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}
module_init(hpsa_init);
module_exit(hpsa_cleanup);