hpsa: count passthru cmds with atomics, not a spin locked int
drivers/scsi/hpsa.c
/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/percpu-defs.h>
#include <linux/percpu.h>
#include <asm/unaligned.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.4-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
        HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
                "Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
        "Use 'simple mode' rather than 'performant mode'");
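
/*
 * Example (illustrative): "modprobe hpsa hpsa_simple_mode=1" loads the
 * driver with performant mode disabled.  The S_IRUGO|S_IWUSR modes also
 * expose both knobs under /sys/module/hpsa/parameters/, though the
 * driver reads them during controller initialization.
 */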

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3241},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3243},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3245},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3247},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3249},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324A},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x324B},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSE,     0x103C, 0x3233},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3350},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1924},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1926},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1928},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1929},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BD},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BE},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21BF},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C0},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C1},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C2},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD},
        {PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
        {PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
        {PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
        {PCI_VENDOR_ID_HP,     PCI_ANY_ID,      PCI_ANY_ID, PCI_ANY_ID,
                PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
        {0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
        {0x3241103C, "Smart Array P212", &SA5_access},
        {0x3243103C, "Smart Array P410", &SA5_access},
        {0x3245103C, "Smart Array P410i", &SA5_access},
        {0x3247103C, "Smart Array P411", &SA5_access},
        {0x3249103C, "Smart Array P812", &SA5_access},
        {0x324A103C, "Smart Array P712m", &SA5_access},
        {0x324B103C, "Smart Array P711m", &SA5_access},
        {0x3233103C, "HP StorageWorks 1210m", &SA5_access}, /* alias of 333f */
        {0x3350103C, "Smart Array P222", &SA5_access},
        {0x3351103C, "Smart Array P420", &SA5_access},
        {0x3352103C, "Smart Array P421", &SA5_access},
        {0x3353103C, "Smart Array P822", &SA5_access},
        {0x3354103C, "Smart Array P420i", &SA5_access},
        {0x3355103C, "Smart Array P220i", &SA5_access},
        {0x3356103C, "Smart Array P721m", &SA5_access},
        {0x1921103C, "Smart Array P830i", &SA5_access},
        {0x1922103C, "Smart Array P430", &SA5_access},
        {0x1923103C, "Smart Array P431", &SA5_access},
        {0x1924103C, "Smart Array P830", &SA5_access},
        {0x1926103C, "Smart Array P731m", &SA5_access},
        {0x1928103C, "Smart Array P230i", &SA5_access},
        {0x1929103C, "Smart Array P530", &SA5_access},
        {0x21BD103C, "Smart Array", &SA5_access},
        {0x21BE103C, "Smart Array", &SA5_access},
        {0x21BF103C, "Smart Array", &SA5_access},
        {0x21C0103C, "Smart Array", &SA5_access},
        {0x21C1103C, "Smart Array", &SA5_access},
        {0x21C2103C, "Smart Array", &SA5_access},
        {0x21C3103C, "Smart Array", &SA5_access},
        {0x21C4103C, "Smart Array", &SA5_access},
        {0x21C5103C, "Smart Array", &SA5_access},
        {0x21C6103C, "Smart Array", &SA5_access},
        {0x21C7103C, "Smart Array", &SA5_access},
        {0x21C8103C, "Smart Array", &SA5_access},
        {0x21C9103C, "Smart Array", &SA5_access},
        {0x21CA103C, "Smart Array", &SA5_access},
        {0x21CB103C, "Smart Array", &SA5_access},
        {0x21CC103C, "Smart Array", &SA5_access},
        {0x21CD103C, "Smart Array", &SA5_access},
        {0x21CE103C, "Smart Array", &SA5_access},
        {0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
        {0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
        {0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
        {0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
        {0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
        {0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd,
        void __user *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
        void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
        int cmd_type);
static void hpsa_free_cmd_pool(struct ctlr_info *h);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
        unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
        struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
        int nsgs, int min_blocks, u32 *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
                               u32 *cfg_base_addr, u64 *cfg_base_addr_index,
                               u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
                                    unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
                                     int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
        struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
        u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk);
static void hpsa_command_resubmit_worker(struct work_struct *work);

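/*
 * The Scsi_Host private area (shost_priv) holds a single pointer back
 * to our ctlr_info, stored there when the host is registered; these
 * helpers fetch it starting from an sdev or a shost.
 */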
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
        unsigned long *priv = shost_priv(sdev->host);
        return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
        unsigned long *priv = shost_priv(sh);
        return (struct ctlr_info *) *priv;
}

static int check_for_unit_attention(struct ctlr_info *h,
        struct CommandList *c)
{
        if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
                return 0;

        switch (c->err_info->SenseInfo[12]) {
        case STATE_CHANGED:
                dev_warn(&h->pdev->dev, HPSA "%d: a state change "
                        "detected, command retried\n", h->ctlr);
                break;
        case LUN_FAILED:
                dev_warn(&h->pdev->dev,
                        HPSA "%d: LUN failure detected\n", h->ctlr);
                break;
        case REPORT_LUNS_CHANGED:
                dev_warn(&h->pdev->dev,
                        HPSA "%d: report LUN data changed\n", h->ctlr);
        /*
         * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
         * target (array) devices.
         */
                break;
        case POWER_OR_RESET:
                dev_warn(&h->pdev->dev, HPSA "%d: a power on "
                        "or device reset detected\n", h->ctlr);
                break;
        case UNIT_ATTENTION_CLEARED:
                dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
                    "cleared by another initiator\n", h->ctlr);
                break;
        default:
                dev_warn(&h->pdev->dev, HPSA "%d: unknown "
                        "unit attention detected\n", h->ctlr);
                break;
        }
        return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
        if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
                (c->err_info->ScsiStatus != SAM_STAT_BUSY &&
                 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
                return 0;
        dev_warn(&h->pdev->dev, HPSA "device busy");
        return 1;
}

static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
{
        int status, len;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        char tmpbuf[10];

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
        strncpy(tmpbuf, buf, len);
        tmpbuf[len] = '\0';
        if (sscanf(tmpbuf, "%d", &status) != 1)
                return -EINVAL;
        h = shost_to_hba(shost);
        h->acciopath_status = !!status;
        dev_warn(&h->pdev->dev,
                "hpsa: HP SSD Smart Path %s via sysfs update.\n",
                h->acciopath_status ? "enabled" : "disabled");
        return count;
}
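
/*
 * Example (hypothetical host number): "echo 0 >
 * /sys/class/scsi_host/host2/hp_ssd_smart_path_status" disables the
 * ioaccel path and "echo 1" re-enables it; the capable() checks above
 * mean the writer needs both CAP_SYS_ADMIN and CAP_SYS_RAWIO.
 */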

static ssize_t host_store_raid_offload_debug(struct device *dev,
                                         struct device_attribute *attr,
                                         const char *buf, size_t count)
{
        int debug_level, len;
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        char tmpbuf[10];

        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
        len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
        strncpy(tmpbuf, buf, len);
        tmpbuf[len] = '\0';
        if (sscanf(tmpbuf, "%d", &debug_level) != 1)
                return -EINVAL;
        if (debug_level < 0)
                debug_level = 0;
        h = shost_to_hba(shost);
        h->raid_offload_debug = debug_level;
        dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
                h->raid_offload_debug);
        return count;
}

static ssize_t host_store_rescan(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf, size_t count)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        h = shost_to_hba(shost);
        hpsa_scan_start(h->scsi_host);
        return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);
        unsigned char *fwrev;

        h = shost_to_hba(shost);
        if (!h->hba_inquiry_data)
                return 0;
        fwrev = &h->hba_inquiry_data[32];
        return snprintf(buf, 20, "%c%c%c%c\n",
                fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct ctlr_info *h = shost_to_hba(shost);

        return snprintf(buf, 20, "%d\n",
                        atomic_read(&h->commands_outstanding));
}

static ssize_t host_show_transport_mode(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%s\n",
                h->transMethod & CFGTBL_Trans_Performant ?
                        "performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 30, "HP SSD Smart Path %s\n",
                (h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
        0x324a103C, /* Smart Array P712m */
        0x324b103C, /* Smart Array P711m */
        0x3223103C, /* Smart Array P800 */
        0x3234103C, /* Smart Array P400 */
        0x3235103C, /* Smart Array P400i */
        0x3211103C, /* Smart Array E200i */
        0x3212103C, /* Smart Array E200 */
        0x3213103C, /* Smart Array E200i */
        0x3214103C, /* Smart Array E200i */
        0x3215103C, /* Smart Array E200i */
        0x3237103C, /* Smart Array E500 */
        0x323D103C, /* Smart Array P700m */
        0x40800E11, /* Smart Array 5i */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
        0x40800E11, /* Smart Array 5i */
        0x40700E11, /* Smart Array 5300 */
        0x40820E11, /* Smart Array 532 */
        0x40830E11, /* Smart Array 5312 */
        0x409A0E11, /* Smart Array 641 */
        0x409B0E11, /* Smart Array 642 */
        0x40910E11, /* Smart Array 6i */
        /* Exclude 640x boards.  These are two pci devices in one slot
         * which share a battery backed cache module.  One controls the
         * cache, the other accesses the cache through the one that controls
         * it.  If we reset the one controlling the cache, the other will
         * likely not be happy.  Just forbid resetting this conjoined mess.
         * The 640x isn't really supported by hpsa anyway.
         */
        0x409C0E11, /* Smart Array 6400 */
        0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
                if (unresettable_controller[i] == board_id)
                        return 0;
        return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
                if (soft_unresettable_controller[i] == board_id)
                        return 0;
        return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
        return ctlr_is_hard_resettable(board_id) ||
                ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct Scsi_Host *shost = class_to_shost(dev);

        h = shost_to_hba(shost);
        return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

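/*
 * Logical volumes are reported by the controller with 01b in the top
 * two bits of byte 3 of the 8-byte LUN address (the logical device
 * addressing mode); physical devices use a different mode, so this
 * test separates the two.
 */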
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
        return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char * const raid_label[] = { "0", "4", "1(+0)", "5", "5+1", "6",
        "1(+0)ADM", "UNKNOWN"
};
#define HPSA_RAID_0     0
#define HPSA_RAID_4     1
#define HPSA_RAID_1     2       /* also used for RAID 10 */
#define HPSA_RAID_5     3       /* also used for RAID 50 */
#define HPSA_RAID_51    4
#define HPSA_RAID_6     5       /* also used for RAID 60 */
#define HPSA_RAID_ADM   6       /* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        ssize_t l = 0;
        unsigned char rlevel;
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }

        /* Is this even a logical drive? */
        if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
                spin_unlock_irqrestore(&h->lock, flags);
                l = snprintf(buf, PAGE_SIZE, "N/A\n");
                return l;
        }

        rlevel = hdev->raid_level;
        spin_unlock_irqrestore(&h->lock, flags);
        if (rlevel > RAID_UNKNOWN)
                rlevel = RAID_UNKNOWN;
        l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
        return l;
}

static ssize_t lunid_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char lunid[8];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
                lunid[0], lunid[1], lunid[2], lunid[3],
                lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        unsigned char sn[16];

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        memcpy(sn, hdev->device_id, sizeof(sn));
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 16 * 2 + 2,
                        "%02X%02X%02X%02X%02X%02X%02X%02X"
                        "%02X%02X%02X%02X%02X%02X%02X%02X\n",
                        sn[0], sn[1], sn[2], sn[3],
                        sn[4], sn[5], sn[6], sn[7],
                        sn[8], sn[9], sn[10], sn[11],
                        sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
             struct device_attribute *attr, char *buf)
{
        struct ctlr_info *h;
        struct scsi_device *sdev;
        struct hpsa_scsi_dev_t *hdev;
        unsigned long flags;
        int offload_enabled;

        sdev = to_scsi_device(dev);
        h = sdev_to_hba(sdev);
        spin_lock_irqsave(&h->lock, flags);
        hdev = sdev->hostdata;
        if (!hdev) {
                spin_unlock_irqrestore(&h->lock, flags);
                return -ENODEV;
        }
        offload_enabled = hdev->offload_enabled;
        spin_unlock_irqrestore(&h->lock, flags);
        return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
                        host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
                host_show_hp_ssd_smart_path_status,
                host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
                        host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
        host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
        host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
        host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
        host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
        &dev_attr_raid_level,
        &dev_attr_lunid,
        &dev_attr_unique_id,
        &dev_attr_hp_ssd_smart_path_enabled,
        NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
        &dev_attr_rescan,
        &dev_attr_firmware_revision,
        &dev_attr_commands_outstanding,
        &dev_attr_transport_mode,
        &dev_attr_resettable,
        &dev_attr_hp_ssd_smart_path_status,
        &dev_attr_raid_offload_debug,
        NULL,
};

static struct scsi_host_template hpsa_driver_template = {
        .module                 = THIS_MODULE,
        .name                   = HPSA,
        .proc_name              = HPSA,
        .queuecommand           = hpsa_scsi_queue_command,
        .scan_start             = hpsa_scan_start,
        .scan_finished          = hpsa_scan_finished,
        .change_queue_depth     = hpsa_change_queue_depth,
        .this_id                = -1,
        .use_clustering         = ENABLE_CLUSTERING,
        .eh_abort_handler       = hpsa_eh_abort_handler,
        .eh_device_reset_handler = hpsa_eh_device_reset_handler,
        .ioctl                  = hpsa_ioctl,
        .slave_alloc            = hpsa_slave_alloc,
        .slave_destroy          = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
        .compat_ioctl           = hpsa_compat_ioctl,
#endif
        .sdev_attrs = hpsa_sdev_attrs,
        .shost_attrs = hpsa_shost_attrs,
        .max_sectors = 8192,
        .no_write_same = 1,
};

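/*
 * The performant-mode reply queue is a ring that the controller fills
 * and the driver drains.  Bit 0 of each entry is a parity bit the
 * producer flips on every pass through the ring; an entry is new only
 * while that bit matches rq->wraparound, so no head/tail index needs
 * to be exchanged with the hardware.
 */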
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
        u32 a;
        struct reply_queue_buffer *rq = &h->reply_queue[q];

        if (h->transMethod & CFGTBL_Trans_io_accel1)
                return h->access.command_completed(h, q);

        if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
                return h->access.command_completed(h, q);

        if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
                a = rq->head[rq->current_entry];
                rq->current_entry++;
                atomic_dec(&h->commands_outstanding);
        } else {
                a = FIFO_EMPTY;
        }
        /* Check for wraparound */
        if (rq->current_entry == h->max_commands) {
                rq->current_entry = 0;
                rq->wraparound ^= 1;
        }
        return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
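
/*
 * Worked example: a normal performant-mode command whose SG count maps
 * to block fetch table entry 3 is submitted with busaddr | 1 | (3 << 1),
 * i.e. low bits 0111b -- performant bit set, fetch entry 3, command
 * type 0.
 */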

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
        if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
                c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
                if (likely(h->msix_vector > 0))
                        c->Header.ReplyQueue =
                                raw_smp_processor_id() % h->nreply_queues;
        }
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c)
{
        struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

        /* Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
        /* Set the bits in the address sent down to include:
         *  - performant mode bit (bit 0)
         *  - pull count (bits 1-3)
         *  - command type (bits 4-6)
         */
        c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
                                        IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
                                                struct CommandList *c)
{
        struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

        /* Tell the controller to post the reply to the queue for this
         * processor.  This seems to give the best I/O throughput.
         */
        cp->reply_queue = smp_processor_id() % h->nreply_queues;
        /* Set the bits in the address sent down to include:
         *  - performant mode bit not used in ioaccel mode 2
         *  - pull count (bits 0-3)
         *  - command type isn't needed for ioaccel2
         */
        c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

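/*
 * A firmware flash is recognized as a BMIC write whose subcommand
 * byte (CDB byte 6) selects the flash-firmware operation.
 */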
static int is_firmware_flash_cmd(u8 *cdb)
{
        return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
                struct CommandList *c)
{
        if (!is_firmware_flash_cmd(c->Request.CDB))
                return;
        atomic_inc(&h->firmware_flash_in_progress);
        h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

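/*
 * firmware_flash_in_progress is an atomic count rather than a flag so
 * concurrent flash commands nest correctly: the normal sample interval
 * is restored only when the last outstanding flash completes
 * (atomic_dec_and_test below).
 */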
static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
                struct CommandList *c)
{
        if (is_firmware_flash_cmd(c->Request.CDB) &&
                atomic_dec_and_test(&h->firmware_flash_in_progress))
                h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

static void enqueue_cmd_and_start_io(struct ctlr_info *h,
        struct CommandList *c)
{
        switch (c->cmd_type) {
        case CMD_IOACCEL1:
                set_ioaccel1_performant_mode(h, c);
                break;
        case CMD_IOACCEL2:
                set_ioaccel2_performant_mode(h, c);
                break;
        default:
                set_performant_mode(h, c);
        }
        dial_down_lockup_detection_during_fw_flash(h, c);
        atomic_inc(&h->commands_outstanding);
        h->access.submit_command(h, c);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
        return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
        if (!h->hba_inquiry_data)
                return 0;
        if ((h->hba_inquiry_data[2] & 0x07) == 5)
                return 1;
        return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
        unsigned char scsi3addr[], int bus, int *target, int *lun)
{
        /* finds an unused bus, target, lun for a new physical device
         * assumes h->devlock is held
         */
        int i, found = 0;
        DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

        bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
                        __set_bit(h->dev[i]->target, lun_taken);
        }

        i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
        if (i < HPSA_MAX_DEVICES) {
                /* *bus = 1; */
                *target = i;
                *lun = 0;
                found = 1;
        }
        return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
                struct hpsa_scsi_dev_t *device,
                struct hpsa_scsi_dev_t *added[], int *nadded)
{
        /* assumes h->devlock is held */
        int n = h->ndevices;
        int i;
        unsigned char addr1[8], addr2[8];
        struct hpsa_scsi_dev_t *sd;

        if (n >= HPSA_MAX_DEVICES) {
                dev_err(&h->pdev->dev, "too many devices, some will be "
                        "inaccessible.\n");
                return -1;
        }

        /* physical devices do not have lun or target assigned until now. */
        if (device->lun != -1)
                /* Logical device, lun is already assigned. */
                goto lun_assigned;

        /* If this device is a non-zero lun of a multi-lun device,
         * byte 4 of the 8-byte LUN addr will contain the logical
         * unit no, zero otherwise.
         */
        if (device->scsi3addr[4] == 0) {
                /* This is not a non-zero lun of a multi-lun device */
                if (hpsa_find_target_lun(h, device->scsi3addr,
                        device->bus, &device->target, &device->lun) != 0)
                        return -1;
                goto lun_assigned;
        }

        /* This is a non-zero lun of a multi-lun device.
         * Search through our list and find the device which
         * has the same 8 byte LUN address, excepting byte 4.
         * Assign the same bus and target for this new LUN.
         * Use the logical unit number from the firmware.
         */
        memcpy(addr1, device->scsi3addr, 8);
        addr1[4] = 0;
        for (i = 0; i < n; i++) {
                sd = h->dev[i];
                memcpy(addr2, sd->scsi3addr, 8);
                addr2[4] = 0;
                /* differ only in byte 4? */
                if (memcmp(addr1, addr2, 8) == 0) {
                        device->bus = sd->bus;
                        device->target = sd->target;
                        device->lun = device->scsi3addr[4];
                        break;
                }
        }
        if (device->lun == -1) {
                dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
                        " suspect firmware bug or unsupported hardware "
                        "configuration.\n");
                return -1;
        }

lun_assigned:

        h->dev[n] = device;
        h->ndevices++;
        added[*nadded] = device;
        (*nadded)++;

        /* initially, (before registering with scsi layer) we don't
         * know our hostno and we don't want to print anything first
         * time anyway (the scsi layer's inquiries will show that info)
         */
        /* if (hostno != -1) */
                dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
                        scsi_device_type(device->devtype), hostno,
                        device->bus, device->target, device->lun);
        return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
        int entry, struct hpsa_scsi_dev_t *new_entry)
{
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        /* Raid level changed. */
        h->dev[entry]->raid_level = new_entry->raid_level;

        /* Raid offload parameters changed.  Careful about the ordering. */
        if (new_entry->offload_config && new_entry->offload_enabled) {
                /*
                 * if drive is newly offload_enabled, we want to copy the
                 * raid map data first.  If previously offload_enabled and
                 * offload_config were set, raid map data had better be
                 * the same as it was before.  if raid map data is changed
                 * then it had better be the case that
                 * h->dev[entry]->offload_enabled is currently 0.
                 */
                h->dev[entry]->raid_map = new_entry->raid_map;
                h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
                wmb(); /* ensure raid map updated prior to ->offload_enabled */
        }
        h->dev[entry]->offload_config = new_entry->offload_config;
        h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
        h->dev[entry]->offload_enabled = new_entry->offload_enabled;
        h->dev[entry]->queue_depth = new_entry->queue_depth;

        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
                scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
                new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
        int entry, struct hpsa_scsi_dev_t *new_entry,
        struct hpsa_scsi_dev_t *added[], int *nadded,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        /*
         * New physical devices won't have target/lun assigned yet
         * so we need to preserve the values in the slot we are replacing.
         */
        if (new_entry->target == -1) {
                new_entry->target = h->dev[entry]->target;
                new_entry->lun = h->dev[entry]->lun;
        }

        h->dev[entry] = new_entry;
        added[*nadded] = new_entry;
        (*nadded)++;
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
                scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
                new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
        struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
        /* assumes h->devlock is held */
        int i;
        struct hpsa_scsi_dev_t *sd;

        BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

        sd = h->dev[entry];
        removed[*nremoved] = h->dev[entry];
        (*nremoved)++;

        for (i = entry; i < h->ndevices-1; i++)
                h->dev[i] = h->dev[i+1];
        h->ndevices--;
        dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
                scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
                sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
        (a)[7] == (b)[7] && \
        (a)[6] == (b)[6] && \
        (a)[5] == (b)[5] && \
        (a)[4] == (b)[4] && \
        (a)[3] == (b)[3] && \
        (a)[2] == (b)[2] && \
        (a)[1] == (b)[1] && \
        (a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
        struct hpsa_scsi_dev_t *added)
{
        /* called when scsi_add_device fails in order to re-adjust
         * h->dev[] to match the mid layer's view.
         */
        unsigned long flags;
        int i, j;

        spin_lock_irqsave(&h->lock, flags);
        for (i = 0; i < h->ndevices; i++) {
                if (h->dev[i] == added) {
                        for (j = i; j < h->ndevices-1; j++)
                                h->dev[j] = h->dev[j+1];
                        h->ndevices--;
                        break;
                }
        }
        spin_unlock_irqrestore(&h->lock, flags);
        kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
        struct hpsa_scsi_dev_t *dev2)
{
        /* we compare everything except lun and target as these
         * are not yet assigned.  Compare parts likely
         * to differ first
         */
        if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
                sizeof(dev1->scsi3addr)) != 0)
                return 0;
        if (memcmp(dev1->device_id, dev2->device_id,
                sizeof(dev1->device_id)) != 0)
                return 0;
        if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
                return 0;
        if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
                return 0;
        if (dev1->devtype != dev2->devtype)
                return 0;
        if (dev1->bus != dev2->bus)
                return 0;
        return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
        struct hpsa_scsi_dev_t *dev2)
{
        /* Device attributes that can change, but don't mean
         * that the device is a different device, nor that the OS
         * needs to be told anything about the change.
         */
        if (dev1->raid_level != dev2->raid_level)
                return 1;
        if (dev1->offload_config != dev2->offload_config)
                return 1;
        if (dev1->offload_enabled != dev2->offload_enabled)
                return 1;
        if (dev1->queue_depth != dev2->queue_depth)
                return 1;
        return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
        struct hpsa_scsi_dev_t *haystack[], int haystack_size,
        int *index)
{
        int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
        for (i = 0; i < haystack_size; i++) {
                if (haystack[i] == NULL) /* previously removed. */
                        continue;
                if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
                        *index = i;
                        if (device_is_the_same(needle, haystack[i])) {
                                if (device_updated(needle, haystack[i]))
                                        return DEVICE_UPDATED;
                                return DEVICE_SAME;
                        } else {
                                /* Keep offline devices offline */
                                if (needle->volume_offline)
                                        return DEVICE_NOT_FOUND;
                                return DEVICE_CHANGED;
                        }
                }
        }
        *index = -1;
        return DEVICE_NOT_FOUND;
}

static void hpsa_monitor_offline_device(struct ctlr_info *h,
                                        unsigned char scsi3addr[])
{
        struct offline_device_entry *device;
        unsigned long flags;

        /* Check to see if device is already on the list */
        spin_lock_irqsave(&h->offline_device_lock, flags);
        list_for_each_entry(device, &h->offline_device_list, offline_list) {
                if (memcmp(device->scsi3addr, scsi3addr,
                        sizeof(device->scsi3addr)) == 0) {
                        spin_unlock_irqrestore(&h->offline_device_lock, flags);
                        return;
                }
        }
        spin_unlock_irqrestore(&h->offline_device_lock, flags);

        /* Device is not on the list, add it. */
        device = kmalloc(sizeof(*device), GFP_KERNEL);
        if (!device) {
                dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
                return;
        }
        memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
        spin_lock_irqsave(&h->offline_device_lock, flags);
        list_add_tail(&device->offline_list, &h->offline_device_list);
        spin_unlock_irqrestore(&h->offline_device_lock, flags);
}

/* Print a message explaining various offline volume states */
static void hpsa_show_volume_status(struct ctlr_info *h,
        struct hpsa_scsi_dev_t *sd)
{
        if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
        switch (sd->volume_offline) {
        case HPSA_LV_OK:
                break;
        case HPSA_LV_UNDERGOING_ERASE:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_UNDERGOING_RPI:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_PENDING_RPI:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_ENCRYPTED_NO_KEY:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_UNDERGOING_ENCRYPTION:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_PENDING_ENCRYPTION:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
                dev_info(&h->pdev->dev,
                        "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
                        h->scsi_host->host_no,
                        sd->bus, sd->target, sd->lun);
                break;
        }
}
1257
1258 /*
1259  * Figure the list of physical drive pointers for a logical drive with
1260  * raid offload configured.
1261  */
1262 static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
1263                                 struct hpsa_scsi_dev_t *dev[], int ndevices,
1264                                 struct hpsa_scsi_dev_t *logical_drive)
1265 {
1266         struct raid_map_data *map = &logical_drive->raid_map;
1267         struct raid_map_disk_data *dd = &map->data[0];
1268         int i, j;
1269         int total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
1270                                 le16_to_cpu(map->metadata_disks_per_row);
1271         int nraid_map_entries = le16_to_cpu(map->row_cnt) *
1272                                 le16_to_cpu(map->layout_map_count) *
1273                                 total_disks_per_row;
1274         int nphys_disk = le16_to_cpu(map->layout_map_count) *
1275                                 total_disks_per_row;
1276         int qdepth;
1277
1278         if (nraid_map_entries > RAID_MAP_MAX_ENTRIES)
1279                 nraid_map_entries = RAID_MAP_MAX_ENTRIES;
1280
1281         qdepth = 0;
1282         for (i = 0; i < nraid_map_entries; i++) {
1283                 logical_drive->phys_disk[i] = NULL;
1284                 if (!logical_drive->offload_config)
1285                         continue;
1286                 for (j = 0; j < ndevices; j++) {
1287                         if (dev[j]->devtype != TYPE_DISK)
1288                                 continue;
1289                         if (is_logical_dev_addr_mode(dev[j]->scsi3addr))
1290                                 continue;
1291                         if (dev[j]->ioaccel_handle != dd[i].ioaccel_handle)
1292                                 continue;
1293
1294                         logical_drive->phys_disk[i] = dev[j];
1295                         if (i < nphys_disk)
1296                                 qdepth = min(h->nr_cmds, qdepth +
1297                                     logical_drive->phys_disk[i]->queue_depth);
1298                         break;
1299                 }
1300
1301                 /*
1302                  * This can happen if a physical drive is removed and
1303                  * the logical drive is degraded.  In that case, the RAID
1304                  * map data will refer to a physical disk which isn't actually
1305                  * present.  And in that case offload_enabled should already
1306                  * be 0, but we'll turn it off here just in case
1307                  */
1308                 if (!logical_drive->phys_disk[i]) {
1309                         logical_drive->offload_enabled = 0;
1310                         logical_drive->queue_depth = h->nr_cmds;
1311                 }
1312         }
1313         if (nraid_map_entries)
1314                 /*
1315                  * This is correct for reads, too high for full stripe writes,
1316                  * way too high for partial stripe writes
1317                  */
1318                 logical_drive->queue_depth = qdepth;
1319         else
1320                 logical_drive->queue_depth = h->nr_cmds;
1321 }
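
     /*
      * Worked example (hypothetical map values, not from the original
      * source): with row_cnt = 2, layout_map_count = 1,
      * data_disks_per_row = 3 and metadata_disks_per_row = 1,
      * total_disks_per_row is 4, so the loop above scans
      * nraid_map_entries = 2 * 1 * 4 = 8 map entries while
      * nphys_disk = 1 * 4 = 4; qdepth accumulates the queue depths of
      * the first 4 matched physical disks, capped at h->nr_cmds.
      */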
1322
1323 static void hpsa_update_log_drive_phys_drive_ptrs(struct ctlr_info *h,
1324                                 struct hpsa_scsi_dev_t *dev[], int ndevices)
1325 {
1326         int i;
1327
1328         for (i = 0; i < ndevices; i++) {
1329                 if (dev[i]->devtype != TYPE_DISK)
1330                         continue;
1331                 if (!is_logical_dev_addr_mode(dev[i]->scsi3addr))
1332                         continue;
1333                 hpsa_figure_phys_disk_ptrs(h, dev, ndevices, dev[i]);
1334         }
1335 }
1336
1337 static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
1338         struct hpsa_scsi_dev_t *sd[], int nsds)
1339 {
1340         /* sd contains scsi3 addresses and devtypes, and inquiry
1341          * data.  This function takes what's in sd to be the current
1342          * reality and updates h->dev[] to reflect that reality.
1343          */
1344         int i, entry, device_change, changes = 0;
1345         struct hpsa_scsi_dev_t *csd;
1346         unsigned long flags;
1347         struct hpsa_scsi_dev_t **added, **removed;
1348         int nadded, nremoved;
1349         struct Scsi_Host *sh = NULL;
1350
1351         added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1352         removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
1353
1354         if (!added || !removed) {
1355                 dev_warn(&h->pdev->dev, "out of memory in "
1356                         "adjust_hpsa_scsi_table\n");
1357                 goto free_and_out;
1358         }
1359
1360         spin_lock_irqsave(&h->devlock, flags);
1361
1362         /* find any devices in h->dev[] that are not in
1363          * sd[] and remove them from h->dev[], and for any
1364          * devices which have changed, remove the old device
1365          * info and add the new device info.
1366          * If minor device attributes change, just update
1367          * the existing device structure.
1368          */
1369         i = 0;
1370         nremoved = 0;
1371         nadded = 0;
1372         while (i < h->ndevices) {
1373                 csd = h->dev[i];
1374                 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1375                 if (device_change == DEVICE_NOT_FOUND) {
1376                         changes++;
1377                         hpsa_scsi_remove_entry(h, hostno, i,
1378                                 removed, &nremoved);
1379                         continue; /* remove ^^^, hence i not incremented */
1380                 } else if (device_change == DEVICE_CHANGED) {
1381                         changes++;
1382                         hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1383                                 added, &nadded, removed, &nremoved);
1384                         /* Set it to NULL to prevent it from being freed
1385                          * at the bottom of hpsa_update_scsi_devices()
1386                          */
1387                         sd[entry] = NULL;
1388                 } else if (device_change == DEVICE_UPDATED) {
1389                         hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
1390                 }
1391                 i++;
1392         }
1393
1394         /* Now, make sure every device listed in sd[] is also
1395          * listed in h->dev[], adding them if they aren't found
1396          */
1397
1398         for (i = 0; i < nsds; i++) {
1399                 if (!sd[i]) /* if already added above. */
1400                         continue;
1401
1402                 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1403                  * as the SCSI mid-layer does not handle such devices well.
1404                  * It relentlessly loops sending TUR at 3Hz, then READ(10)
1405                  * at 160Hz, and prevents the system from coming up.
1406                  */
1407                 if (sd[i]->volume_offline) {
1408                         hpsa_show_volume_status(h, sd[i]);
1409                         dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
1410                                 h->scsi_host->host_no,
1411                                 sd[i]->bus, sd[i]->target, sd[i]->lun);
1412                         continue;
1413                 }
1414
1415                 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1416                                         h->ndevices, &entry);
1417                 if (device_change == DEVICE_NOT_FOUND) {
1418                         changes++;
1419                         if (hpsa_scsi_add_entry(h, hostno, sd[i],
1420                                 added, &nadded) != 0)
1421                                 break;
1422                         sd[i] = NULL; /* prevent from being freed later. */
1423                 } else if (device_change == DEVICE_CHANGED) {
1424                         /* should never happen... */
1425                         changes++;
1426                         dev_warn(&h->pdev->dev,
1427                                 "device unexpectedly changed.\n");
1428                         /* but if it does happen, we just ignore that device */
1429                 }
1430         }
1431         spin_unlock_irqrestore(&h->devlock, flags);
1432
1433         /* Monitor devices which are in one of several NOT READY states to be
1434          * brought online later. This must be done without holding h->devlock,
1435          * so don't touch h->dev[]
1436          */
1437         for (i = 0; i < nsds; i++) {
1438                 if (!sd[i]) /* if already added above. */
1439                         continue;
1440                 if (sd[i]->volume_offline)
1441                         hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1442         }
1443
1444         /* Don't notify the scsi mid layer of any changes the first time
1445          * through, or if there are no changes; scsi_scan_host will do the
1446          * initial scan later.
1447          */
1448         if (hostno == -1 || !changes)
1449                 goto free_and_out;
1450
1451         sh = h->scsi_host;
1452         /* Notify scsi mid layer of any removed devices */
1453         for (i = 0; i < nremoved; i++) {
1454                 struct scsi_device *sdev =
1455                         scsi_device_lookup(sh, removed[i]->bus,
1456                                 removed[i]->target, removed[i]->lun);
1457                 if (sdev != NULL) {
1458                         scsi_remove_device(sdev);
1459                         scsi_device_put(sdev);
1460                 } else {
1461                         /* We don't expect to get here.
1462                          * Future commands to this device will get a
1463                          * selection timeout as if the device were gone.
1464                          */
1465                         dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d"
1466                                 " for removal.\n", hostno, removed[i]->bus,
1467                                 removed[i]->target, removed[i]->lun);
1468                 }
1469                 kfree(removed[i]);
1470                 removed[i] = NULL;
1471         }
1472
1473         /* Notify scsi mid layer of any added devices */
1474         for (i = 0; i < nadded; i++) {
1475                 if (scsi_add_device(sh, added[i]->bus,
1476                         added[i]->target, added[i]->lun) == 0)
1477                         continue;
1478                 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
1479                         "device not added.\n", hostno, added[i]->bus,
1480                         added[i]->target, added[i]->lun);
1481                 /* now we have to remove it from h->dev,
1482                  * since it didn't get added to scsi mid layer
1483                  */
1484                 fixup_botched_add(h, added[i]);
1485         }
1486
1487 free_and_out:
1488         kfree(added);
1489         kfree(removed);
1490 }
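
     /*
      * A note on the flow above: pass 1 (under h->devlock) diffs
      * h->dev[] against sd[], removing vanished devices, replacing
      * changed ones, and updating minor attributes in place; pass 2
      * adds anything in sd[] not yet known.  The SCSI midlayer is told
      * about removals and additions only after the lock is dropped,
      * because scsi_remove_device() and scsi_add_device() may sleep
      * and must not be called under a spinlock.
      */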
1491
1492 /*
1493  * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
1494  * Assumes h->devlock is held.
1495  */
1496 static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1497         int bus, int target, int lun)
1498 {
1499         int i;
1500         struct hpsa_scsi_dev_t *sd;
1501
1502         for (i = 0; i < h->ndevices; i++) {
1503                 sd = h->dev[i];
1504                 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1505                         return sd;
1506         }
1507         return NULL;
1508 }
1509
1510 /* link sdev->hostdata to our per-device structure. */
1511 static int hpsa_slave_alloc(struct scsi_device *sdev)
1512 {
1513         struct hpsa_scsi_dev_t *sd;
1514         unsigned long flags;
1515         struct ctlr_info *h;
1516
1517         h = sdev_to_hba(sdev);
1518         spin_lock_irqsave(&h->devlock, flags);
1519         sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1520                 sdev_id(sdev), sdev->lun);
1521         if (sd != NULL) {
1522                 sdev->hostdata = sd;
1523                 if (sd->queue_depth)
1524                         scsi_change_queue_depth(sdev, sd->queue_depth);
1525                 atomic_set(&sd->ioaccel_cmds_out, 0);
1526         }
1527         spin_unlock_irqrestore(&h->devlock, flags);
1528         return 0;
1529 }
1530
1531 static void hpsa_slave_destroy(struct scsi_device *sdev)
1532 {
1533         /* nothing to do. */
1534 }
1535
1536 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1537 {
1538         int i;
1539
1540         if (!h->cmd_sg_list)
1541                 return;
1542         for (i = 0; i < h->nr_cmds; i++) {
1543                 kfree(h->cmd_sg_list[i]);
1544                 h->cmd_sg_list[i] = NULL;
1545         }
1546         kfree(h->cmd_sg_list);
1547         h->cmd_sg_list = NULL;
1548 }
1549
1550 static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
1551 {
1552         int i;
1553
1554         if (h->chainsize <= 0)
1555                 return 0;
1556
1557         h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1558                                 GFP_KERNEL);
1559         if (!h->cmd_sg_list) {
1560                 dev_err(&h->pdev->dev, "Failed to allocate SG list\n");
1561                 return -ENOMEM;
1562         }
1563         for (i = 0; i < h->nr_cmds; i++) {
1564                 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1565                                                 h->chainsize, GFP_KERNEL);
1566                 if (!h->cmd_sg_list[i]) {
1567                         dev_err(&h->pdev->dev, "Failed to allocate cmd SG\n");
1568                         goto clean;
1569                 }
1570         }
1571         return 0;
1572
1573 clean:
1574         hpsa_free_sg_chain_blocks(h);
1575         return -ENOMEM;
1576 }
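
     /*
      * Sizing sketch (illustrative figures, assuming this driver's
      * 16-byte struct SGDescriptor): the pool above costs roughly
      * h->nr_cmds * h->chainsize * sizeof(struct SGDescriptor) bytes,
      * e.g. 1024 commands * 31 chain entries * 16 bytes ~= 496 KiB,
      * which is why a partial allocation failure is unwound through
      * hpsa_free_sg_chain_blocks().
      */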
1577
1578 static int hpsa_map_sg_chain_block(struct ctlr_info *h,
1579         struct CommandList *c)
1580 {
1581         struct SGDescriptor *chain_sg, *chain_block;
1582         u64 temp64;
1583         u32 chain_len;
1584
1585         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1586         chain_block = h->cmd_sg_list[c->cmdindex];
1587         chain_sg->Ext = cpu_to_le32(HPSA_SG_CHAIN);
1588         chain_len = sizeof(*chain_sg) *
1589                 (le16_to_cpu(c->Header.SGTotal) - h->max_cmd_sg_entries);
1590         chain_sg->Len = cpu_to_le32(chain_len);
1591         temp64 = pci_map_single(h->pdev, chain_block, chain_len,
1592                                 PCI_DMA_TODEVICE);
1593         if (dma_mapping_error(&h->pdev->dev, temp64)) {
1594                 /* prevent subsequent unmapping */
1595                 chain_sg->Addr = cpu_to_le64(0);
1596                 return -1;
1597         }
1598         chain_sg->Addr = cpu_to_le64(temp64);
1599         return 0;
1600 }
1601
1602 static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1603         struct CommandList *c)
1604 {
1605         struct SGDescriptor *chain_sg;
1606
1607         if (le16_to_cpu(c->Header.SGTotal) <= h->max_cmd_sg_entries)
1608                 return;
1609
1610         chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1611         pci_unmap_single(h->pdev, le64_to_cpu(chain_sg->Addr),
1612                         le32_to_cpu(chain_sg->Len), PCI_DMA_TODEVICE);
1613 }
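
     /*
      * Chaining example (illustrative numbers, assuming SGTotal counts
      * the chain descriptor itself): with max_cmd_sg_entries = 32 and
      * SGTotal = 41, descriptors 0-30 stay embedded in c->SG[], slot 31
      * is rewritten as the HPSA_SG_CHAIN pointer, and the DMA-mapped
      * chain block holds the remaining 41 - 32 = 9 descriptors, i.e.
      * chain_len = 9 * sizeof(struct SGDescriptor).
      */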
1614
1615
1616 /* Decode the various types of errors on the ioaccel2 path.
1617  * Return 1 for any error that should generate a RAID path retry.
1618  * Return 0 for errors that don't require a RAID path retry.
1619  */
1620 static int handle_ioaccel_mode2_error(struct ctlr_info *h,
1621                                         struct CommandList *c,
1622                                         struct scsi_cmnd *cmd,
1623                                         struct io_accel2_cmd *c2)
1624 {
1625         int data_len;
1626         int retry = 0;
1627
1628         switch (c2->error_data.serv_response) {
1629         case IOACCEL2_SERV_RESPONSE_COMPLETE:
1630                 switch (c2->error_data.status) {
1631                 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1632                         break;
1633                 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1634                         dev_warn(&h->pdev->dev,
1635                                 "%s: task complete with check condition.\n",
1636                                 "HP SSD Smart Path");
1637                         cmd->result |= SAM_STAT_CHECK_CONDITION;
1638                         if (c2->error_data.data_present !=
1639                                         IOACCEL2_SENSE_DATA_PRESENT) {
1640                                 memset(cmd->sense_buffer, 0,
1641                                         SCSI_SENSE_BUFFERSIZE);
1642                                 break;
1643                         }
1644                         /* copy the sense data */
1645                         data_len = c2->error_data.sense_data_len;
1646                         if (data_len > SCSI_SENSE_BUFFERSIZE)
1647                                 data_len = SCSI_SENSE_BUFFERSIZE;
1648                         if (data_len > sizeof(c2->error_data.sense_data_buff))
1649                                 data_len =
1650                                         sizeof(c2->error_data.sense_data_buff);
1651                         memcpy(cmd->sense_buffer,
1652                                 c2->error_data.sense_data_buff, data_len);
1653                         retry = 1;
1654                         break;
1655                 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1656                         dev_warn(&h->pdev->dev,
1657                                 "%s: task complete with BUSY status.\n",
1658                                 "HP SSD Smart Path");
1659                         retry = 1;
1660                         break;
1661                 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1662                         dev_warn(&h->pdev->dev,
1663                                 "%s: task complete with reservation conflict.\n",
1664                                 "HP SSD Smart Path");
1665                         retry = 1;
1666                         break;
1667                 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1668                         /* Make scsi midlayer do unlimited retries */
1669                         cmd->result = DID_IMM_RETRY << 16;
1670                         break;
1671                 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1672                         dev_warn(&h->pdev->dev,
1673                                 "%s: task complete with aborted status.\n",
1674                                 "HP SSD Smart Path");
1675                         retry = 1;
1676                         break;
1677                 default:
1678                         dev_warn(&h->pdev->dev,
1679                                 "%s: task complete with unrecognized status: 0x%02x\n",
1680                                 "HP SSD Smart Path", c2->error_data.status);
1681                         retry = 1;
1682                         break;
1683                 }
1684                 break;
1685         case IOACCEL2_SERV_RESPONSE_FAILURE:
1686                 /* don't expect to get here. */
1687                 dev_warn(&h->pdev->dev,
1688                         "unexpected delivery or target failure, status = 0x%02x\n",
1689                         c2->error_data.status);
1690                 retry = 1;
1691                 break;
1692         case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1693                 break;
1694         case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1695                 break;
1696         case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1697                 dev_warn(&h->pdev->dev, "task management function rejected.\n");
1698                 retry = 1;
1699                 break;
1700         case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1701                 dev_warn(&h->pdev->dev, "task management function: invalid LUN\n");
1702                 break;
1703         default:
1704                 dev_warn(&h->pdev->dev,
1705                         "%s: Unrecognized server response: 0x%02x\n",
1706                         "HP SSD Smart Path",
1707                         c2->error_data.serv_response);
1708                 retry = 1;
1709                 break;
1710         }
1711
1712         return retry;   /* retry on raid path? */
1713 }
1714
1715 static void process_ioaccel2_completion(struct ctlr_info *h,
1716                 struct CommandList *c, struct scsi_cmnd *cmd,
1717                 struct hpsa_scsi_dev_t *dev)
1718 {
1719         struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
1720
1721         /* check for good status */
1722         if (likely(c2->error_data.serv_response == 0 &&
1723                         c2->error_data.status == 0)) {
1724                 cmd_free(h, c);
1725                 cmd->scsi_done(cmd);
1726                 return;
1727         }
1728
1729         /* Any RAID offload error results in retry which will use
1730          * the normal I/O path so the controller can handle whatever's
1731          * wrong.
1732          */
1733         if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1734                 c2->error_data.serv_response ==
1735                         IOACCEL2_SERV_RESPONSE_FAILURE) {
1736                 if (c2->error_data.status ==
1737                         IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
1738                         dev->offload_enabled = 0;
1739                 goto retry_cmd;
1740         }
1741
1742         if (handle_ioaccel_mode2_error(h, c, cmd, c2))
1743                 goto retry_cmd;
1744
1745         cmd_free(h, c);
1746         cmd->scsi_done(cmd);
1747         return;
1748
1749 retry_cmd:
1750         INIT_WORK(&c->work, hpsa_command_resubmit_worker);
1751         queue_work_on(raw_smp_processor_id(), h->resubmit_wq, &c->work);
1752 }
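
     /*
      * Note on the retry path above: a retryable ioaccel2 failure does
      * not complete the command with an error; the command is requeued
      * on the current CPU to h->resubmit_wq, where
      * hpsa_command_resubmit_worker() re-drives it down the normal
      * RAID path.
      */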
1753
1754 static void complete_scsi_command(struct CommandList *cp)
1755 {
1756         struct scsi_cmnd *cmd;
1757         struct ctlr_info *h;
1758         struct ErrorInfo *ei;
1759         struct hpsa_scsi_dev_t *dev;
1760
1761         unsigned char sense_key;
1762         unsigned char asc;      /* additional sense code */
1763         unsigned char ascq;     /* additional sense code qualifier */
1764         unsigned long sense_data_size;
1765
1766         ei = cp->err_info;
1767         cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1768         h = cp->h;
1769         dev = cmd->device->hostdata;
1770
1771         scsi_dma_unmap(cmd); /* undo the DMA mappings */
1772         if ((cp->cmd_type == CMD_SCSI) &&
1773                 (le16_to_cpu(cp->Header.SGTotal) > h->max_cmd_sg_entries))
1774                 hpsa_unmap_sg_chain_block(h, cp);
1775
1776         cmd->result = (DID_OK << 16);           /* host byte */
1777         cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
1778
1779         if (cp->cmd_type == CMD_IOACCEL2 || cp->cmd_type == CMD_IOACCEL1)
1780                 atomic_dec(&cp->phys_disk->ioaccel_cmds_out);
1781
1782         if (cp->cmd_type == CMD_IOACCEL2)
1783                 return process_ioaccel2_completion(h, cp, cmd, dev);
1784
1785         cmd->result |= ei->ScsiStatus;
1786
1787         scsi_set_resid(cmd, ei->ResidualCnt);
1788         if (ei->CommandStatus == 0) {
1791                 cmd_free(h, cp);
1792                 cmd->scsi_done(cmd);
1793                 return;
1794         }
1795
1796         /* copy the sense data */
1797         if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1798                 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1799         else
1800                 sense_data_size = sizeof(ei->SenseInfo);
1801         if (ei->SenseLen < sense_data_size)
1802                 sense_data_size = ei->SenseLen;
1803
1804         memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
1805
1806         /* For I/O accelerator commands, copy over some fields to the normal
1807          * CISS header used below for error handling.
1808          */
1809         if (cp->cmd_type == CMD_IOACCEL1) {
1810                 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1811                 cp->Header.SGList = scsi_sg_count(cmd);
1812                 cp->Header.SGTotal = cpu_to_le16(cp->Header.SGList);
1813                 cp->Request.CDBLen = le16_to_cpu(c->io_flags) &
1814                         IOACCEL1_IOFLAGS_CDBLEN_MASK;
1815                 cp->Header.tag = c->tag;
1816                 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1817                 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
1818
1819                 /* Any RAID offload error results in retry which will use
1820                  * the normal I/O path so the controller can handle whatever's
1821                  * wrong.
1822                  */
1823                 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1824                         if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1825                                 dev->offload_enabled = 0;
1826                         INIT_WORK(&cp->work, hpsa_command_resubmit_worker);
1827                         queue_work_on(raw_smp_processor_id(),
1828                                         h->resubmit_wq, &cp->work);
1829                         return;
1830                 }
1831         }
1832
1833         /* an error has occurred */
1834         switch (ei->CommandStatus) {
1835
1836         case CMD_TARGET_STATUS:
1837                 if (ei->ScsiStatus) {
1838                         /* Get sense key */
1839                         sense_key = 0xf & ei->SenseInfo[2];
1840                         /* Get additional sense code */
1841                         asc = ei->SenseInfo[12];
1842                 /* Get additional sense code qualifier */
1843                         ascq = ei->SenseInfo[13];
1844                 }
1845                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
1846                         if (sense_key == ABORTED_COMMAND) {
1847                                 cmd->result |= DID_SOFT_ERROR << 16;
1848                                 break;
1849                         }
1850                         break;
1851                 }
1852                 /* Problem was not a check condition
1853                  * Pass it up to the upper layers...
1854                  */
1855                 if (ei->ScsiStatus) {
1856                         dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1857                                 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1858                                 "Returning result: 0x%x\n",
1859                                 cp, ei->ScsiStatus,
1860                                 sense_key, asc, ascq,
1861                                 cmd->result);
1862                 } else {  /* scsi status is zero??? How??? */
1863                         dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1864                                 "Returning no connection.\n", cp),
1865                                 "Returning no connection.\n", cp);
1866                         /* Ordinarily, this case should never happen,
1867                          * but there is a bug in some released firmware
1868                          * revisions that allows it to happen if, for
1869                          * example, a 4100 backplane loses power and
1870                          * the tape drive is in it.  We assume that
1871                          * it's a fatal error of some kind because we
1872                          * can't show that it wasn't. We will make it
1873                          * look like selection timeout since that is
1874                          * the most common reason for this to occur,
1875                          * and it's severe enough.
1876                          */
1877
1878                         cmd->result = DID_NO_CONNECT << 16;
1879                 }
1880                 break;
1881
1882         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1883                 break;
1884         case CMD_DATA_OVERRUN:
1885                 dev_warn(&h->pdev->dev, "cp %p has"
1886                         " completed with data overrun "
1887                         "reported\n", cp);
1888                 break;
1889         case CMD_INVALID: {
1890                 /* print_bytes(cp, sizeof(*cp), 1, 0);
1891                 print_cmd(cp); */
1892                 /* We get CMD_INVALID if you address a non-existent device
1893                  * instead of a selection timeout (no response).  You will
1894                  * see this if you yank out a drive, then try to access it.
1895                  * This is kind of a shame because it means that any other
1896                  * CMD_INVALID (e.g. driver bug) will get interpreted as a
1897                  * missing target. */
1898                 cmd->result = DID_NO_CONNECT << 16;
1899         }
1900                 break;
1901         case CMD_PROTOCOL_ERR:
1902                 cmd->result = DID_ERROR << 16;
1903                 dev_warn(&h->pdev->dev, "cp %p has "
1904                         "protocol error\n", cp);
1905                 break;
1906         case CMD_HARDWARE_ERR:
1907                 cmd->result = DID_ERROR << 16;
1908                 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1909                 break;
1910         case CMD_CONNECTION_LOST:
1911                 cmd->result = DID_ERROR << 16;
1912                 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1913                 break;
1914         case CMD_ABORTED:
1915                 cmd->result = DID_ABORT << 16;
1916                 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1917                                 cp, ei->ScsiStatus);
1918                 break;
1919         case CMD_ABORT_FAILED:
1920                 cmd->result = DID_ERROR << 16;
1921                 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1922                 break;
1923         case CMD_UNSOLICITED_ABORT:
1924                 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1925                 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
1926                         "abort\n", cp);
1927                 break;
1928         case CMD_TIMEOUT:
1929                 cmd->result = DID_TIME_OUT << 16;
1930                 dev_warn(&h->pdev->dev, "cp %p timed out\n", cp);
1931                 break;
1932         case CMD_UNABORTABLE:
1933                 cmd->result = DID_ERROR << 16;
1934                 dev_warn(&h->pdev->dev, "Command unabortable\n");
1935                 break;
1936         case CMD_IOACCEL_DISABLED:
1937                 /* This only handles the direct pass-through case since RAID
1938                  * offload is handled above.  Just attempt a retry.
1939                  */
1940                 cmd->result = DID_SOFT_ERROR << 16;
1941                 dev_warn(&h->pdev->dev,
1942                                 "cp %p had HP SSD Smart Path error\n", cp);
1943                 break;
1944         default:
1945                 cmd->result = DID_ERROR << 16;
1946                 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1947                                 cp, ei->CommandStatus);
1948         }
1949         cmd_free(h, cp);
1950         cmd->scsi_done(cmd);
1951 }
1952
1953 static void hpsa_pci_unmap(struct pci_dev *pdev,
1954         struct CommandList *c, int sg_used, int data_direction)
1955 {
1956         int i;
1957
1958         for (i = 0; i < sg_used; i++)
1959                 pci_unmap_single(pdev, (dma_addr_t) le64_to_cpu(c->SG[i].Addr),
1960                                 le32_to_cpu(c->SG[i].Len),
1961                                 data_direction);
1962 }
1963
1964 static int hpsa_map_one(struct pci_dev *pdev,
1965                 struct CommandList *cp,
1966                 unsigned char *buf,
1967                 size_t buflen,
1968                 int data_direction)
1969 {
1970         u64 addr64;
1971
1972         if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1973                 cp->Header.SGList = 0;
1974                 cp->Header.SGTotal = cpu_to_le16(0);
1975                 return 0;
1976         }
1977
1978         addr64 = pci_map_single(pdev, buf, buflen, data_direction);
1979         if (dma_mapping_error(&pdev->dev, addr64)) {
1980                 /* Prevent subsequent unmap of something never mapped */
1981                 cp->Header.SGList = 0;
1982                 cp->Header.SGTotal = cpu_to_le16(0);
1983                 return -1;
1984         }
1985         cp->SG[0].Addr = cpu_to_le64(addr64);
1986         cp->SG[0].Len = cpu_to_le32(buflen);
1987         cp->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* we are not chaining */
1988         cp->Header.SGList = 1;   /* no. SGs contig in this cmd */
1989         cp->Header.SGTotal = cpu_to_le16(1); /* total sgs in cmd list */
1990         return 0;
1991 }
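
     /*
      * Example (hypothetical buffer size): a successful hpsa_map_one()
      * of a 512-byte buffer leaves exactly one descriptor,
      * SG[0] = { Addr = <dma handle>, Len = 512, Ext = HPSA_SG_LAST },
      * with SGList = SGTotal = 1, which is what hpsa_pci_unmap() later
      * walks to undo the mapping.
      */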
1992
1993 static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1994         struct CommandList *c)
1995 {
1996         DECLARE_COMPLETION_ONSTACK(wait);
1997
1998         c->waiting = &wait;
1999         enqueue_cmd_and_start_io(h, c);
2000         wait_for_completion(&wait);
2001 }
2002
2003 static u32 lockup_detected(struct ctlr_info *h)
2004 {
2005         int cpu;
2006         u32 rc, *lockup_detected;
2007
2008         cpu = get_cpu();
2009         lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
2010         rc = *lockup_detected;
2011         put_cpu();
2012         return rc;
2013 }
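
     /*
      * Note (assumes the detection path, outside this excerpt, sets
      * the flag on every CPU): under that assumption, reading only the
      * current CPU's copy of the per-cpu value is sufficient and keeps
      * this check cheap.
      */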
2014
2015 static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
2016         struct CommandList *c)
2017 {
2018         /* If controller lockup detected, fake a hardware error. */
2019         if (unlikely(lockup_detected(h)))
2020                 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
2021         else
2022                 hpsa_scsi_do_simple_cmd_core(h, c);
2023 }
2024
2025 #define MAX_DRIVER_CMD_RETRIES 25
2026 static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2027         struct CommandList *c, int data_direction)
2028 {
2029         int backoff_time = 10, retry_count = 0;
2030
2031         do {
2032                 memset(c->err_info, 0, sizeof(*c->err_info));
2033                 hpsa_scsi_do_simple_cmd_core(h, c);
2034                 retry_count++;
2035                 if (retry_count > 3) {
2036                         msleep(backoff_time);
2037                         if (backoff_time < 1000)
2038                                 backoff_time *= 2;
2039                 }
2040         } while ((check_for_unit_attention(h, c) ||
2041                         check_for_busy(h, c)) &&
2042                         retry_count <= MAX_DRIVER_CMD_RETRIES);
2043         hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2044 }
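
     /*
      * Retry policy of the loop above: the first few attempts are
      * issued back-to-back; once retry_count passes 3, the loop sleeps
      * 10, 20, 40, ... ms between attempts, doubling until the delay
      * passes 1000 ms (topping out at 1280 ms).  It stops when the
      * command no longer reports unit attention or busy, or once
      * retry_count exceeds MAX_DRIVER_CMD_RETRIES (25).
      */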
2045
2046 static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2047                                 struct CommandList *c)
2048 {
2049         const u8 *cdb = c->Request.CDB;
2050         const u8 *lun = c->Header.LUN.LunAddrBytes;
2051
2052         dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2053                 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2054                 txt, lun[0], lun[1], lun[2], lun[3],
2055                 lun[4], lun[5], lun[6], lun[7],
2056                 cdb[0], cdb[1], cdb[2], cdb[3],
2057                 cdb[4], cdb[5], cdb[6], cdb[7],
2058                 cdb[8], cdb[9], cdb[10], cdb[11],
2059                 cdb[12], cdb[13], cdb[14], cdb[15]);
2060 }
2061
2062 static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2063                         struct CommandList *cp)
2064 {
2065         const struct ErrorInfo *ei = cp->err_info;
2066         struct device *d = &cp->h->pdev->dev;
2067         const u8 *sd = ei->SenseInfo;
2068
2069         switch (ei->CommandStatus) {
2070         case CMD_TARGET_STATUS:
2071                 hpsa_print_cmd(h, "SCSI status", cp);
2072                 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2073                         dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
2074                                 sd[2] & 0x0f, sd[12], sd[13]);
2075                 else
2076                         dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
2077                 if (ei->ScsiStatus == 0)
2078                         dev_warn(d, "SCSI status is abnormally zero.  "
2079                         "(probably indicates selection timeout "
2080                         "reported incorrectly due to a known "
2081                         "firmware bug, circa July, 2001.)\n");
2082                 break;
2083         case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
2084                 break;
2085         case CMD_DATA_OVERRUN:
2086                 hpsa_print_cmd(h, "overrun condition", cp);
2087                 break;
2088         case CMD_INVALID: {
2089                 /* controller unfortunately reports SCSI passthrus
2090                  * to non-existent targets as invalid commands.
2091                  */
2092                 hpsa_print_cmd(h, "invalid command", cp);
2093                 dev_warn(d, "probably means device no longer present\n");
2094                 }
2095                 break;
2096         case CMD_PROTOCOL_ERR:
2097                 hpsa_print_cmd(h, "protocol error", cp);
2098                 break;
2099         case CMD_HARDWARE_ERR:
2100                 hpsa_print_cmd(h, "hardware error", cp);
2101                 break;
2102         case CMD_CONNECTION_LOST:
2103                 hpsa_print_cmd(h, "connection lost", cp);
2104                 break;
2105         case CMD_ABORTED:
2106                 hpsa_print_cmd(h, "aborted", cp);
2107                 break;
2108         case CMD_ABORT_FAILED:
2109                 hpsa_print_cmd(h, "abort failed", cp);
2110                 break;
2111         case CMD_UNSOLICITED_ABORT:
2112                 hpsa_print_cmd(h, "unsolicited abort", cp);
2113                 break;
2114         case CMD_TIMEOUT:
2115                 hpsa_print_cmd(h, "timed out", cp);
2116                 break;
2117         case CMD_UNABORTABLE:
2118                 hpsa_print_cmd(h, "unabortable", cp);
2119                 break;
2120         default:
2121                 hpsa_print_cmd(h, "unknown status", cp);
2122                 dev_warn(d, "Unknown command status %x\n",
2123                                 ei->CommandStatus);
2124         }
2125 }
2126
2127 static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
2128                         u16 page, unsigned char *buf,
2129                         unsigned char bufsize)
2130 {
2131         int rc = IO_OK;
2132         struct CommandList *c;
2133         struct ErrorInfo *ei;
2134
2135         c = cmd_alloc(h);
2136
2137         if (c == NULL) {
2138                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2139                 return -ENOMEM;
2140         }
2141
2142         if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2143                         page, scsi3addr, TYPE_CMD)) {
2144                 rc = -1;
2145                 goto out;
2146         }
2147         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2148         ei = c->err_info;
2149         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2150                 hpsa_scsi_interpret_error(h, c);
2151                 rc = -1;
2152         }
2153 out:
2154         cmd_free(h, c);
2155         return rc;
2156 }
2157
2158 static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h,
2159                 unsigned char *scsi3addr, unsigned char page,
2160                 struct bmic_controller_parameters *buf, size_t bufsize)
2161 {
2162         int rc = IO_OK;
2163         struct CommandList *c;
2164         struct ErrorInfo *ei;
2165
2166         c = cmd_alloc(h);
2167         if (c == NULL) {                        /* trouble... */
2168                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2169                 return -ENOMEM;
2170         }
2171
2172         if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize,
2173                         page, scsi3addr, TYPE_CMD)) {
2174                 rc = -1;
2175                 goto out;
2176         }
2177         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2178         ei = c->err_info;
2179         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2180                 hpsa_scsi_interpret_error(h, c);
2181                 rc = -1;
2182         }
2183 out:
2184         cmd_free(h, c);
2185         return rc;
2186 }
2187
2188 static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2189         u8 reset_type)
2190 {
2191         int rc = IO_OK;
2192         struct CommandList *c;
2193         struct ErrorInfo *ei;
2194
2195         c = cmd_alloc(h);
2196
2197         if (c == NULL) {                        /* trouble... */
2198                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2199                 return -ENOMEM;
2200         }
2201
2202         /* fill_cmd can't fail here, no data buffer to map. */
2203         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2204                         scsi3addr, TYPE_MSG);
2205         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
2206         hpsa_scsi_do_simple_cmd_core(h, c);
2207         /* no unmap needed here because no data xfer. */
2208
2209         ei = c->err_info;
2210         if (ei->CommandStatus != 0) {
2211                 hpsa_scsi_interpret_error(h, c);
2212                 rc = -1;
2213         }
2214         cmd_free(h, c);
2215         return rc;
2216 }
2217
2218 static void hpsa_get_raid_level(struct ctlr_info *h,
2219         unsigned char *scsi3addr, unsigned char *raid_level)
2220 {
2221         int rc;
2222         unsigned char *buf;
2223
2224         *raid_level = RAID_UNKNOWN;
2225         buf = kzalloc(64, GFP_KERNEL);
2226         if (!buf)
2227                 return;
2228         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
2229         if (rc == 0)
2230                 *raid_level = buf[8];
2231         if (*raid_level > RAID_UNKNOWN)
2232                 *raid_level = RAID_UNKNOWN;
2233         kfree(buf);
2234         return;
2235 }
2236
2237 #define HPSA_MAP_DEBUG
2238 #ifdef HPSA_MAP_DEBUG
2239 static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2240                                 struct raid_map_data *map_buff)
2241 {
2242         struct raid_map_disk_data *dd = &map_buff->data[0];
2243         int map, row, col;
2244         u16 map_cnt, row_cnt, disks_per_row;
2245
2246         if (rc != 0)
2247                 return;
2248
2249         /* Show details only if debugging has been activated. */
2250         if (h->raid_offload_debug < 2)
2251                 return;
2252
2253         dev_info(&h->pdev->dev, "structure_size = %u\n",
2254                                 le32_to_cpu(map_buff->structure_size));
2255         dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2256                         le32_to_cpu(map_buff->volume_blk_size));
2257         dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2258                         le64_to_cpu(map_buff->volume_blk_cnt));
2259         dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2260                         map_buff->phys_blk_shift);
2261         dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2262                         map_buff->parity_rotation_shift);
2263         dev_info(&h->pdev->dev, "strip_size = %u\n",
2264                         le16_to_cpu(map_buff->strip_size));
2265         dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2266                         le64_to_cpu(map_buff->disk_starting_blk));
2267         dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2268                         le64_to_cpu(map_buff->disk_blk_cnt));
2269         dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2270                         le16_to_cpu(map_buff->data_disks_per_row));
2271         dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2272                         le16_to_cpu(map_buff->metadata_disks_per_row));
2273         dev_info(&h->pdev->dev, "row_cnt = %u\n",
2274                         le16_to_cpu(map_buff->row_cnt));
2275         dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2276                         le16_to_cpu(map_buff->layout_map_count));
2277         dev_info(&h->pdev->dev, "flags = 0x%x\n",
2278                         le16_to_cpu(map_buff->flags));
2279         dev_info(&h->pdev->dev, "encryption = %s\n",
2280                         le16_to_cpu(map_buff->flags) &
2281                         RAID_MAP_FLAG_ENCRYPT_ON ?  "ON" : "OFF");
2282         dev_info(&h->pdev->dev, "dekindex = %u\n",
2283                         le16_to_cpu(map_buff->dekindex));
2284         map_cnt = le16_to_cpu(map_buff->layout_map_count);
2285         for (map = 0; map < map_cnt; map++) {
2286                 dev_info(&h->pdev->dev, "Map%u:\n", map);
2287                 row_cnt = le16_to_cpu(map_buff->row_cnt);
2288                 for (row = 0; row < row_cnt; row++) {
2289                         dev_info(&h->pdev->dev, "  Row%u:\n", row);
2290                         disks_per_row =
2291                                 le16_to_cpu(map_buff->data_disks_per_row);
2292                         for (col = 0; col < disks_per_row; col++, dd++)
2293                                 dev_info(&h->pdev->dev,
2294                                         "    D%02u: h=0x%04x xor=%u,%u\n",
2295                                         col, dd->ioaccel_handle,
2296                                         dd->xor_mult[0], dd->xor_mult[1]);
2297                         disks_per_row =
2298                                 le16_to_cpu(map_buff->metadata_disks_per_row);
2299                         for (col = 0; col < disks_per_row; col++, dd++)
2300                                 dev_info(&h->pdev->dev,
2301                                         "    M%02u: h=0x%04x xor=%u,%u\n",
2302                                         col, dd->ioaccel_handle,
2303                                         dd->xor_mult[0], dd->xor_mult[1]);
2304                 }
2305         }
2306 }
2307 #else
2308 static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2309                         __attribute__((unused)) int rc,
2310                         __attribute__((unused)) struct raid_map_data *map_buff)
2311 {
2312 }
2313 #endif
2314
2315 static int hpsa_get_raid_map(struct ctlr_info *h,
2316         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2317 {
2318         int rc = 0;
2319         struct CommandList *c;
2320         struct ErrorInfo *ei;
2321
2322         c = cmd_alloc(h);
2323         if (c == NULL) {
2324                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2325                 return -ENOMEM;
2326         }
2327         if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2328                         sizeof(this_device->raid_map), 0,
2329                         scsi3addr, TYPE_CMD)) {
2330                 dev_warn(&h->pdev->dev, "fill_cmd failed in hpsa_get_raid_map()\n");
2331                 cmd_free(h, c);
2332                 return -ENOMEM;
2333         }
2334         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2335         ei = c->err_info;
2336         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2337                 hpsa_scsi_interpret_error(h, c);
2338                 cmd_free(h, c);
2339                 return -1;
2340         }
2341         cmd_free(h, c);
2342
2343         /* @todo in the future, dynamically allocate RAID map memory */
2344         if (le32_to_cpu(this_device->raid_map.structure_size) >
2345                                 sizeof(this_device->raid_map)) {
2346                 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2347                 rc = -1;
2348         }
2349         hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2350         return rc;
2351 }
2352
2353 static int hpsa_bmic_id_physical_device(struct ctlr_info *h,
2354                 unsigned char scsi3addr[], u16 bmic_device_index,
2355                 struct bmic_identify_physical_device *buf, size_t bufsize)
2356 {
2357         int rc = IO_OK;
2358         struct CommandList *c;
2359         struct ErrorInfo *ei;
2360
2361         c = cmd_alloc(h);
             if (c == NULL) {                        /* trouble... */
                     dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
                     return -ENOMEM;
             }
2362         rc = fill_cmd(c, BMIC_IDENTIFY_PHYSICAL_DEVICE, h, buf, bufsize,
2363                 0, RAID_CTLR_LUNID, TYPE_CMD);
2364         if (rc)
2365                 goto out;
2366
2367         c->Request.CDB[2] = bmic_device_index & 0xff;
2368         c->Request.CDB[9] = (bmic_device_index >> 8) & 0xff;
2369
2370         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2371         ei = c->err_info;
2372         if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2373                 hpsa_scsi_interpret_error(h, c);
2374                 rc = -1;
2375         }
2376 out:
2377         cmd_free(h, c);
2378         return rc;
2379 }
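
     /*
      * Example of the index split above: for
      * bmic_device_index = 0x0123, CDB[2] carries the low byte (0x23)
      * and CDB[9] the high byte (0x01) of the 16-bit BMIC device
      * index.
      */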
2380
2381 static int hpsa_vpd_page_supported(struct ctlr_info *h,
2382         unsigned char scsi3addr[], u8 page)
2383 {
2384         int rc;
2385         int i;
2386         int pages;
2387         unsigned char *buf, bufsize;
2388
2389         buf = kzalloc(256, GFP_KERNEL);
2390         if (!buf)
2391                 return 0;
2392
2393         /* Get the size of the page list first */
2394         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2395                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2396                                 buf, HPSA_VPD_HEADER_SZ);
2397         if (rc != 0)
2398                 goto exit_unsupported;
2399         pages = buf[3];
2400         if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2401                 bufsize = pages + HPSA_VPD_HEADER_SZ;
2402         else
2403                 bufsize = 255;
2404
2405         /* Get the whole VPD page list */
2406         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2407                                 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2408                                 buf, bufsize);
2409         if (rc != 0)
2410                 goto exit_unsupported;
2411
2412         pages = buf[3];
2413         for (i = 1; i <= pages; i++)
2414                 if (buf[3 + i] == page)
2415                         goto exit_supported;
2416 exit_unsupported:
2417         kfree(buf);
2418         return 0;
2419 exit_supported:
2420         kfree(buf);
2421         return 1;
2422 }
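
     /*
      * Example (hypothetical VPD data, standard SPC page-0 layout): a
      * supported-pages response of 00 00 00 03 00 83 c1 means
      * buf[3] = 3 pages, listed at buf[4..6]; the loop above would
      * then report page 0xC1 as supported and page 0xC2 as
      * unsupported.
      */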
2423
2424 static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2425         unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2426 {
2427         int rc;
2428         unsigned char *buf;
2429         u8 ioaccel_status;
2430
2431         this_device->offload_config = 0;
2432         this_device->offload_enabled = 0;
2433
2434         buf = kzalloc(64, GFP_KERNEL);
2435         if (!buf)
2436                 return;
2437         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2438                 goto out;
2439         rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2440                         VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2441         if (rc != 0)
2442                 goto out;
2443
2444 #define IOACCEL_STATUS_BYTE 4
2445 #define OFFLOAD_CONFIGURED_BIT 0x01
2446 #define OFFLOAD_ENABLED_BIT 0x02
2447         ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2448         this_device->offload_config =
2449                 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2450         if (this_device->offload_config) {
2451                 this_device->offload_enabled =
2452                         !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2453                 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2454                         this_device->offload_enabled = 0;
2455         }
2456 out:
2457         kfree(buf);
2458         return;
2459 }
2460
2461 /* Get the device id from inquiry page 0x83 */
2462 static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2463         unsigned char *device_id, int buflen)
2464 {
2465         int rc;
2466         unsigned char *buf;
2467
2468         if (buflen > 16)
2469                 buflen = 16;
2470         buf = kzalloc(64, GFP_KERNEL);
2471         if (!buf)
2472                 return -ENOMEM;
2473         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
2474         if (rc == 0)
2475                 memcpy(device_id, &buf[8], buflen);
2476         kfree(buf);
2477         return rc != 0;
2478 }
2479
2480 static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2481                 void *buf, int bufsize,
2482                 int extended_response)
2483 {
2484         int rc = IO_OK;
2485         struct CommandList *c;
2486         unsigned char scsi3addr[8];
2487         struct ErrorInfo *ei;
2488
2489         c = cmd_alloc(h);
2490         if (c == NULL) {                        /* trouble... */
2491                 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
2492                 return -1;
2493         }
2494         /* address the controller */
2495         memset(scsi3addr, 0, sizeof(scsi3addr));
2496         if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2497                 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2498                 rc = -1;
2499                 goto out;
2500         }
2501         if (extended_response)
2502                 c->Request.CDB[1] = extended_response;
2503         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2504         ei = c->err_info;
2505         if (ei->CommandStatus != 0 &&
2506             ei->CommandStatus != CMD_DATA_UNDERRUN) {
2507                 hpsa_scsi_interpret_error(h, c);
2508                 rc = -1;
2509         } else {
2510                 struct ReportLUNdata *rld = buf;
2511
2512                 if (rld->extended_response_flag != extended_response) {
2513                         dev_err(&h->pdev->dev,
2514                                 "report luns requested format %u, got %u\n",
2515                                 extended_response,
2516                                 rld->extended_response_flag);
2517                         rc = -1;
2518                 }
2519         }
2520 out:
2521         cmd_free(h, c);
2522         return rc;
2523 }
2524
2525 static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2526                 struct ReportExtendedLUNdata *buf, int bufsize)
2527 {
2528         return hpsa_scsi_do_report_luns(h, 0, buf, bufsize,
2529                                                 HPSA_REPORT_PHYS_EXTENDED);
2530 }
2531
2532 static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2533                 struct ReportLUNdata *buf, int bufsize)
2534 {
2535         return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2536 }
2537
2538 static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2539         int bus, int target, int lun)
2540 {
2541         device->bus = bus;
2542         device->target = target;
2543         device->lun = lun;
2544 }
2545
2546 /* Use VPD inquiry to get details of volume status */
2547 static int hpsa_get_volume_status(struct ctlr_info *h,
2548                                         unsigned char scsi3addr[])
2549 {
2550         int rc;
2551         int status;
2552         int size;
2553         unsigned char *buf;
2554
2555         buf = kzalloc(64, GFP_KERNEL);
2556         if (!buf)
2557                 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2558
2559         /* Does controller have VPD for logical volume status? */
2560         if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS))
2561                 goto exit_failed;
2562
2563         /* Get the size of the VPD return buffer */
2564         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2565                                         buf, HPSA_VPD_HEADER_SZ);
2566         if (rc != 0)
2567                 goto exit_failed;
2568         size = buf[3];
2569
2570         /* Now get the whole VPD buffer */
2571         rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2572                                         buf, size + HPSA_VPD_HEADER_SZ);
2573         if (rc != 0)
2574                 goto exit_failed;
2575         status = buf[4]; /* status byte */
2576
2577         kfree(buf);
2578         return status;
2579 exit_failed:
2580         kfree(buf);
2581         return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2582 }
2583
2584 /* Determine offline status of a volume.
2585  * Return either:
2586  *  0 (not offline)
2587  *  0xff (offline for unknown reasons)
2588  *  # (integer code indicating one of several NOT READY states
2589  *     describing why a volume is to be kept offline)
2590  */
2591 static int hpsa_volume_offline(struct ctlr_info *h,
2592                                         unsigned char scsi3addr[])
2593 {
2594         struct CommandList *c;
2595         unsigned char *sense, sense_key, asc, ascq;
2596         int ldstat = 0;
2597         u16 cmd_status;
2598         u8 scsi_status;
2599 #define ASC_LUN_NOT_READY 0x04
2600 #define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2601 #define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2602
2603         c = cmd_alloc(h);
2604         if (!c)
2605                 return 0;
2606         (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2607         hpsa_scsi_do_simple_cmd_core(h, c);
2608         sense = c->err_info->SenseInfo;
2609         sense_key = sense[2];
2610         asc = sense[12];
2611         ascq = sense[13];
2612         cmd_status = c->err_info->CommandStatus;
2613         scsi_status = c->err_info->ScsiStatus;
2614         cmd_free(h, c);
2615         /* Is the volume 'not ready'? */
2616         if (cmd_status != CMD_TARGET_STATUS ||
2617                 scsi_status != SAM_STAT_CHECK_CONDITION ||
2618                 sense_key != NOT_READY ||
2619                 asc != ASC_LUN_NOT_READY)  {
2620                 return 0;
2621         }
2622
2623         /* Determine the reason for not ready state */
2624         ldstat = hpsa_get_volume_status(h, scsi3addr);
2625
2626         /* Keep volume offline in certain cases: */
2627         switch (ldstat) {
2628         case HPSA_LV_UNDERGOING_ERASE:
2629         case HPSA_LV_UNDERGOING_RPI:
2630         case HPSA_LV_PENDING_RPI:
2631         case HPSA_LV_ENCRYPTED_NO_KEY:
2632         case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2633         case HPSA_LV_UNDERGOING_ENCRYPTION:
2634         case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2635         case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2636                 return ldstat;
2637         case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2638                 /* If VPD status page isn't available,
2639                  * use ASC/ASCQ to determine state
2640                  */
2641                 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2642                         (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2643                         return ldstat;
2644                 break;
2645         default:
2646                 break;
2647         }
2648         return 0;
2649 }
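
/*
 * Illustrative sketch only (not driver code): the 'not ready' test
 * above keys off fixed-format sense data, where byte 2 carries the
 * sense key and bytes 12/13 carry the ASC/ASCQ pair.
 */
#if 0
static int example_lun_not_ready(const unsigned char *sense)
{
	return (sense[2] & 0x0f) == NOT_READY &&
		sense[12] == ASC_LUN_NOT_READY;
}
#endif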
2650
2651 static int hpsa_update_device_info(struct ctlr_info *h,
2652         unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2653         unsigned char *is_OBDR_device)
2654 {
2655
2656 #define OBDR_SIG_OFFSET 43
2657 #define OBDR_TAPE_SIG "$DR-10"
2658 #define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2659 #define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
2660
2661         unsigned char *inq_buff;
2662         unsigned char *obdr_sig;
2663
2664         inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
2665         if (!inq_buff)
2666                 goto bail_out;
2667
2668         /* Do an inquiry to the device to see what it is. */
2669         if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2670                 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2671                 /* Inquiry failed (msg printed already) */
2672                 dev_err(&h->pdev->dev,
2673                         "hpsa_update_device_info: inquiry failed\n");
2674                 goto bail_out;
2675         }
2676
2677         this_device->devtype = (inq_buff[0] & 0x1f);
2678         memcpy(this_device->scsi3addr, scsi3addr, 8);
2679         memcpy(this_device->vendor, &inq_buff[8],
2680                 sizeof(this_device->vendor));
2681         memcpy(this_device->model, &inq_buff[16],
2682                 sizeof(this_device->model));
2683         memset(this_device->device_id, 0,
2684                 sizeof(this_device->device_id));
2685         hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2686                 sizeof(this_device->device_id));
2687
2688         if (this_device->devtype == TYPE_DISK &&
2689                 is_logical_dev_addr_mode(scsi3addr)) {
2690                 int volume_offline;
2691
2692                 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
2693                 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2694                         hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2695                 volume_offline = hpsa_volume_offline(h, scsi3addr);
2696                 if (volume_offline < 0 || volume_offline > 0xff)
2697                         volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED;
2698                 this_device->volume_offline = volume_offline & 0xff;
2699         } else {
2700                 this_device->raid_level = RAID_UNKNOWN;
2701                 this_device->offload_config = 0;
2702                 this_device->offload_enabled = 0;
2703                 this_device->volume_offline = 0;
2704                 this_device->queue_depth = h->nr_cmds;
2705         }
2706
2707         if (is_OBDR_device) {
2708                 /* See if this is a One-Button-Disaster-Recovery device
2709                  * by looking for "$DR-10" at offset 43 in inquiry data.
2710                  */
2711                 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2712                 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2713                                         strncmp(obdr_sig, OBDR_TAPE_SIG,
2714                                                 OBDR_SIG_LEN) == 0);
2715         }
2716
2717         kfree(inq_buff);
2718         return 0;
2719
2720 bail_out:
2721         kfree(inq_buff);
2722         return 1;
2723 }
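
/*
 * Illustrative sketch only (not driver code): the OBDR test above in
 * isolation -- a device is treated as an OBDR tape when it reports
 * itself as TYPE_ROM and carries the "$DR-10" signature at offset 43
 * of its inquiry data.
 */
#if 0
static int example_is_obdr(const unsigned char *inq_buff)
{
	return (inq_buff[0] & 0x1f) == TYPE_ROM &&
		memcmp(&inq_buff[OBDR_SIG_OFFSET], OBDR_TAPE_SIG,
			OBDR_SIG_LEN) == 0;
}
#endif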
2724
2725 static unsigned char *ext_target_model[] = {
2726         "MSA2012",
2727         "MSA2024",
2728         "MSA2312",
2729         "MSA2324",
2730         "P2000 G3 SAS",
2731         "MSA 2040 SAS",
2732         NULL,
2733 };
2734
2735 static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
2736 {
2737         int i;
2738
2739         for (i = 0; ext_target_model[i]; i++)
2740                 if (strncmp(device->model, ext_target_model[i],
2741                         strlen(ext_target_model[i])) == 0)
2742                         return 1;
2743         return 0;
2744 }
2745
2746 /* Helper function to assign bus, target, lun mapping of devices.
2747  * Puts non-external target logical volumes on bus 0, external target logical
2748  * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
2749  * Logical drive target and lun are assigned at this time, but
2750  * physical device lun and target assignment are deferred (assigned
2751  * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
2752  */
2753 static void figure_bus_target_lun(struct ctlr_info *h,
2754         u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
2755 {
2756         u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
2757
2758         if (!is_logical_dev_addr_mode(lunaddrbytes)) {
2759                 /* physical device, target and lun filled in later */
2760                 if (is_hba_lunid(lunaddrbytes))
2761                         hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
2762                 else
2763                         /* defer target, lun assignment for physical devices */
2764                         hpsa_set_bus_target_lun(device, 2, -1, -1);
2765                 return;
2766         }
2767         /* It's a logical device */
2768         if (is_ext_target(h, device)) {
2769                 /* External target: put logicals on bus 1 and match
2770                  * the target/lun numbers the box reports; other
2771                  * Smart Arrays get bus 0, target 0, matching lunid.
2772                  */
2773                 hpsa_set_bus_target_lun(device,
2774                         1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
2775                 return;
2776         }
2777         hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
2778 }
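
/*
 * Worked example (a sketch, not normative): for an external target
 * reporting lunid 0x00230005, the decode above yields target =
 * (lunid >> 16) & 0x3fff = 0x23 and lun = lunid & 0x00ff = 5, so the
 * device lands at bus 1, target 0x23, lun 5.  A plain Smart Array
 * logical volume with lunid 7 lands at bus 0, target 0, lun 7.
 */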
2779
2780 /*
2781  * If there is no lun 0 on a target, Linux won't find any devices.
2782  * For the external targets (arrays), we have to manually detect the enclosure
2783  * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
2784  * it for some reason.  *tmpdevice is the target we're adding,
2785  * this_device is a pointer into the current element of currentsd[]
2786  * that we're building up in update_scsi_devices(), below.
2787  * lunzerobits is a bitmap that tracks which targets already have a
2788  * lun 0 assigned.
2789  * Returns 1 if an enclosure was added, 0 if not.
2790  */
2791 static int add_ext_target_dev(struct ctlr_info *h,
2792         struct hpsa_scsi_dev_t *tmpdevice,
2793         struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
2794         unsigned long lunzerobits[], int *n_ext_target_devs)
2795 {
2796         unsigned char scsi3addr[8];
2797
2798         if (test_bit(tmpdevice->target, lunzerobits))
2799                 return 0; /* There is already a lun 0 on this target. */
2800
2801         if (!is_logical_dev_addr_mode(lunaddrbytes))
2802                 return 0; /* It's the logical targets that may lack lun 0. */
2803
2804         if (!is_ext_target(h, tmpdevice))
2805                 return 0; /* Only external target devices have this problem. */
2806
2807         if (tmpdevice->lun == 0) /* this target already has a lun 0 */
2808                 return 0;
2809
2810         memset(scsi3addr, 0, 8);
2811         scsi3addr[3] = tmpdevice->target;
2812         if (is_hba_lunid(scsi3addr))
2813                 return 0; /* Don't add the RAID controller here. */
2814
2815         if (is_scsi_rev_5(h))
2816                 return 0; /* p1210m doesn't need to do this. */
2817
2818         if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
2819                 dev_warn(&h->pdev->dev, "Maximum number of external "
2820                         "target devices exceeded.  Check your hardware "
2821                         "configuration.\n");
2822                 return 0;
2823         }
2824
2825         if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
2826                 return 0;
2827         (*n_ext_target_devs)++;
2828         hpsa_set_bus_target_lun(this_device,
2829                                 tmpdevice->bus, tmpdevice->target, 0);
2830         set_bit(tmpdevice->target, lunzerobits);
2831         return 1;
2832 }
2833
2834 /*
2835  * Get address of physical disk used for an ioaccel2 mode command:
2836  *      1. Extract ioaccel2 handle from the command.
2837  *      2. Find a matching ioaccel2 handle from list of physical disks.
2838  *      3. Return:
2839  *              1 and set scsi3addr to address of matching physical
2840  *              0 if no matching physical disk was found.
2841  */
2842 static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2843         struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2844 {
2845         struct ReportExtendedLUNdata *physicals = NULL;
2846         int responsesize = 24;  /* size of physical extended response */
2847         int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
2848         u32 nphysicals = 0;     /* number of reported physical devs */
2849         int found = 0;          /* found match (1) or not (0) */
2850         u32 find;               /* handle we need to match */
2851         int i;
2852         struct scsi_cmnd *scmd; /* scsi command within request being aborted */
2853         struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2854         struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2855         __le32 it_nexus;        /* 4 byte device handle of the device */
2856         __le32 scsi_nexus;      /* 4 byte device handle from the ioaccel2 cmd */
2857
2858         if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2859                 return 0; /* no match */
2860
2861         /* point to the ioaccel2 command being aborted */
2862         c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2863         if (c2a == NULL)
2864                 return 0; /* no match */
2865
2866         scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2867         if (scmd == NULL)
2868                 return 0; /* no match */
2869
2870         d = scmd->device->hostdata;
2871         if (d == NULL)
2872                 return 0; /* no match */
2873
2874         it_nexus = cpu_to_le32(d->ioaccel_handle);
2875         scsi_nexus = c2a->scsi_nexus;
2876         find = le32_to_cpu(c2a->scsi_nexus);
2877
2878         if (h->raid_offload_debug > 0)
2879                 dev_info(&h->pdev->dev,
2880                         "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
2881                         __func__, scsi_nexus,
2882                         d->device_id[0], d->device_id[1], d->device_id[2],
2883                         d->device_id[3], d->device_id[4], d->device_id[5],
2884                         d->device_id[6], d->device_id[7], d->device_id[8],
2885                         d->device_id[9], d->device_id[10], d->device_id[11],
2886                         d->device_id[12], d->device_id[13], d->device_id[14],
2887                         d->device_id[15]);
2888
2889         /* Get the list of physical devices */
2890         physicals = kzalloc(reportsize, GFP_KERNEL);
2891         if (physicals == NULL)
2892                 return 0;
2893         if (hpsa_scsi_do_report_phys_luns(h, physicals, reportsize)) {
2894                 dev_err(&h->pdev->dev,
2895                         "Can't lookup %s device handle: report physical LUNs failed.\n",
2896                         "HP SSD Smart Path");
2897                 kfree(physicals);
2898                 return 0;
2899         }
2900         nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2901                                                         responsesize;
2902
2903         /* find ioaccel2 handle in list of physicals: */
2904         for (i = 0; i < nphysicals; i++) {
2905                 struct ext_report_lun_entry *entry = &physicals->LUN[i];
2906
2907                 /* handle is in bytes 28-31 of each lun */
2908                 if (entry->ioaccel_handle != find)
2909                         continue; /* didn't match */
2910                 found = 1;
2911                 memcpy(scsi3addr, entry->lunid, 8);
2912                 if (h->raid_offload_debug > 0)
2913                         dev_info(&h->pdev->dev,
2914                                 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n",
2915                                 __func__, find,
2916                                 entry->ioaccel_handle, scsi3addr);
2917                 break; /* found it */
2918         }
2919
2920         kfree(physicals);
2921         return found;
2922 }
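
/*
 * Illustrative sketch only (not driver code): the match above is a
 * linear scan of the extended report, comparing the 4-byte ioaccel
 * handle carried in bytes 28-31 of each entry and copying out the
 * 8-byte lunid on a hit.
 */
#if 0
static int example_find_handle(struct ReportExtendedLUNdata *phys,
		u32 nphysicals, u32 find, unsigned char *scsi3addr)
{
	u32 i;

	for (i = 0; i < nphysicals; i++) {
		if (phys->LUN[i].ioaccel_handle != find)
			continue;
		memcpy(scsi3addr, phys->LUN[i].lunid, 8);
		return 1;
	}
	return 0;
}
#endif
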
2927 /*
2928  * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,
2929  * logdev.  The number of luns in physdev and logdev are returned in
2930  * *nphysicals and *nlogicals, respectively.
2931  * Returns 0 on success, -1 otherwise.
2932  */
2933 static int hpsa_gather_lun_info(struct ctlr_info *h,
2934         struct ReportExtendedLUNdata *physdev, u32 *nphysicals,
2935         struct ReportLUNdata *logdev, u32 *nlogicals)
2936 {
2937         if (hpsa_scsi_do_report_phys_luns(h, physdev, sizeof(*physdev))) {
2938                 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2939                 return -1;
2940         }
2941         *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 24;
2942         if (*nphysicals > HPSA_MAX_PHYS_LUN) {
2943                 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded. %d LUNs ignored.\n",
2944                         HPSA_MAX_PHYS_LUN, *nphysicals - HPSA_MAX_PHYS_LUN);
2945                 *nphysicals = HPSA_MAX_PHYS_LUN;
2946         }
2947         if (hpsa_scsi_do_report_log_luns(h, logdev, sizeof(*logdev))) {
2948                 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2949                 return -1;
2950         }
2951         *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
2952         /* Reject Logicals in excess of our max capability. */
2953         if (*nlogicals > HPSA_MAX_LUN) {
2954                 dev_warn(&h->pdev->dev,
2955                         "maximum logical LUNs (%d) exceeded.  "
2956                         "%d LUNs ignored.\n", HPSA_MAX_LUN,
2957                         *nlogicals - HPSA_MAX_LUN);
2958                 *nlogicals = HPSA_MAX_LUN;
2959         }
2960         if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
2961                 dev_warn(&h->pdev->dev,
2962                         "maximum logical + physical LUNs (%d) exceeded. "
2963                         "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2964                         *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
2965                 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
2966         }
2967         return 0;
2968 }
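
/*
 * Worked example (a sketch, not normative): LUNListLength is a
 * big-endian byte count.  Three extended physical entries at 24 bytes
 * each give LUNListLength 0x48, so *nphysicals = 0x48 / 24 = 3; three
 * logical entries at 8 bytes each give 0x18, so *nlogicals = 3.
 */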
2969
2970 static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position,
2971         int i, int nphysicals, int nlogicals,
2972         struct ReportExtendedLUNdata *physdev_list,
2973         struct ReportLUNdata *logdev_list)
2974 {
2975         /* Helper function: figure out where the LUN ID info is coming from,
2976          * given index i, the lists of physical and logical devices, and where
2977          * in the list the raid controller is supposed to appear (first or last).
2978          */
2979
2980         int logicals_start = nphysicals + (raid_ctlr_position == 0);
2981         int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
2982
2983         if (i == raid_ctlr_position)
2984                 return RAID_CTLR_LUNID;
2985
2986         if (i < logicals_start)
2987                 return &physdev_list->LUN[i -
2988                                 (raid_ctlr_position == 0)].lunid[0];
2989
2990         if (i < last_device)
2991                 return &logdev_list->LUN[i - nphysicals -
2992                         (raid_ctlr_position == 0)][0];
2993         BUG();
2994         return NULL;
2995 }
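
/*
 * Worked example (a sketch, not normative): with nphysicals = 2,
 * nlogicals = 3, and the controller reported last (raid_ctlr_position
 * == 5), indices 0-1 come from physdev_list, indices 2-4 from
 * logdev_list, and index 5 returns RAID_CTLR_LUNID.  With the
 * controller first (position 0), everything shifts up by one slot.
 */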
2996
2997 static int hpsa_hba_mode_enabled(struct ctlr_info *h)
2998 {
2999         int rc;
3000         int hba_mode_enabled;
3001         struct bmic_controller_parameters *ctlr_params;
3002
3003         ctlr_params = kzalloc(sizeof(*ctlr_params), GFP_KERNEL);
3004
3005         if (!ctlr_params)
3006                 return -ENOMEM;
3007         rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params,
3008                 sizeof(struct bmic_controller_parameters));
3009         if (rc) {
3010                 kfree(ctlr_params);
3011                 return rc;
3012         }
3013
3014         hba_mode_enabled =
3015                 ((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0);
3016         kfree(ctlr_params);
3017         return hba_mode_enabled;
3018 }
3019
3020 /* get physical drive ioaccel handle and queue depth */
3021 static void hpsa_get_ioaccel_drive_info(struct ctlr_info *h,
3022                 struct hpsa_scsi_dev_t *dev,
3023                 u8 *lunaddrbytes,
3024                 struct bmic_identify_physical_device *id_phys)
3025 {
3026         int rc;
3027         struct ext_report_lun_entry *rle =
3028                 (struct ext_report_lun_entry *) lunaddrbytes;
3029
3030         dev->ioaccel_handle = rle->ioaccel_handle;
3031         memset(id_phys, 0, sizeof(*id_phys));
3032         rc = hpsa_bmic_id_physical_device(h, lunaddrbytes,
3033                         GET_BMIC_DRIVE_NUMBER(lunaddrbytes), id_phys,
3034                         sizeof(*id_phys));
3035 #define DRIVE_CMDS_RESERVED_FOR_FW 2
3036 #define DRIVE_QUEUE_DEPTH 7
3037         if (!rc)
3038                 /* Reserve space for FW operations */
3039                 dev->queue_depth =
3040                         le16_to_cpu(id_phys->current_queue_depth_limit) -
3041                                 DRIVE_CMDS_RESERVED_FOR_FW;
3042         else
3043                 dev->queue_depth = DRIVE_QUEUE_DEPTH; /* conservative */
3044         atomic_set(&dev->ioaccel_cmds_out, 0);
3045 }
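
/*
 * Worked example (a sketch, not normative): a drive whose BMIC
 * identify data reports current_queue_depth_limit == 32 gets
 * dev->queue_depth = 32 - DRIVE_CMDS_RESERVED_FOR_FW = 30, keeping two
 * slots free for firmware-initiated commands; if the BMIC command
 * fails, the conservative DRIVE_QUEUE_DEPTH of 7 is used instead.
 */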
3046
3047 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
3048 {
3049         /* The idea here is that we could get notified
3050          * that some devices have changed, so we do a report
3051          * physical luns and report logical luns cmd, and adjust
3052          * our list of devices accordingly.
3053          *
3054          * The scsi3addr's of devices won't change so long as the
3055          * adapter is not reset.  That means we can rescan and
3056          * tell which devices we already know about, vs. new
3057          * devices, vs. disappearing devices.
3058          */
3059         struct ReportExtendedLUNdata *physdev_list = NULL;
3060         struct ReportLUNdata *logdev_list = NULL;
3061         struct bmic_identify_physical_device *id_phys = NULL;
3062         u32 nphysicals = 0;
3063         u32 nlogicals = 0;
3064         u32 ndev_allocated = 0;
3065         struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
3066         int ncurrent = 0;
3067         int i, n_ext_target_devs, ndevs_to_allocate;
3068         int raid_ctlr_position;
3069         int rescan_hba_mode;
3070         DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
3071
3072         currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
3073         physdev_list = kzalloc(sizeof(*physdev_list), GFP_KERNEL);
3074         logdev_list = kzalloc(sizeof(*logdev_list), GFP_KERNEL);
3075         tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
3076         id_phys = kzalloc(sizeof(*id_phys), GFP_KERNEL);
3077
3078         if (!currentsd || !physdev_list || !logdev_list ||
3079                 !tmpdevice || !id_phys) {
3080                 dev_err(&h->pdev->dev, "out of memory\n");
3081                 goto out;
3082         }
3083         memset(lunzerobits, 0, sizeof(lunzerobits));
3084
3085         rescan_hba_mode = hpsa_hba_mode_enabled(h);
3086         if (rescan_hba_mode < 0)
3087                 goto out;
3088
3089         if (!h->hba_mode_enabled && rescan_hba_mode)
3090                 dev_warn(&h->pdev->dev, "HBA mode enabled\n");
3091         else if (h->hba_mode_enabled && !rescan_hba_mode)
3092                 dev_warn(&h->pdev->dev, "HBA mode disabled\n");
3093
3094         h->hba_mode_enabled = rescan_hba_mode;
3095
3096         if (hpsa_gather_lun_info(h, physdev_list, &nphysicals,
3097                         logdev_list, &nlogicals))
3098                 goto out;
3099
3100         /* We might see up to the maximum number of logical and physical disks
3101          * plus external target devices, and a device for the local RAID
3102          * controller.
3103          */
3104         ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
3105
3106         /* Allocate the per device structures */
3107         for (i = 0; i < ndevs_to_allocate; i++) {
3108                 if (i >= HPSA_MAX_DEVICES) {
3109                         dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
3110                                 "  %d devices ignored.\n", HPSA_MAX_DEVICES,
3111                                 ndevs_to_allocate - HPSA_MAX_DEVICES);
3112                         break;
3113                 }
3114
3115                 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
3116                 if (!currentsd[i]) {
3117                         dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
3118                                 __FILE__, __LINE__);
3119                         goto out;
3120                 }
3121                 ndev_allocated++;
3122         }
3123
3124         if (is_scsi_rev_5(h))
3125                 raid_ctlr_position = 0;
3126         else
3127                 raid_ctlr_position = nphysicals + nlogicals;
3128
3129         /* adjust our table of devices */
3130         n_ext_target_devs = 0;
3131         for (i = 0; i < nphysicals + nlogicals + 1; i++) {
3132                 u8 *lunaddrbytes, is_OBDR = 0;
3133
3134                 /* Figure out where the LUN ID info is coming from */
3135                 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3136                         i, nphysicals, nlogicals, physdev_list, logdev_list);
3137                 /* skip masked physical devices. */
3138                 if (lunaddrbytes[3] & 0xC0 &&
3139                         i < nphysicals + (raid_ctlr_position == 0))
3140                         continue;
3141
3142                 /* Get device type, vendor, model, device id */
3143                 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3144                                                         &is_OBDR))
3145                         continue; /* skip it if we can't talk to it. */
3146                 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
3147                 this_device = currentsd[ncurrent];
3148
3149                 /*
3150                  * For external target devices, we have to insert a LUN 0 which
3151                  * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3152                  * is nonetheless an enclosure device there.  We have to
3153                  * present it; otherwise Linux won't find anything if
3154                  * there is no lun 0.
3155                  */
3156                 if (add_ext_target_dev(h, tmpdevice, this_device,
3157                                 lunaddrbytes, lunzerobits,
3158                                 &n_ext_target_devs)) {
3159                         ncurrent++;
3160                         this_device = currentsd[ncurrent];
3161                 }
3162
3163                 *this_device = *tmpdevice;
3164
3165                 switch (this_device->devtype) {
3166                 case TYPE_ROM:
3167                         /* We don't *really* support actual CD-ROM devices,
3168                          * just "One Button Disaster Recovery" tape drive
3169                          * which temporarily pretends to be a CD-ROM drive.
3170                          * So we check that the device is really an OBDR tape
3171                          * device by checking for "$DR-10" in bytes 43-48 of
3172                          * the inquiry data.
3173                          */
3174                         if (is_OBDR)
3175                                 ncurrent++;
3176                         break;
3177                 case TYPE_DISK:
3178                         if (h->hba_mode_enabled) {
3179                                 /* never use raid mapper in HBA mode */
3180                                 this_device->offload_enabled = 0;
3181                                 ncurrent++;
3182                                 break;
3183                         } else if (h->acciopath_status) {
3184                                 if (i >= nphysicals) {
3185                                         ncurrent++;
3186                                         break;
3187                                 }
3188                         } else {
3189                                 if (i < nphysicals)
3190                                         break;
3191                                 ncurrent++;
3192                                 break;
3193                         }
3194                         if (h->transMethod & CFGTBL_Trans_io_accel1 ||
3195                                 h->transMethod & CFGTBL_Trans_io_accel2) {
3196                                 hpsa_get_ioaccel_drive_info(h, this_device,
3197                                                         lunaddrbytes, id_phys);
3198                                 atomic_set(&this_device->ioaccel_cmds_out, 0);
3199                                 ncurrent++;
3200                         }
3201                         break;
3202                 case TYPE_TAPE:
3203                 case TYPE_MEDIUM_CHANGER:
3204                         ncurrent++;
3205                         break;
3206                 case TYPE_RAID:
3207                         /* Only present the Smart Array HBA as a RAID controller.
3208                          * If it's a RAID controller other than the HBA itself
3209                          * (an external RAID controller, MSA500 or similar)
3210                          * don't present it.
3211                          */
3212                         if (!is_hba_lunid(lunaddrbytes))
3213                                 break;
3214                         ncurrent++;
3215                         break;
3216                 default:
3217                         break;
3218                 }
3219                 if (ncurrent >= HPSA_MAX_DEVICES)
3220                         break;
3221         }
3222         hpsa_update_log_drive_phys_drive_ptrs(h, currentsd, ncurrent);
3223         adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3224 out:
3225         kfree(tmpdevice);
3226         for (i = 0; i < ndev_allocated; i++)
3227                 kfree(currentsd[i]);
3228         kfree(currentsd);
3229         kfree(physdev_list);
3230         kfree(logdev_list);
3231         kfree(id_phys);
3232 }
3233
3234 /*
3235  * hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), does the PCI
3236  * DMA mapping, and fills in the scatter gather entries of the
3237  * hpsa command, cp.
3238  */
3239 static int hpsa_scatter_gather(struct ctlr_info *h,
3240                 struct CommandList *cp,
3241                 struct scsi_cmnd *cmd)
3242 {
3243         unsigned int len;
3244         struct scatterlist *sg;
3245         u64 addr64;
3246         int use_sg, i, sg_index, chained;
3247         struct SGDescriptor *curr_sg;
3248
3249         BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
3250
3251         use_sg = scsi_dma_map(cmd);
3252         if (use_sg < 0)
3253                 return use_sg;
3254
3255         if (!use_sg)
3256                 goto sglist_finished;
3257
3258         curr_sg = cp->SG;
3259         chained = 0;
3260         sg_index = 0;
3261         scsi_for_each_sg(cmd, sg, use_sg, i) {
3262                 if (i == h->max_cmd_sg_entries - 1 &&
3263                         use_sg > h->max_cmd_sg_entries) {
3264                         chained = 1;
3265                         curr_sg = h->cmd_sg_list[cp->cmdindex];
3266                         sg_index = 0;
3267                 }
3268                 addr64 = (u64) sg_dma_address(sg);
3269                 len  = sg_dma_len(sg);
3270                 curr_sg->Addr = cpu_to_le64(addr64);
3271                 curr_sg->Len = cpu_to_le32(len);
3272                 curr_sg->Ext = cpu_to_le32(0);
3273                 curr_sg++;
3274         }
3275         (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3276
3277         if (use_sg + chained > h->maxSG)
3278                 h->maxSG = use_sg + chained;
3279
3280         if (chained) {
3281                 cp->Header.SGList = h->max_cmd_sg_entries;
3282                 cp->Header.SGTotal = cpu_to_le16(use_sg + 1);
3283                 if (hpsa_map_sg_chain_block(h, cp)) {
3284                         scsi_dma_unmap(cmd);
3285                         return -1;
3286                 }
3287                 return 0;
3288         }
3289
3290 sglist_finished:
3291
3292         cp->Header.SGList = (u8) use_sg;   /* no. SGs contig in this cmd */
3293         cp->Header.SGTotal = cpu_to_le16(use_sg); /* total sgs in cmd list */
3294         return 0;
3295 }
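
/*
 * Worked example (a sketch, not normative): with max_cmd_sg_entries ==
 * 32 and a 40-element scatter list, elements 0-30 are filled in the
 * command itself, slot 31 is turned into a chain descriptor by
 * hpsa_map_sg_chain_block(), and the remaining 9 elements land in
 * h->cmd_sg_list[cp->cmdindex].  The header then advertises SGList =
 * 32 and SGTotal = use_sg + 1 = 41 to cover the chain block.
 */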
3296
3297 #define IO_ACCEL_INELIGIBLE (1)
3298 static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3299 {
3300         int is_write = 0;
3301         u32 block;
3302         u32 block_cnt;
3303
3304         /* Rewrite 6- and 12-byte read/write CDBs as 10-byte CDBs if needed */
3305         switch (cdb[0]) {
3306         case WRITE_6:
3307         case WRITE_12:
3308                 is_write = 1;   /* fall through */
3309         case READ_6:
3310         case READ_12:
3311                 if (*cdb_len == 6) {
3312                         block = (((u32) cdb[2]) << 8) | cdb[3];
3313                         block_cnt = cdb[4];
3314                 } else {
3315                         BUG_ON(*cdb_len != 12);
3316                         block = (((u32) cdb[2]) << 24) |
3317                                 (((u32) cdb[3]) << 16) |
3318                                 (((u32) cdb[4]) << 8) |
3319                                 cdb[5];
3320                         block_cnt =
3321                                 (((u32) cdb[6]) << 24) |
3322                                 (((u32) cdb[7]) << 16) |
3323                                 (((u32) cdb[8]) << 8) |
3324                                 cdb[9];
3325                 }
3326                 if (block_cnt > 0xffff)
3327                         return IO_ACCEL_INELIGIBLE;
3328
3329                 cdb[0] = is_write ? WRITE_10 : READ_10;
3330                 cdb[1] = 0;
3331                 cdb[2] = (u8) (block >> 24);
3332                 cdb[3] = (u8) (block >> 16);
3333                 cdb[4] = (u8) (block >> 8);
3334                 cdb[5] = (u8) (block);
3335                 cdb[6] = 0;
3336                 cdb[7] = (u8) (block_cnt >> 8);
3337                 cdb[8] = (u8) (block_cnt);
3338                 cdb[9] = 0;
3339                 *cdb_len = 10;
3340                 break;
3341         }
3342         return 0;
3343 }
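
/*
 * Worked example (a sketch, not normative): READ_6 with cdb[2..4] =
 * {0x12, 0x34, 0x08} addresses block 0x1234 for 8 blocks.  The fixup
 * rewrites it as READ_10 with cdb[2..5] = {0x00, 0x00, 0x12, 0x34},
 * cdb[7..8] = {0x00, 0x08}, and *cdb_len changed from 6 to 10.
 */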
3344
3345 static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
3346         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3347         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3348 {
3349         struct scsi_cmnd *cmd = c->scsi_cmd;
3350         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3351         unsigned int len;
3352         unsigned int total_len = 0;
3353         struct scatterlist *sg;
3354         u64 addr64;
3355         int use_sg, i;
3356         struct SGDescriptor *curr_sg;
3357         u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3358
3359         /* TODO: implement chaining support */
3360         if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3361                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3362                 return IO_ACCEL_INELIGIBLE;
3363         }
3364
3365         BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3366
3367         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3368                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3369                 return IO_ACCEL_INELIGIBLE;
3370         }
3371
3372         c->cmd_type = CMD_IOACCEL1;
3373
3374         /* Adjust the DMA address to point to the accelerated command buffer */
3375         c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3376                                 (c->cmdindex * sizeof(*cp));
3377         BUG_ON(c->busaddr & 0x0000007F);
3378
3379         use_sg = scsi_dma_map(cmd);
3380         if (use_sg < 0) {
3381                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3382                 return use_sg;
3383         }
3384
3385         if (use_sg) {
3386                 curr_sg = cp->SG;
3387                 scsi_for_each_sg(cmd, sg, use_sg, i) {
3388                         addr64 = (u64) sg_dma_address(sg);
3389                         len  = sg_dma_len(sg);
3390                         total_len += len;
3391                         curr_sg->Addr = cpu_to_le64(addr64);
3392                         curr_sg->Len = cpu_to_le32(len);
3393                         curr_sg->Ext = cpu_to_le32(0);
3394                         curr_sg++;
3395                 }
3396                 (--curr_sg)->Ext = cpu_to_le32(HPSA_SG_LAST);
3397
3398                 switch (cmd->sc_data_direction) {
3399                 case DMA_TO_DEVICE:
3400                         control |= IOACCEL1_CONTROL_DATA_OUT;
3401                         break;
3402                 case DMA_FROM_DEVICE:
3403                         control |= IOACCEL1_CONTROL_DATA_IN;
3404                         break;
3405                 case DMA_NONE:
3406                         control |= IOACCEL1_CONTROL_NODATAXFER;
3407                         break;
3408                 default:
3409                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3410                                 cmd->sc_data_direction);
3411                         BUG();
3412                         break;
3413                 }
3414         } else {
3415                 control |= IOACCEL1_CONTROL_NODATAXFER;
3416         }
3417
3418         c->Header.SGList = use_sg;
3419         /* Fill out the command structure to submit */
3420         cp->dev_handle = cpu_to_le16(ioaccel_handle & 0xFFFF);
3421         cp->transfer_len = cpu_to_le32(total_len);
3422         cp->io_flags = cpu_to_le16(IOACCEL1_IOFLAGS_IO_REQ |
3423                         (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK));
3424         cp->control = cpu_to_le32(control);
3425         memcpy(cp->CDB, cdb, cdb_len);
3426         memcpy(cp->CISS_LUN, scsi3addr, 8);
3427         /* Tag was already set at init time. */
3428         enqueue_cmd_and_start_io(h, c);
3429         return 0;
3430 }
3431
3432 /*
3433  * Queue a command directly to a device behind the controller using the
3434  * I/O accelerator path.
3435  */
3436 static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3437         struct CommandList *c)
3438 {
3439         struct scsi_cmnd *cmd = c->scsi_cmd;
3440         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3441
3442         c->phys_disk = dev;
3443
3444         return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3445                 cmd->cmnd, cmd->cmd_len, dev->scsi3addr, dev);
3446 }
3447
3448 /*
3449  * Set encryption parameters for the ioaccel2 request
3450  */
3451 static void set_encrypt_ioaccel2(struct ctlr_info *h,
3452         struct CommandList *c, struct io_accel2_cmd *cp)
3453 {
3454         struct scsi_cmnd *cmd = c->scsi_cmd;
3455         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3456         struct raid_map_data *map = &dev->raid_map;
3457         u64 first_block;
3458
3459         BUG_ON(!(dev->offload_config && dev->offload_enabled));
3460
3461         /* Are we doing encryption on this device? */
3462         if (!(le16_to_cpu(map->flags) & RAID_MAP_FLAG_ENCRYPT_ON))
3463                 return;
3464         /* Set the data encryption key index. */
3465         cp->dekindex = map->dekindex;
3466
3467         /* Set the encryption enable flag, encoded into direction field. */
3468         cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3469
3470         /* Set encryption tweak values based on logical block address
3471          * If block size is 512, tweak value is LBA.
3472          * For other block sizes, tweak is (LBA * block size)/ 512)
3473          */
3474         switch (cmd->cmnd[0]) {
3475         /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3476         case WRITE_6:
3477         case READ_6:
3478                 first_block = get_unaligned_be16(&cmd->cmnd[2]);
3479                 break;
3480         case WRITE_10:
3481         case READ_10:
3482         /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3483         case WRITE_12:
3484         case READ_12:
3485                 first_block = get_unaligned_be32(&cmd->cmnd[2]);
3486                 break;
3487         case WRITE_16:
3488         case READ_16:
3489                 first_block = get_unaligned_be64(&cmd->cmnd[2]);
3490                 break;
3491         default:
3492                 dev_err(&h->pdev->dev,
3493                         "ERROR: %s: size (0x%x) not supported for encryption\n",
3494                         __func__, cmd->cmnd[0]);
3495                 BUG();
3496                 break;
3497         }
3498
3499         if (le32_to_cpu(map->volume_blk_size) != 512)
3500                 first_block = first_block *
3501                                 le32_to_cpu(map->volume_blk_size)/512;
3502
3503         cp->tweak_lower = cpu_to_le32(first_block);
3504         cp->tweak_upper = cpu_to_le32(first_block >> 32);
3505 }
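
/*
 * Worked example (a sketch, not normative): a READ_10 at LBA 1000 on a
 * volume with 4096-byte blocks gets tweak 1000 * 4096 / 512 = 8000; on
 * a 512-byte volume the tweak is the LBA itself.  The 64-bit result is
 * split across tweak_lower and tweak_upper.
 */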
3506
3507 static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3508         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3509         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3510 {
3511         struct scsi_cmnd *cmd = c->scsi_cmd;
3512         struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3513         struct ioaccel2_sg_element *curr_sg;
3514         int use_sg, i;
3515         struct scatterlist *sg;
3516         u64 addr64;
3517         u32 len;
3518         u32 total_len = 0;
3519
3520         if (scsi_sg_count(cmd) > h->ioaccel_maxsg) {
3521                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3522                 return IO_ACCEL_INELIGIBLE;
3523         }
3524
3525         if (fixup_ioaccel_cdb(cdb, &cdb_len)) {
3526                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3527                 return IO_ACCEL_INELIGIBLE;
3528         }
3529
3530         c->cmd_type = CMD_IOACCEL2;
3531         /* Adjust the DMA address to point to the accelerated command buffer */
3532         c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3533                                 (c->cmdindex * sizeof(*cp));
3534         BUG_ON(c->busaddr & 0x0000007F);
3535
3536         memset(cp, 0, sizeof(*cp));
3537         cp->IU_type = IOACCEL2_IU_TYPE;
3538
3539         use_sg = scsi_dma_map(cmd);
3540         if (use_sg < 0) {
3541                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3542                 return use_sg;
3543         }
3544
3545         if (use_sg) {
3546                 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3547                 curr_sg = cp->sg;
3548                 scsi_for_each_sg(cmd, sg, use_sg, i) {
3549                         addr64 = (u64) sg_dma_address(sg);
3550                         len  = sg_dma_len(sg);
3551                         total_len += len;
3552                         curr_sg->address = cpu_to_le64(addr64);
3553                         curr_sg->length = cpu_to_le32(len);
3554                         curr_sg->reserved[0] = 0;
3555                         curr_sg->reserved[1] = 0;
3556                         curr_sg->reserved[2] = 0;
3557                         curr_sg->chain_indicator = 0;
3558                         curr_sg++;
3559                 }
3560
3561                 switch (cmd->sc_data_direction) {
3562                 case DMA_TO_DEVICE:
3563                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3564                         cp->direction |= IOACCEL2_DIR_DATA_OUT;
3565                         break;
3566                 case DMA_FROM_DEVICE:
3567                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3568                         cp->direction |= IOACCEL2_DIR_DATA_IN;
3569                         break;
3570                 case DMA_NONE:
3571                         cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3572                         cp->direction |= IOACCEL2_DIR_NO_DATA;
3573                         break;
3574                 default:
3575                         dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3576                                 cmd->sc_data_direction);
3577                         BUG();
3578                         break;
3579                 }
3580         } else {
3581                 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3582                 cp->direction |= IOACCEL2_DIR_NO_DATA;
3583         }
3584
3585         /* Set encryption parameters, if necessary */
3586         set_encrypt_ioaccel2(h, c, cp);
3587
3588         cp->scsi_nexus = cpu_to_le32(ioaccel_handle);
3589         cp->Tag = cpu_to_le32(c->cmdindex << DIRECT_LOOKUP_SHIFT);
3590         memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3591
3592         /* fill in sg elements */
3593         cp->sg_count = (u8) use_sg;
3594
3595         cp->data_len = cpu_to_le32(total_len);
3596         cp->err_ptr = cpu_to_le64(c->busaddr +
3597                         offsetof(struct io_accel2_cmd, error_data));
3598         cp->err_len = cpu_to_le32(sizeof(cp->error_data));
3599
3600         enqueue_cmd_and_start_io(h, c);
3601         return 0;
3602 }
3603
3604 /*
3605  * Queue a command to the correct I/O accelerator path.
3606  */
3607 static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3608         struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3609         u8 *scsi3addr, struct hpsa_scsi_dev_t *phys_disk)
3610 {
3611         /* Try to honor the device's queue depth */
3612         if (atomic_inc_return(&phys_disk->ioaccel_cmds_out) >
3613                                         phys_disk->queue_depth) {
3614                 atomic_dec(&phys_disk->ioaccel_cmds_out);
3615                 return IO_ACCEL_INELIGIBLE;
3616         }
3617         if (h->transMethod & CFGTBL_Trans_io_accel1)
3618                 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3619                                                 cdb, cdb_len, scsi3addr,
3620                                                 phys_disk);
3621         else
3622                 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3623                                                 cdb, cdb_len, scsi3addr,
3624                                                 phys_disk);
3625 }
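
/*
 * Illustrative sketch only (not driver code): the queue-depth gate
 * above is the lock-free "increment, test, undo" pattern on an atomic
 * counter, which keeps the submission path free of spin locks.
 */
#if 0
static int example_reserve_slot(atomic_t *cmds_out, int queue_depth)
{
	if (atomic_inc_return(cmds_out) > queue_depth) {
		atomic_dec(cmds_out);	/* over the limit: undo and punt */
		return -EBUSY;
	}
	return 0;
}
#endif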
3626
3627 static void raid_map_helper(struct raid_map_data *map,
3628                 int offload_to_mirror, u32 *map_index, u32 *current_group)
3629 {
3630         if (offload_to_mirror == 0)  {
3631                 /* use physical disk in the first mirrored group. */
3632                 *map_index %= le16_to_cpu(map->data_disks_per_row);
3633                 return;
3634         }
3635         do {
3636                 /* determine mirror group that *map_index indicates */
3637                 *current_group = *map_index /
3638                         le16_to_cpu(map->data_disks_per_row);
3639                 if (offload_to_mirror == *current_group)
3640                         continue;
3641                 if (*current_group < le16_to_cpu(map->layout_map_count) - 1) {
3642                         /* select map index from next group */
3643                         *map_index += le16_to_cpu(map->data_disks_per_row);
3644                         (*current_group)++;
3645                 } else {
3646                         /* select map index from first group */
3647                         *map_index %= le16_to_cpu(map->data_disks_per_row);
3648                         *current_group = 0;
3649                 }
3650         } while (offload_to_mirror != *current_group);
3651 }
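
/*
 * Worked example (a sketch, not normative): with data_disks_per_row ==
 * 4, layout_map_count == 3, and an incoming *map_index of 5 (mirror
 * group 1), offload_to_mirror == 0 folds the index back to 5 % 4 = 1;
 * offload_to_mirror == 2 advances it one group at a time until it
 * lands in group 2, leaving *map_index = 9.
 */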
3652
3653 /*
3654  * Attempt to perform offload RAID mapping for a logical volume I/O.
3655  */
3656 static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3657         struct CommandList *c)
3658 {
3659         struct scsi_cmnd *cmd = c->scsi_cmd;
3660         struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3661         struct raid_map_data *map = &dev->raid_map;
3662         struct raid_map_disk_data *dd = &map->data[0];
3663         int is_write = 0;
3664         u32 map_index;
3665         u64 first_block, last_block;
3666         u32 block_cnt;
3667         u32 blocks_per_row;
3668         u64 first_row, last_row;
3669         u32 first_row_offset, last_row_offset;
3670         u32 first_column, last_column;
3671         u64 r0_first_row, r0_last_row;
3672         u32 r5or6_blocks_per_row;
3673         u64 r5or6_first_row, r5or6_last_row;
3674         u32 r5or6_first_row_offset, r5or6_last_row_offset;
3675         u32 r5or6_first_column, r5or6_last_column;
3676         u32 total_disks_per_row;
3677         u32 stripesize;
3678         u32 first_group, last_group, current_group;
3679         u32 map_row;
3680         u32 disk_handle;
3681         u64 disk_block;
3682         u32 disk_block_cnt;
3683         u8 cdb[16];
3684         u8 cdb_len;
3685         u16 strip_size;
3686 #if BITS_PER_LONG == 32
3687         u64 tmpdiv;
3688 #endif
3689         int offload_to_mirror;
3690
3691         BUG_ON(!(dev->offload_config && dev->offload_enabled));
3692
3693         /* check for valid opcode, get LBA and block count */
3694         switch (cmd->cmnd[0]) {
3695         case WRITE_6:
3696                 is_write = 1;   /* fall through */
3697         case READ_6:
3698                 first_block =
3699                         (((u64) cmd->cmnd[2]) << 8) |
3700                         cmd->cmnd[3];
3701                 block_cnt = cmd->cmnd[4];
3702                 if (block_cnt == 0)
3703                         block_cnt = 256;
3704                 break;
3705         case WRITE_10:
3706                 is_write = 1;   /* fall through */
3707         case READ_10:
3708                 first_block =
3709                         (((u64) cmd->cmnd[2]) << 24) |
3710                         (((u64) cmd->cmnd[3]) << 16) |
3711                         (((u64) cmd->cmnd[4]) << 8) |
3712                         cmd->cmnd[5];
3713                 block_cnt =
3714                         (((u32) cmd->cmnd[7]) << 8) |
3715                         cmd->cmnd[8];
3716                 break;
3717         case WRITE_12:
3718                 is_write = 1;   /* fall through */
3719         case READ_12:
3720                 first_block =
3721                         (((u64) cmd->cmnd[2]) << 24) |
3722                         (((u64) cmd->cmnd[3]) << 16) |
3723                         (((u64) cmd->cmnd[4]) << 8) |
3724                         cmd->cmnd[5];
3725                 block_cnt =
3726                         (((u32) cmd->cmnd[6]) << 24) |
3727                         (((u32) cmd->cmnd[7]) << 16) |
3728                         (((u32) cmd->cmnd[8]) << 8) |
3729                         cmd->cmnd[9];
3730                 break;
3731         case WRITE_16:
3732                 is_write = 1;   /* fall through */
3733         case READ_16:
3734                 first_block =
3735                         (((u64) cmd->cmnd[2]) << 56) |
3736                         (((u64) cmd->cmnd[3]) << 48) |
3737                         (((u64) cmd->cmnd[4]) << 40) |
3738                         (((u64) cmd->cmnd[5]) << 32) |
3739                         (((u64) cmd->cmnd[6]) << 24) |
3740                         (((u64) cmd->cmnd[7]) << 16) |
3741                         (((u64) cmd->cmnd[8]) << 8) |
3742                         cmd->cmnd[9];
3743                 block_cnt =
3744                         (((u32) cmd->cmnd[10]) << 24) |
3745                         (((u32) cmd->cmnd[11]) << 16) |
3746                         (((u32) cmd->cmnd[12]) << 8) |
3747                         cmd->cmnd[13];
3748                 break;
3749         default:
3750                 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
3751         }
3752         last_block = first_block + block_cnt - 1;
3753
3754         /* check for write to non-RAID-0 */
3755         if (is_write && dev->raid_level != 0)
3756                 return IO_ACCEL_INELIGIBLE;
3757
3758         /* check for invalid block or wraparound */
3759         if (last_block >= le64_to_cpu(map->volume_blk_cnt) ||
3760                 last_block < first_block)
3761                 return IO_ACCEL_INELIGIBLE;
3762
3763         /* calculate stripe information for the request */
3764         blocks_per_row = le16_to_cpu(map->data_disks_per_row) *
3765                                 le16_to_cpu(map->strip_size);
3766         strip_size = le16_to_cpu(map->strip_size);
3767 #if BITS_PER_LONG == 32
3768         tmpdiv = first_block;
3769         (void) do_div(tmpdiv, blocks_per_row);
3770         first_row = tmpdiv;
3771         tmpdiv = last_block;
3772         (void) do_div(tmpdiv, blocks_per_row);
3773         last_row = tmpdiv;
3774         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3775         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3776         tmpdiv = first_row_offset;
3777         (void) do_div(tmpdiv, strip_size);
3778         first_column = tmpdiv;
3779         tmpdiv = last_row_offset;
3780         (void) do_div(tmpdiv, strip_size);
3781         last_column = tmpdiv;
3782 #else
3783         first_row = first_block / blocks_per_row;
3784         last_row = last_block / blocks_per_row;
3785         first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3786         last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3787         first_column = first_row_offset / strip_size;
3788         last_column = last_row_offset / strip_size;
3789 #endif
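
	/*
	 * Worked example (a sketch, not normative): with
	 * data_disks_per_row == 4 and strip_size == 128,
	 * blocks_per_row == 512.  A request for blocks 1000-1007 gives
	 * first_row = last_row = 1 and first_column = last_column =
	 * (1000 - 512) / 128 = 3, so it stays on one disk and remains
	 * eligible for ioaccel.
	 */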
3790
3791         /* if this isn't a single row/column then give to the controller */
3792         if ((first_row != last_row) || (first_column != last_column))
3793                 return IO_ACCEL_INELIGIBLE;
3794
3795         /* proceeding with driver mapping */
3796         total_disks_per_row = le16_to_cpu(map->data_disks_per_row) +
3797                                 le16_to_cpu(map->metadata_disks_per_row);
3798         map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3799                                 le16_to_cpu(map->row_cnt);
3800         map_index = (map_row * total_disks_per_row) + first_column;
3801
3802         switch (dev->raid_level) {
3803         case HPSA_RAID_0:
3804                 break; /* nothing special to do */
3805         case HPSA_RAID_1:
3806                 /* Handles load balance across RAID 1 members.
3807                  * (2-drive R1 and R10 with even # of drives.)
3808                  * Appropriate for SSDs, not optimal for HDDs.
3809                  */
3810                 BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
3811                 if (dev->offload_to_mirror)
3812                         map_index += le16_to_cpu(map->data_disks_per_row);
3813                 dev->offload_to_mirror = !dev->offload_to_mirror;
3814                 break;
3815         case HPSA_RAID_ADM:
3816                 /* Handles N-way mirrors (R1-ADM)
3817                  * and R10 with # of drives divisible by 3.
3818                  */
3819                 BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
3820
3821                 offload_to_mirror = dev->offload_to_mirror;
3822                 raid_map_helper(map, offload_to_mirror,
3823                                 &map_index, &current_group);
3824                 /* set mirror group to use next time */
3825                 offload_to_mirror =
3826                         (offload_to_mirror >=
3827                         le16_to_cpu(map->layout_map_count) - 1)
3828                         ? 0 : offload_to_mirror + 1;
3829                 dev->offload_to_mirror = offload_to_mirror;
3830                 /* Avoid direct use of dev->offload_to_mirror within this
3831                  * function since multiple threads might simultaneously
3832                  * increment it beyond the range of map->layout_map_count - 1.
3833                  */
3834                 break;
3835         case HPSA_RAID_5:
3836         case HPSA_RAID_6:
3837                 if (le16_to_cpu(map->layout_map_count) <= 1)
3838                         break;
3839
3840                 /* Verify first and last block are in same RAID group */
3841                 r5or6_blocks_per_row =
3842                         le16_to_cpu(map->strip_size) *
3843                         le16_to_cpu(map->data_disks_per_row);
3844                 BUG_ON(r5or6_blocks_per_row == 0);
3845                 stripesize = r5or6_blocks_per_row *
3846                         le16_to_cpu(map->layout_map_count);
3847 #if BITS_PER_LONG == 32
3848                 tmpdiv = first_block;
3849                 first_group = do_div(tmpdiv, stripesize);
3850                 tmpdiv = first_group;
3851                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3852                 first_group = tmpdiv;
3853                 tmpdiv = last_block;
3854                 last_group = do_div(tmpdiv, stripesize);
3855                 tmpdiv = last_group;
3856                 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3857                 last_group = tmpdiv;
3858 #else
3859                 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
3860                 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
3861 #endif
3862                 if (first_group != last_group)
3863                         return IO_ACCEL_INELIGIBLE;
3864
3865                 /* Verify request is in a single row of RAID 5/6 */
3866 #if BITS_PER_LONG == 32
3867                 tmpdiv = first_block;
3868                 (void) do_div(tmpdiv, stripesize);
3869                 first_row = r5or6_first_row = r0_first_row = tmpdiv;
3870                 tmpdiv = last_block;
3871                 (void) do_div(tmpdiv, stripesize);
3872                 r5or6_last_row = r0_last_row = tmpdiv;
3873 #else
3874                 first_row = r5or6_first_row = r0_first_row =
3875                                                 first_block / stripesize;
3876                 r5or6_last_row = r0_last_row = last_block / stripesize;
3877 #endif
3878                 if (r5or6_first_row != r5or6_last_row)
3879                         return IO_ACCEL_INELIGIBLE;
3880
3882                 /* Verify request is in a single column */
3883 #if BITS_PER_LONG == 32
3884                 tmpdiv = first_block;
3885                 first_row_offset = do_div(tmpdiv, stripesize);
3886                 tmpdiv = first_row_offset;
3887                 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
3888                 r5or6_first_row_offset = first_row_offset;
3889                 tmpdiv = last_block;
3890                 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
3891                 tmpdiv = r5or6_last_row_offset;
3892                 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
3893                 tmpdiv = r5or6_first_row_offset;
3894                 (void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
3895                 first_column = r5or6_first_column = tmpdiv;
3896                 tmpdiv = r5or6_last_row_offset;
3897                 (void) do_div(tmpdiv, le16_to_cpu(map->strip_size));
3898                 r5or6_last_column = tmpdiv;
3899 #else
3900                 first_row_offset = r5or6_first_row_offset =
3901                         (u32)((first_block % stripesize) %
3902                                                 r5or6_blocks_per_row);
3903
3904                 r5or6_last_row_offset =
3905                         (u32)((last_block % stripesize) %
3906                                                 r5or6_blocks_per_row);
3907
3908                 first_column = r5or6_first_column =
3909                         r5or6_first_row_offset / le16_to_cpu(map->strip_size);
3910                 r5or6_last_column =
3911                         r5or6_last_row_offset / le16_to_cpu(map->strip_size);
3912 #endif
3913                 if (r5or6_first_column != r5or6_last_column)
3914                         return IO_ACCEL_INELIGIBLE;
3915
3916                 /* Request is eligible */
3917                 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3918                         le16_to_cpu(map->row_cnt);
3919
3920                 map_index = (first_group *
3921                         (le16_to_cpu(map->row_cnt) * total_disks_per_row)) +
3922                         (map_row * total_disks_per_row) + first_column;
3923                 break;
3924         default:
3925                 return IO_ACCEL_INELIGIBLE;
3926         }
3927
3928         c->phys_disk = dev->phys_disk[map_index];
3929
3930         disk_handle = dd[map_index].ioaccel_handle;
3931         disk_block = le64_to_cpu(map->disk_starting_blk) +
3932                         first_row * le16_to_cpu(map->strip_size) +
3933                         (first_row_offset - first_column *
3934                         le16_to_cpu(map->strip_size));
3935         disk_block_cnt = block_cnt;
3936
3937         /* handle differing logical/physical block sizes */
3938         if (map->phys_blk_shift) {
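                     /*
                      * Assumed example: 4096-byte logical blocks carried on
                      * 512-byte physical sectors give a phys_blk_shift of 3,
                      * scaling both the starting block and the count by 8.
                      */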
3939                 disk_block <<= map->phys_blk_shift;
3940                 disk_block_cnt <<= map->phys_blk_shift;
3941         }
3942         BUG_ON(disk_block_cnt > 0xffff);
3943
3944         /* build the new CDB for the physical disk I/O */
3945         if (disk_block > 0xffffffff) {
3946                 cdb[0] = is_write ? WRITE_16 : READ_16;
3947                 cdb[1] = 0;
3948                 cdb[2] = (u8) (disk_block >> 56);
3949                 cdb[3] = (u8) (disk_block >> 48);
3950                 cdb[4] = (u8) (disk_block >> 40);
3951                 cdb[5] = (u8) (disk_block >> 32);
3952                 cdb[6] = (u8) (disk_block >> 24);
3953                 cdb[7] = (u8) (disk_block >> 16);
3954                 cdb[8] = (u8) (disk_block >> 8);
3955                 cdb[9] = (u8) (disk_block);
3956                 cdb[10] = (u8) (disk_block_cnt >> 24);
3957                 cdb[11] = (u8) (disk_block_cnt >> 16);
3958                 cdb[12] = (u8) (disk_block_cnt >> 8);
3959                 cdb[13] = (u8) (disk_block_cnt);
3960                 cdb[14] = 0;
3961                 cdb[15] = 0;
3962                 cdb_len = 16;
3963         } else {
3964                 cdb[0] = is_write ? WRITE_10 : READ_10;
3965                 cdb[1] = 0;
3966                 cdb[2] = (u8) (disk_block >> 24);
3967                 cdb[3] = (u8) (disk_block >> 16);
3968                 cdb[4] = (u8) (disk_block >> 8);
3969                 cdb[5] = (u8) (disk_block);
3970                 cdb[6] = 0;
3971                 cdb[7] = (u8) (disk_block_cnt >> 8);
3972                 cdb[8] = (u8) (disk_block_cnt);
3973                 cdb[9] = 0;
3974                 cdb_len = 10;
3975         }
3976         return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
3977                                                 dev->scsi3addr,
3978                                                 dev->phys_disk[map_index]);
3979 }
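
     /*
      * A minimal sketch (not used by the driver): the open-coded shift-and-mask
      * CDB construction above can equivalently be written with the
      * put_unaligned_be*() helpers from <asm/unaligned.h>, which this file
      * already includes.  The helper name below is hypothetical and is shown
      * only to illustrate the big-endian READ(16)/WRITE(16) CDB layout.
      */
     static inline u8 example_build_rw16_cdb(u8 *cdb, u64 lba, u32 block_cnt,
                                             int is_write)
     {
             cdb[0] = is_write ? WRITE_16 : READ_16;
             cdb[1] = 0;
             put_unaligned_be64(lba, &cdb[2]);        /* bytes 2-9: big-endian LBA */
             put_unaligned_be32(block_cnt, &cdb[10]); /* bytes 10-13: block count */
             cdb[14] = 0;                             /* group number */
             cdb[15] = 0;                             /* control byte */
             return 16;                               /* CDB length */
     }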
3980
3981 /* Submit commands down the "normal" RAID stack path */
3982 static int hpsa_ciss_submit(struct ctlr_info *h,
3983         struct CommandList *c, struct scsi_cmnd *cmd,
3984         unsigned char scsi3addr[])
3985 {
3986         cmd->host_scribble = (unsigned char *) c;
3987         c->cmd_type = CMD_SCSI;
3988         c->scsi_cmd = cmd;
3989         c->Header.ReplyQueue = 0;  /* unused in simple mode */
3990         memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
3991         c->Header.tag = cpu_to_le64((c->cmdindex << DIRECT_LOOKUP_SHIFT));
3992
3993         /* Fill in the request block... */
3994
3995         c->Request.Timeout = 0;
3996         memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
3997         BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
3998         c->Request.CDBLen = cmd->cmd_len;
3999         memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
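             /* Translate the midlayer's DMA direction into the controller's
              * transfer-direction attribute.
              */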
4000         switch (cmd->sc_data_direction) {
4001         case DMA_TO_DEVICE:
4002                 c->Request.type_attr_dir =
4003                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_WRITE);
4004                 break;
4005         case DMA_FROM_DEVICE:
4006                 c->Request.type_attr_dir =
4007                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_READ);
4008                 break;
4009         case DMA_NONE:
4010                 c->Request.type_attr_dir =
4011                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_NONE);
4012                 break;
4013         case DMA_BIDIRECTIONAL:
4014                 /* This can happen if a buggy application does a SCSI passthru
4015                  * and sets both inlen and outlen to non-zero (see
4016                  * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command()).
4017                  */
4018
4019                 c->Request.type_attr_dir =
4020                         TYPE_ATTR_DIR(TYPE_CMD, ATTR_SIMPLE, XFER_RSVD);
4021                 /* This is technically wrong, and hpsa controllers should
4022                  * reject it with CMD_INVALID, which is the most correct
4023                  * response, but non-fibre backends appear to let it
4024                  * slide by, and give the same results as if this field
4025                  * were set correctly.  Either way is acceptable for
4026                  * our purposes here.
4027                  */
4028
4029                 break;
4030
4031         default:
4032                 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
4033                         cmd->sc_data_direction);
4034                 BUG();
4035                 break;
4036         }
4037
4038         if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
4039                 cmd_free(h, c);
4040                 return SCSI_MLQUEUE_HOST_BUSY;
4041         }
4042         enqueue_cmd_and_start_io(h, c);
4043         /* the command will come back via the interrupt handler in complete_scsi_command() */
4044         return 0;
4045 }
4046
4047 static void hpsa_command_resubmit_worker(struct work_struct *work)
4048 {
4049         struct scsi_cmnd *cmd;
4050         struct hpsa_scsi_dev_t *dev;
4051         struct CommandList *c =
4052                         container_of(work, struct CommandList, work);
4053
4054         cmd = c->scsi_cmd;
4055         dev = cmd->device->hostdata;
4056         if (!dev) {
4057                 cmd->result = DID_NO_CONNECT << 16;
4058                 cmd->scsi_done(cmd);
4059                 return;
4060         }
4061         if (hpsa_ciss_submit(c->h, c, cmd, dev->scsi3addr)) {
4062                 /*
4063                  * If we get here, it means dma mapping failed. Try
4064                  * again via scsi mid layer, which will then get
4065                  * SCSI_MLQUEUE_HOST_BUSY.
4066                  */
4067                 cmd->result = DID_IMM_RETRY << 16;
4068                 cmd->scsi_done(cmd);
4069         }
4070 }
4071
4072 /* Running in host_lock-less mode: no struct Scsi_Host->host_lock is taken */
4073 static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
4074 {
4075         struct ctlr_info *h;
4076         struct hpsa_scsi_dev_t *dev;
4077         unsigned char scsi3addr[8];
4078         struct CommandList *c;
4079         int rc = 0;
4080
4081         /* Get the ptr to our adapter structure out of cmd->host. */
4082         h = sdev_to_hba(cmd->device);
4083         dev = cmd->device->hostdata;
4084         if (!dev) {
4085                 cmd->result = DID_NO_CONNECT << 16;
4086                 cmd->scsi_done(cmd);
4087                 return 0;
4088         }
4089         memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
4090
4091         if (unlikely(lockup_detected(h))) {
4092                 cmd->result = DID_ERROR << 16;
4093                 cmd->scsi_done(cmd);
4094                 return 0;
4095         }
4096         c = cmd_alloc(h);
4097         if (c == NULL) {                        /* trouble... */
4098                 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4099                 return SCSI_MLQUEUE_HOST_BUSY;
4100         }
4101
4102         /* Call alternate submit routine for I/O accelerated commands.
4103          * Retries always go down the normal I/O path.
4104          */
4105         if (likely(cmd->retries == 0 &&
4106                 cmd->request->cmd_type == REQ_TYPE_FS &&
4107                 h->acciopath_status)) {
4108
4109                 cmd->host_scribble = (unsigned char *) c;
4110                 c->cmd_type = CMD_SCSI;
4111                 c->scsi_cmd = cmd;
4112
4113                 if (dev->offload_enabled) {
4114                         rc = hpsa_scsi_ioaccel_raid_map(h, c);
4115                         if (rc == 0)
4116                                 return 0; /* Sent on ioaccel path */
4117                         if (rc < 0) {   /* scsi_dma_map failed. */
4118                                 cmd_free(h, c);
4119                                 return SCSI_MLQUEUE_HOST_BUSY;
4120                         }
4121                 } else if (dev->ioaccel_handle) {
4122                         rc = hpsa_scsi_ioaccel_direct_map(h, c);
4123                         if (rc == 0)
4124                                 return 0; /* Sent on direct map path */
4125                         if (rc < 0) {   /* scsi_dma_map failed. */
4126                                 cmd_free(h, c);
4127                                 return SCSI_MLQUEUE_HOST_BUSY;
4128                         }
4129                 }
4130         }
4131         return hpsa_ciss_submit(h, c, cmd, scsi3addr);
4132 }
4133
4134 static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
4135 {
4136         unsigned long flags;
4137
4138         /*
4139          * Don't let rescans be initiated on a controller known
4140          * to be locked up.  If the controller locks up *during*
4141          * a rescan, that thread is probably hosed, but at least
4142          * we can prevent new rescan threads from piling up on a
4143          * locked up controller.
4144          */
4145         if (unlikely(lockup_detected(h))) {
4146                 spin_lock_irqsave(&h->scan_lock, flags);
4147                 h->scan_finished = 1;
4148                 wake_up_all(&h->scan_wait_queue);
4149                 spin_unlock_irqrestore(&h->scan_lock, flags);
4150                 return 1;
4151         }
4152         return 0;
4153 }
4154
4155 static void hpsa_scan_start(struct Scsi_Host *sh)
4156 {
4157         struct ctlr_info *h = shost_to_hba(sh);
4158         unsigned long flags;
4159
4160         if (do_not_scan_if_controller_locked_up(h))
4161                 return;
4162
4163         /* wait until any scan already in progress is finished. */
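             /* The lock is retaken and scan_finished rechecked after every
              * wakeup: another waiter may have claimed the scan (clearing
              * scan_finished again) between the wake-up and this check.
              */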
4164         while (1) {
4165                 spin_lock_irqsave(&h->scan_lock, flags);
4166                 if (h->scan_finished)
4167                         break;
4168                 spin_unlock_irqrestore(&h->scan_lock, flags);
4169                 wait_event(h->scan_wait_queue, h->scan_finished);
4170                 /* Note: We don't need to worry about a race between this
4171                  * thread and driver unload because the midlayer will
4172                  * have incremented the reference count, so unload won't
4173                  * happen if we're in here.
4174                  */
4175         }
4176         h->scan_finished = 0; /* mark scan as in progress */
4177         spin_unlock_irqrestore(&h->scan_lock, flags);
4178
4179         if (do_not_scan_if_controller_locked_up(h))
4180                 return;
4181
4182         hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4183
4184         spin_lock_irqsave(&h->scan_lock, flags);
4185         h->scan_finished = 1; /* mark scan as finished. */
4186         wake_up_all(&h->scan_wait_queue);
4187         spin_unlock_irqrestore(&h->scan_lock, flags);
4188 }
4189
4190 static int hpsa_change_queue_depth(struct scsi_device *sdev, int qdepth)
4191 {
4192         struct hpsa_scsi_dev_t *logical_drive = sdev->hostdata;
4193
4194         if (!logical_drive)
4195                 return -ENODEV;
4196
4197         if (qdepth < 1)
4198                 qdepth = 1;
4199         else if (qdepth > logical_drive->queue_depth)
4200                 qdepth = logical_drive->queue_depth;
4201
4202         return scsi_change_queue_depth(sdev, qdepth);
4203 }
4204
4205 static int hpsa_scan_finished(struct Scsi_Host *sh,
4206         unsigned long elapsed_time)
4207 {
4208         struct ctlr_info *h = shost_to_hba(sh);
4209         unsigned long flags;
4210         int finished;
4211
4212         spin_lock_irqsave(&h->scan_lock, flags);
4213         finished = h->scan_finished;
4214         spin_unlock_irqrestore(&h->scan_lock, flags);
4215         return finished;
4216 }
4217
4218 static void hpsa_unregister_scsi(struct ctlr_info *h)
4219 {
4220         /* we are being forcibly unloaded, and may not refuse. */
4221         scsi_remove_host(h->scsi_host);
4222         scsi_host_put(h->scsi_host);
4223         h->scsi_host = NULL;
4224 }
4225
4226 static int hpsa_register_scsi(struct ctlr_info *h)
4227 {
4228         struct Scsi_Host *sh;
4229         int error;
4230
4231         sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4232         if (sh == NULL)
4233                 goto fail;
4234
4235         sh->io_port = 0;
4236         sh->n_io_port = 0;
4237         sh->this_id = -1;
4238         sh->max_channel = 3;
4239         sh->max_cmd_len = MAX_COMMAND_SIZE;
4240         sh->max_lun = HPSA_MAX_LUN;
4241         sh->max_id = HPSA_MAX_LUN;
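             /* Leave headroom below nr_cmds so that aborts, driver-internal
              * commands, and the atomically counted passthru ioctls can
              * always obtain a command slot.
              */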
4242         sh->can_queue = h->nr_cmds -
4243                         HPSA_CMDS_RESERVED_FOR_ABORTS -
4244                         HPSA_CMDS_RESERVED_FOR_DRIVER -
4245                         HPSA_MAX_CONCURRENT_PASSTHRUS;
4246         sh->cmd_per_lun = sh->can_queue;
4247         sh->sg_tablesize = h->maxsgentries;
4248         h->scsi_host = sh;
4249         sh->hostdata[0] = (unsigned long) h;
4250         sh->irq = h->intr[h->intr_mode];
4251         sh->unique_id = sh->irq;
4252         error = scsi_add_host(sh, &h->pdev->dev);
4253         if (error)
4254                 goto fail_host_put;
4255         scsi_scan_host(sh);
4256         return 0;
4257
4258  fail_host_put:
4259         dev_err(&h->pdev->dev, "%s: scsi_add_host failed for controller %d\n",
4260                 __func__, h->ctlr);
4261         scsi_host_put(sh);
4262         return error;
4263  fail:
4264         dev_err(&h->pdev->dev, "%s: scsi_host_alloc failed for controller %d\n",
4265                 __func__, h->ctlr);
4266         return -ENOMEM;
4267 }
4268
4269 static int wait_for_device_to_become_ready(struct ctlr_info *h,
4270         unsigned char lunaddr[])
4271 {
4272         int rc;
4273         int count = 0;
4274         int waittime = 1; /* seconds */
4275         struct CommandList *c;
4276
4277         c = cmd_alloc(h);
4278         if (!c) {
4279                 dev_warn(&h->pdev->dev,
4280                         "out of memory in wait_for_device_to_become_ready.\n");
4281                 return IO_ERROR;
4282         }
4283
4284         /* Send test unit ready until device ready, or give up. */
4285         while (count < HPSA_TUR_RETRY_LIMIT) {
4286
4287                 /* Wait for a bit.  Do this first, because if we send
4288                  * the TUR right away, the reset will just abort it.
4289                  */
4290                 msleep(1000 * waittime);
4291                 count++;
4292                 rc = 0; /* Device ready. */
4293
4294                 /* Increase wait time with each try, up to a point. */
4295                 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4296                         waittime = waittime * 2;
4297
4298                 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4299                 (void) fill_cmd(c, TEST_UNIT_READY, h,
4300                                 NULL, 0, 0, lunaddr, TYPE_CMD);
4301                 hpsa_scsi_do_simple_cmd_core(h, c);
4302                 /* no unmap needed here because no data xfer. */
4303
4304                 if (c->err_info->CommandStatus == CMD_SUCCESS)
4305                         break;
4306
4307                 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4308                         c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4309                         (c->err_info->SenseInfo[2] == NO_SENSE ||
4310                         c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4311                         break;
4312
4313                 dev_warn(&h->pdev->dev,
4314                         "waiting %d secs for device to become ready.\n", waittime);
4315                 rc = 1; /* device not ready. */
4316         }
4317
4318         if (rc)
4319                 dev_warn(&h->pdev->dev, "giving up on device.\n");
4320         else
4321                 dev_warn(&h->pdev->dev, "device is ready.\n");
4322
4323         cmd_free(h, c);
4324         return rc;
4325 }
4326
4327 /* Need at least one of these error handlers to keep ../scsi/hosts.c from
4328  * complaining.  Doing a host- or bus-reset can't do anything good here.
4329  */
4330 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4331 {
4332         int rc;
4333         struct ctlr_info *h;
4334         struct hpsa_scsi_dev_t *dev;
4335
4336         /* find the controller to which the command to be aborted was sent */
4337         h = sdev_to_hba(scsicmd->device);
4338         if (h == NULL) /* paranoia */
4339                 return FAILED;
4340         dev = scsicmd->device->hostdata;
4341         if (!dev) {
4342                 dev_err(&h->pdev->dev,
4343                         "hpsa_eh_device_reset_handler: device lookup failed.\n");
4344                 return FAILED;
4345         }
4346         dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
4347                 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4348         /* send a reset to the SCSI LUN which the command was sent to */
4349         rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
4350         if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4351                 return SUCCESS;
4352
4353         dev_warn(&h->pdev->dev, "resetting device failed.\n");
4354         return FAILED;
4355 }
4356
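     /* Byte-swap each 32-bit half of the 64-bit abort tag in place.  Some
      * controller firmware interprets the two tag words in the opposite
      * byte order, which is why hpsa_send_abort_both_ways() below tries
      * the abort with both forms of the tag.
      */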
4357 static void swizzle_abort_tag(u8 *tag)
4358 {
4359         u8 original_tag[8];
4360
4361         memcpy(original_tag, tag, 8);
4362         tag[0] = original_tag[3];
4363         tag[1] = original_tag[2];
4364         tag[2] = original_tag[1];
4365         tag[3] = original_tag[0];
4366         tag[4] = original_tag[7];
4367         tag[5] = original_tag[6];
4368         tag[6] = original_tag[5];
4369         tag[7] = original_tag[4];
4370 }
4371
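     /* Extract the completion tag of a command as two 32-bit halves.  Where
      * the tag lives depends on the command format: ioaccel1 and ioaccel2
      * commands keep it in their own command structures rather than in the
      * CISS request header.
      */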
4372 static void hpsa_get_tag(struct ctlr_info *h,
4373         struct CommandList *c, __le32 *taglower, __le32 *tagupper)
4374 {
4375         u64 tag;
4376         if (c->cmd_type == CMD_IOACCEL1) {
4377                 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4378                         &h->ioaccel_cmd_pool[c->cmdindex];
4379                 tag = le64_to_cpu(cm1->tag);
4380                 *tagupper = cpu_to_le32(tag >> 32);
4381                 *taglower = cpu_to_le32(tag);
4382                 return;
4383         }
4384         if (c->cmd_type == CMD_IOACCEL2) {
4385                 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4386                         &h->ioaccel2_cmd_pool[c->cmdindex];
4387                 /* upper tag not used in ioaccel2 mode */
4388                 memset(tagupper, 0, sizeof(*tagupper));
4389                 *taglower = cm2->Tag;
4390                 return;
4391         }
4392         tag = le64_to_cpu(c->Header.tag);
4393         *tagupper = cpu_to_le32(tag >> 32);
4394         *taglower = cpu_to_le32(tag);
4395 }
4396
4397 static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
4398         struct CommandList *abort, int swizzle)
4399 {
4400         int rc = IO_OK;
4401         struct CommandList *c;
4402         struct ErrorInfo *ei;
4403         __le32 tagupper, taglower;
4404
4405         c = cmd_alloc(h);
4406         if (c == NULL) {        /* trouble... */
4407                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
4408                 return -ENOMEM;
4409         }
4410
4411         /* fill_cmd can't fail here, no buffer to map */
4412         (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
4413                 0, 0, scsi3addr, TYPE_MSG);
4414         if (swizzle)
4415                 swizzle_abort_tag(&c->Request.CDB[4]);
4416         hpsa_scsi_do_simple_cmd_core(h, c);
4417         hpsa_get_tag(h, abort, &taglower, &tagupper);
4418         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
4419                 __func__, tagupper, taglower);
4420         /* no unmap needed here because no data xfer. */
4421
4422         ei = c->err_info;
4423         switch (ei->CommandStatus) {
4424         case CMD_SUCCESS:
4425                 break;
4426         case CMD_UNABORTABLE: /* Very common, don't make noise. */
4427                 rc = -1;
4428                 break;
4429         default:
4430                 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
4431                         __func__, tagupper, taglower);
4432                 hpsa_scsi_interpret_error(h, c);
4433                 rc = -1;
4434                 break;
4435         }
4436         cmd_free(h, c);
4437         dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4438                 __func__, tagupper, taglower);
4439         return rc;
4440 }
4441
4442 /* ioaccel2 path firmware cannot handle abort task requests.
4443  * Change abort requests to physical target reset, and send to the
4444  * address of the physical disk used for the ioaccel 2 command.
4445  * Return 0 on success (IO_OK)
4446  *       -1 on failure
4447  */
4448
4449 static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4450         unsigned char *scsi3addr, struct CommandList *abort)
4451 {
4452         int rc = IO_OK;
4453         struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4454         struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4455         unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4456         unsigned char *psa = &phys_scsi3addr[0];
4457
4458         /* Get a pointer to the hpsa logical device. */
4459         scmd = (struct scsi_cmnd *) abort->scsi_cmd;
4460         dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4461         if (dev == NULL) {
4462                 dev_warn(&h->pdev->dev,
4463                         "Cannot abort: no device pointer for command.\n");
4464                 return -1; /* not abortable */
4465         }
4466
4467         if (h->raid_offload_debug > 0)
4468                 dev_info(&h->pdev->dev,
4469                         "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4470                         h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4471                         scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4472                         scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4473
4474         if (!dev->offload_enabled) {
4475                 dev_warn(&h->pdev->dev,
4476                         "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4477                 return -1; /* not abortable */
4478         }
4479
4480         /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4481         if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4482                 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4483                 return -1; /* not abortable */
4484         }
4485
4486         /* send the reset */
4487         if (h->raid_offload_debug > 0)
4488                 dev_info(&h->pdev->dev,
4489                         "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4490                         psa[0], psa[1], psa[2], psa[3],
4491                         psa[4], psa[5], psa[6], psa[7]);
4492         rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
4493         if (rc != 0) {
4494                 dev_warn(&h->pdev->dev,
4495                         "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4496                         psa[0], psa[1], psa[2], psa[3],
4497                         psa[4], psa[5], psa[6], psa[7]);
4498                 return rc; /* failed to reset */
4499         }
4500
4501         /* wait for device to recover */
4502         if (wait_for_device_to_become_ready(h, psa) != 0) {
4503                 dev_warn(&h->pdev->dev,
4504                         "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4505                         psa[0], psa[1], psa[2], psa[3],
4506                         psa[4], psa[5], psa[6], psa[7]);
4507                 return -1;  /* failed to recover */
4508         }
4509
4510         /* device recovered */
4511         dev_info(&h->pdev->dev,
4512                 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4513                 psa[0], psa[1], psa[2], psa[3],
4514                 psa[4], psa[5], psa[6], psa[7]);
4515
4516         return rc; /* success */
4517 }
4518
4519 /* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
4520  * tell which kind we're dealing with, so we send the abort both ways.  There
4521  * shouldn't be any collisions between swizzled and unswizzled tags due to the
4522  * way we construct our tags but we check anyway in case the assumptions which
4523  * make this true someday become false.
4524  */
4525 static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4526         unsigned char *scsi3addr, struct CommandList *abort)
4527 {
4528         /* ioaccel mode 2 commands should be aborted via the
4529          * accelerated path, since the RAID path is unaware of these
4530          * commands, but the underlying firmware can't handle the abort
4531          * TMF.  Change the abort to a physical device reset.
4532          */
4533         if (abort->cmd_type == CMD_IOACCEL2)
4534                 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
4535
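             /* hpsa_send_abort() returns 0 (IO_OK) on success, so the &&
              * below yields success if either the unswizzled or the
              * swizzled attempt is accepted.
              */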
4536         return hpsa_send_abort(h, scsi3addr, abort, 0) &&
4537                         hpsa_send_abort(h, scsi3addr, abort, 1);
4538 }
4539
4540 /* Send an abort for the specified command.
4541  *      If the device and controller support it,
4542  *              send a task abort request.
4543  */
4544 static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4545 {
4546
4547         int i, rc;
4548         struct ctlr_info *h;
4549         struct hpsa_scsi_dev_t *dev;
4550         struct CommandList *abort; /* pointer to command to be aborted */
4551         struct scsi_cmnd *as;   /* ptr to scsi cmd inside aborted command. */
4552         char msg[256];          /* For debug messaging. */
4553         int ml = 0;
4554         __le32 tagupper, taglower;
4555         int refcount;
4556
4557         /* Find the controller of the command to be aborted */
4558         h = sdev_to_hba(sc->device);
4559         if (WARN(h == NULL,
4560                         "ABORT REQUEST FAILED, Controller lookup failed.\n"))
4561                 return FAILED;
4562
4563         /* Check that controller supports some kind of task abort */
4564         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
4565                 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
4566                 return FAILED;
4567
4568         memset(msg, 0, sizeof(msg));
4569         ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%llu ",
4570                 h->scsi_host->host_no, sc->device->channel,
4571                 sc->device->id, sc->device->lun);
4572
4573         /* Find the device of the command to be aborted */
4574         dev = sc->device->hostdata;
4575         if (!dev) {
4576                 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
4577                                 msg);
4578                 return FAILED;
4579         }
4580
4581         /* Get SCSI command to be aborted */
4582         abort = (struct CommandList *) sc->host_scribble;
4583         if (abort == NULL) {
4584                 /* This can happen if the command already completed. */
4585                 return SUCCESS;
4586         }
4587         refcount = atomic_inc_return(&abort->refcount);
4588         if (refcount == 1) { /* Command is done already. */
4589                 cmd_free(h, abort);
4590                 return SUCCESS;
4591         }
4592         hpsa_get_tag(h, abort, &taglower, &tagupper);
4593         ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
4594         as  = (struct scsi_cmnd *) abort->scsi_cmd;
4595         if (as != NULL)
4596                 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
4597                         as->cmnd[0], as->serial_number);
4598         dev_dbg(&h->pdev->dev, "%s\n", msg);
4599         dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
4600                 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4601         /*
4602          * Command is in flight, or possibly already completed
4603          * by the firmware (but not to the scsi mid layer) but we can't
4604          * distinguish which.  Send the abort down.
4605          */
4606         rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
4607         if (rc != 0) {
4608                 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
4609                 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
4610                         h->scsi_host->host_no,
4611                         dev->bus, dev->target, dev->lun);
4612                 cmd_free(h, abort);
4613                 return FAILED;
4614         }
4615         dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
4616
4617         /* If the abort(s) above completed and actually aborted the
4618          * command, then the command to be aborted should already be
4619          * completed.  If not, wait around a bit more to see if they
4620          * manage to complete normally.
4621          */
4622 #define ABORT_COMPLETE_WAIT_SECS 30
4623         for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
4624                 refcount = atomic_read(&abort->refcount);
4625                 if (refcount < 2) {
4626                         cmd_free(h, abort);
4627                         return SUCCESS;
4628                 } else {
4629                         msleep(100);
4630                 }
4631         }
4632         dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
4633                 msg, ABORT_COMPLETE_WAIT_SECS);
4634         cmd_free(h, abort);
4635         return FAILED;
4636 }
4637
4638 /*
4639  * For operations that cannot sleep, a command block is allocated at init,
4640  * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4641  * which ones are free or in use.  A per-command refcount makes the
4642  * claim atomic, so no lock is needed.  cmd_free() is the complement.
4643  */
4644
4645 static struct CommandList *cmd_alloc(struct ctlr_info *h)
4646 {
4647         struct CommandList *c;
4648         int i;
4650         dma_addr_t cmd_dma_handle, err_dma_handle;
4651         int refcount;
4652         unsigned long offset;
4653
4654         /*
4655          * There is some *extremely* small but non-zero chance that
4656          * multiple threads could get in here, and one thread could
4657          * be scanning through the list of bits looking for a free
4658          * one, but the free ones are always behind him, and other
4659          * threads sneak in behind him and eat them before he can
4660          * get to them, so that while there is always a free one, a
4661          * very unlucky thread might be starved anyway, never able to
4662          * beat the other threads.  In reality, this happens so
4663          * infrequently as to be indistinguishable from never.
4664          */
4665
4666         offset = h->last_allocation; /* benignly racy */
4667         for (;;) {
4668                 i = find_next_zero_bit(h->cmd_pool_bits, h->nr_cmds, offset);
4669                 if (unlikely(i == h->nr_cmds)) {
4670                         offset = 0;
4671                         continue;
4672                 }
4673                 c = h->cmd_pool + i;
4674                 refcount = atomic_inc_return(&c->refcount);
4675                 if (unlikely(refcount > 1)) {
4676                         cmd_free(h, c); /* already in use */
4677                         offset = (i + 1) % h->nr_cmds;
4678                         continue;
4679                 }
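                     /* refcount went 0 -> 1, so this slot is exclusively
                      * ours; record that in the bitmap.  set_bit() on bit
                      * i % BITS_PER_LONG of the (i / BITS_PER_LONG)'th word
                      * is equivalent to set_bit(i, h->cmd_pool_bits).
                      */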
4680                 set_bit(i & (BITS_PER_LONG - 1),
4681                         h->cmd_pool_bits + (i / BITS_PER_LONG));
4682                 break; /* it's ours now. */
4683         }
4684         h->last_allocation = i; /* benignly racy */
4685
4686         /* Zero out the CommandList except for the last field, the refcount */
4687         memset(c, 0, offsetof(struct CommandList, refcount));
4688         c->Header.tag = cpu_to_le64((u64) (i << DIRECT_LOOKUP_SHIFT));
4689         cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(*c);
4690         c->err_info = h->errinfo_pool + i;
4691         memset(c->err_info, 0, sizeof(*c->err_info));
4692         err_dma_handle = h->errinfo_pool_dhandle
4693             + i * sizeof(*c->err_info);
4694
4695         c->cmdindex = i;
4696
4697         c->busaddr = (u32) cmd_dma_handle;
4699         c->ErrDesc.Addr = cpu_to_le64((u64) err_dma_handle);
4700         c->ErrDesc.Len = cpu_to_le32((u32) sizeof(*c->err_info));
4701
4702         c->h = h;
4703         return c;
4704 }
4705
4706 static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4707 {
4708         if (atomic_dec_and_test(&c->refcount)) {
4709                 int i;
4710
4711                 i = c - h->cmd_pool;
4712                 clear_bit(i & (BITS_PER_LONG - 1),
4713                           h->cmd_pool_bits + (i / BITS_PER_LONG));
4714         }
4715 }
4716
4717 #ifdef CONFIG_COMPAT
4718
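     /* 32-bit compat thunks: each rebuilds the native 64-bit ioctl structure
      * in compat_alloc_user_space() memory (still a user-space pointer, as
      * hpsa_ioctl() expects) and then reuses the native handler.
      */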
4719 static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd,
4720         void __user *arg)
4721 {
4722         IOCTL32_Command_struct __user *arg32 =
4723             (IOCTL32_Command_struct __user *) arg;
4724         IOCTL_Command_struct arg64;
4725         IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
4726         int err;
4727         u32 cp;
4728
4729         memset(&arg64, 0, sizeof(arg64));
4730         err = 0;
4731         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4732                            sizeof(arg64.LUN_info));
4733         err |= copy_from_user(&arg64.Request, &arg32->Request,
4734                            sizeof(arg64.Request));
4735         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4736                            sizeof(arg64.error_info));
4737         err |= get_user(arg64.buf_size, &arg32->buf_size);
4738         err |= get_user(cp, &arg32->buf);
4739         arg64.buf = compat_ptr(cp);
4740         err |= copy_to_user(p, &arg64, sizeof(arg64));
4741
4742         if (err)
4743                 return -EFAULT;
4744
4745         err = hpsa_ioctl(dev, CCISS_PASSTHRU, p);
4746         if (err)
4747                 return err;
4748         err |= copy_in_user(&arg32->error_info, &p->error_info,
4749                          sizeof(arg32->error_info));
4750         if (err)
4751                 return -EFAULT;
4752         return err;
4753 }
4754
4755 static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4756         int cmd, void __user *arg)
4757 {
4758         BIG_IOCTL32_Command_struct __user *arg32 =
4759             (BIG_IOCTL32_Command_struct __user *) arg;
4760         BIG_IOCTL_Command_struct arg64;
4761         BIG_IOCTL_Command_struct __user *p =
4762             compat_alloc_user_space(sizeof(arg64));
4763         int err;
4764         u32 cp;
4765
4766         memset(&arg64, 0, sizeof(arg64));
4767         err = 0;
4768         err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4769                            sizeof(arg64.LUN_info));
4770         err |= copy_from_user(&arg64.Request, &arg32->Request,
4771                            sizeof(arg64.Request));
4772         err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4773                            sizeof(arg64.error_info));
4774         err |= get_user(arg64.buf_size, &arg32->buf_size);
4775         err |= get_user(arg64.malloc_size, &arg32->malloc_size);
4776         err |= get_user(cp, &arg32->buf);
4777         arg64.buf = compat_ptr(cp);
4778         err |= copy_to_user(p, &arg64, sizeof(arg64));
4779
4780         if (err)
4781                 return -EFAULT;
4782
4783         err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, p);
4784         if (err)
4785                 return err;
4786         err |= copy_in_user(&arg32->error_info, &p->error_info,
4787                          sizeof(arg32->error_info));
4788         if (err)
4789                 return -EFAULT;
4790         return err;
4791 }
4792
4793 static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
4794 {
4795         switch (cmd) {
4796         case CCISS_GETPCIINFO:
4797         case CCISS_GETINTINFO:
4798         case CCISS_SETINTINFO:
4799         case CCISS_GETNODENAME:
4800         case CCISS_SETNODENAME:
4801         case CCISS_GETHEARTBEAT:
4802         case CCISS_GETBUSTYPES:
4803         case CCISS_GETFIRMVER:
4804         case CCISS_GETDRIVVER:
4805         case CCISS_REVALIDVOLS:
4806         case CCISS_DEREGDISK:
4807         case CCISS_REGNEWDISK:
4808         case CCISS_REGNEWD:
4809         case CCISS_RESCANDISK:
4810         case CCISS_GETLUNINFO:
4811                 return hpsa_ioctl(dev, cmd, arg);
4812
4813         case CCISS_PASSTHRU32:
4814                 return hpsa_ioctl32_passthru(dev, cmd, arg);
4815         case CCISS_BIG_PASSTHRU32:
4816                 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
4817
4818         default:
4819                 return -ENOIOCTLCMD;
4820         }
4821 }
4822 #endif
4823
4824 static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
4825 {
4826         struct hpsa_pci_info pciinfo;
4827
4828         if (!argp)
4829                 return -EINVAL;
4830         pciinfo.domain = pci_domain_nr(h->pdev->bus);
4831         pciinfo.bus = h->pdev->bus->number;
4832         pciinfo.dev_fn = h->pdev->devfn;
4833         pciinfo.board_id = h->board_id;
4834         if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
4835                 return -EFAULT;
4836         return 0;
4837 }
4838
4839 static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
4840 {
4841         DriverVer_type DriverVer;
4842         unsigned char vmaj, vmin, vsubmin;
4843         int rc;
4844
4845         rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
4846                 &vmaj, &vmin, &vsubmin);
4847         if (rc != 3) {
4848                 dev_info(&h->pdev->dev, "driver version string '%s' unrecognized.\n",
4849                         HPSA_DRIVER_VERSION);
4850                 vmaj = 0;
4851                 vmin = 0;
4852                 vsubmin = 0;
4853         }
4854         DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
4855         if (!argp)
4856                 return -EINVAL;
4857         if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
4858                 return -EFAULT;
4859         return 0;
4860 }
4861
4862 static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4863 {
4864         IOCTL_Command_struct iocommand;
4865         struct CommandList *c;
4866         char *buff = NULL;
4867         u64 temp64;
4868         int rc = 0;
4869
4870         if (!argp)
4871                 return -EINVAL;
4872         if (!capable(CAP_SYS_RAWIO))
4873                 return -EPERM;
4874         if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
4875                 return -EFAULT;
4876         if ((iocommand.buf_size < 1) &&
4877             (iocommand.Request.Type.Direction != XFER_NONE)) {
4878                 return -EINVAL;
4879         }
4880         if (iocommand.buf_size > 0) {
4881                 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4882                 if (buff == NULL)
4883                         return -ENOMEM;
4884                 if (iocommand.Request.Type.Direction & XFER_WRITE) {
4885                         /* Copy the data into the buffer we created */
4886                         if (copy_from_user(buff, iocommand.buf,
4887                                 iocommand.buf_size)) {
4888                                 rc = -EFAULT;
4889                                 goto out_kfree;
4890                         }
4891                 } else {
4892                         memset(buff, 0, iocommand.buf_size);
4893                 }
4894         }
4895         c = cmd_alloc(h);
4896         if (c == NULL) {
4897                 rc = -ENOMEM;
4898                 goto out_kfree;
4899         }
4900         /* Fill in the command type */
4901         c->cmd_type = CMD_IOCTL_PEND;
4902         /* Fill in Command Header */
4903         c->Header.ReplyQueue = 0; /* unused in simple mode */
4904         if (iocommand.buf_size > 0) {   /* buffer to fill */
4905                 c->Header.SGList = 1;
4906                 c->Header.SGTotal = cpu_to_le16(1);
4907         } else  { /* no buffers to fill */
4908                 c->Header.SGList = 0;
4909                 c->Header.SGTotal = cpu_to_le16(0);
4910         }
4911         memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
4912
4913         /* Fill in Request block */
4914         memcpy(&c->Request, &iocommand.Request,
4915                 sizeof(c->Request));
4916
4917         /* Fill in the scatter gather information */
4918         if (iocommand.buf_size > 0) {
4919                 temp64 = pci_map_single(h->pdev, buff,
4920                         iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
4921                 if (dma_mapping_error(&h->pdev->dev, (dma_addr_t) temp64)) {
4922                         c->SG[0].Addr = cpu_to_le64(0);
4923                         c->SG[0].Len = cpu_to_le32(0);
4924                         rc = -ENOMEM;
4925                         goto out;
4926                 }
4927                 c->SG[0].Addr = cpu_to_le64(temp64);
4928                 c->SG[0].Len = cpu_to_le32(iocommand.buf_size);
4929                 c->SG[0].Ext = cpu_to_le32(HPSA_SG_LAST); /* not chaining */
4930         }
4931         hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
4932         if (iocommand.buf_size > 0)
4933                 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
4934         check_ioctl_unit_attention(h, c);
4935
4936         /* Copy the error information out */
4937         memcpy(&iocommand.error_info, c->err_info,
4938                 sizeof(iocommand.error_info));
4939         if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
4940                 rc = -EFAULT;
4941                 goto out;
4942         }
4943         if ((iocommand.Request.Type.Direction & XFER_READ) &&
4944                 iocommand.buf_size > 0) {
4945                 /* Copy the data out of the buffer we created */
4946                 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
4947                         rc = -EFAULT;
4948                         goto out;
4949                 }
4950         }
4951 out:
4952         cmd_free(h, c);
4953 out_kfree:
4954         kfree(buff);
4955         return rc;
4956 }
4957
4958 static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4959 {
4960         BIG_IOCTL_Command_struct *ioc;
4961         struct CommandList *c;
4962         unsigned char **buff = NULL;
4963         int *buff_size = NULL;
4964         u64 temp64;
4965         BYTE sg_used = 0;
4966         int status = 0;
4967         u32 left;
4968         u32 sz;
4969         BYTE __user *data_ptr;
4970
4971         if (!argp)
4972                 return -EINVAL;
4973         if (!capable(CAP_SYS_RAWIO))
4974                 return -EPERM;
4975         ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
4977         if (!ioc) {
4978                 status = -ENOMEM;
4979                 goto cleanup1;
4980         }
4981         if (copy_from_user(ioc, argp, sizeof(*ioc))) {
4982                 status = -EFAULT;
4983                 goto cleanup1;
4984         }
4985         if ((ioc->buf_size < 1) &&
4986             (ioc->Request.Type.Direction != XFER_NONE)) {
4987                 status = -EINVAL;
4988                 goto cleanup1;
4989         }
4990         /* Check kmalloc limits  using all SGs */
4991         if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
4992                 status = -EINVAL;
4993                 goto cleanup1;
4994         }
4995         if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
4996                 status = -EINVAL;
4997                 goto cleanup1;
4998         }
4999         buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
5000         if (!buff) {
5001                 status = -ENOMEM;
5002                 goto cleanup1;
5003         }
5004         buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
5005         if (!buff_size) {
5006                 status = -ENOMEM;
5007                 goto cleanup1;
5008         }
5009         left = ioc->buf_size;
5010         data_ptr = ioc->buf;
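             /* Split the user buffer into kmalloc-sized chunks, one per SG
              * entry; the size checks above guarantee that the chunks fit
              * within SG_ENTRIES_IN_CMD entries.
              */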
5011         while (left) {
5012                 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5013                 buff_size[sg_used] = sz;
5014                 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5015                 if (buff[sg_used] == NULL) {
5016                         status = -ENOMEM;
5017                         goto cleanup1;
5018                 }
5019                 if (ioc->Request.Type.Direction & XFER_WRITE) {
5020                         if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5021                                 status = -EFAULT;
5022                                 goto cleanup1;
5023                         }
5024                 } else
5025                         memset(buff[sg_used], 0, sz);
5026                 left -= sz;
5027                 data_ptr += sz;
5028                 sg_used++;
5029         }
5030         c = cmd_alloc(h);
5031         if (c == NULL) {
5032                 status = -ENOMEM;
5033                 goto cleanup1;
5034         }
5035         c->cmd_type = CMD_IOCTL_PEND;
5036         c->Header.ReplyQueue = 0;
5037         c->Header.SGList = (u8) sg_used;
5038         c->Header.SGTotal = cpu_to_le16(sg_used);
5039         memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5040         memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5041         if (ioc->buf_size > 0) {
5042                 int i;
5043                 for (i = 0; i < sg_used; i++) {
5044                         temp64 = pci_map_single(h->pdev, buff[i],
5045                                     buff_size[i], PCI_DMA_BIDIRECTIONAL);
5046                         if (dma_mapping_error(&h->pdev->dev,
5047                                                         (dma_addr_t) temp64)) {
5048                                 c->SG[i].Addr = cpu_to_le64(0);
5049                                 c->SG[i].Len = cpu_to_le32(0);
5050                                 hpsa_pci_unmap(h->pdev, c, i,
5051                                         PCI_DMA_BIDIRECTIONAL);
5052                                 status = -ENOMEM;
5053                                 goto cleanup0;
5054                         }
5055                         c->SG[i].Addr = cpu_to_le64(temp64);
5056                         c->SG[i].Len = cpu_to_le32(buff_size[i]);
5057                         c->SG[i].Ext = cpu_to_le32(0);
5058                 }
5059                 c->SG[--i].Ext = cpu_to_le32(HPSA_SG_LAST);
5060         }
5061         hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
5062         if (sg_used)
5063                 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
5064         check_ioctl_unit_attention(h, c);
5065         /* Copy the error information out */
5066         memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5067         if (copy_to_user(argp, ioc, sizeof(*ioc))) {
5068                 status = -EFAULT;
5069                 goto cleanup0;
5070         }
5071         if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {
5072                 int i;
5073
5074                 /* Copy the data out of the buffer we created */
5075                 BYTE __user *ptr = ioc->buf;
5076                 for (i = 0; i < sg_used; i++) {
5077                         if (copy_to_user(ptr, buff[i], buff_size[i])) {
5078                                 status = -EFAULT;
5079                                 goto cleanup0;
5080                         }
5081                         ptr += buff_size[i];
5082                 }
5083         }
5084         status = 0;
5085 cleanup0:
5086         cmd_free(h, c);
5087 cleanup1:
5088         if (buff) {
5089                 int i;
5090
5091                 for (i = 0; i < sg_used; i++)
5092                         kfree(buff[i]);
5093                 kfree(buff);
5094         }
5095         kfree(buff_size);
5096         kfree(ioc);
5097         return status;
5098 }
5099
5100 static void check_ioctl_unit_attention(struct ctlr_info *h,
5101         struct CommandList *c)
5102 {
5103         if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5104                         c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5105                 (void) check_for_unit_attention(h, c);
5106 }
5107
5108 /*
5109  * ioctl
5110  */
5111 static int hpsa_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
5112 {
5113         struct ctlr_info *h;
5114         void __user *argp = (void __user *)arg;
5115         int rc;
5116
5117         h = sdev_to_hba(dev);
5118
5119         switch (cmd) {
5120         case CCISS_DEREGDISK:
5121         case CCISS_REGNEWDISK:
5122         case CCISS_REGNEWD:
5123                 hpsa_scan_start(h->scsi_host);
5124                 return 0;
5125         case CCISS_GETPCIINFO:
5126                 return hpsa_getpciinfo_ioctl(h, argp);
5127         case CCISS_GETDRIVVER:
5128                 return hpsa_getdrivver_ioctl(h, argp);
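             /* Passthru ioctls are throttled with an atomic counter:
              * atomic_dec_if_positive() claims a slot only when one is
              * available (it returns a negative value, without driving the
              * counter negative, when none is), and atomic_inc() releases
              * the slot once the ioctl completes.
              */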
5129         case CCISS_PASSTHRU:
5130                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5131                         return -EAGAIN;
5132                 rc = hpsa_passthru_ioctl(h, argp);
5133                 atomic_inc(&h->passthru_cmds_avail);
5134                 return rc;
5135         case CCISS_BIG_PASSTHRU:
5136                 if (atomic_dec_if_positive(&h->passthru_cmds_avail) < 0)
5137                         return -EAGAIN;
5138                 rc = hpsa_big_passthru_ioctl(h, argp);
5139                 atomic_inc(&h->passthru_cmds_avail);
5140                 return rc;
5141         default:
5142                 return -ENOTTY;
5143         }
5144 }
5145
5146 static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5147                                 u8 reset_type)
5148 {
5149         struct CommandList *c;
5150
5151         c = cmd_alloc(h);
5152         if (!c)
5153                 return -ENOMEM;
5154         /* fill_cmd can't fail here, no data buffer to map */
5155         (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
5156                 RAID_CTLR_LUNID, TYPE_MSG);
5157         c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
5158         c->waiting = NULL;
5159         enqueue_cmd_and_start_io(h, c);
5160         /* Don't wait for completion, the reset won't complete.  Don't free
5161          * the command either.  This is the last command we will send before
5162          * re-initializing everything, so it doesn't matter and won't leak.
5163          */
5164         return 0;
5165 }
5166
5167 static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
5168         void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
5169         int cmd_type)
5170 {
5171         int pci_dir = XFER_NONE;
5172         struct CommandList *a; /* for commands to be aborted */
5173
5174         c->cmd_type = CMD_IOCTL_PEND;
5175         c->Header.ReplyQueue = 0;
5176         if (buff != NULL && size > 0) {
5177                 c->Header.SGList = 1;
5178                 c->Header.SGTotal = cpu_to_le16(1);
5179         } else {
5180                 c->Header.SGList = 0;
5181                 c->Header.SGTotal = cpu_to_le16(0);
5182         }
5183         memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5184
5185         if (cmd_type == TYPE_CMD) {
5186                 switch (cmd) {
5187                 case HPSA_INQUIRY:
5188                         /* are we trying to read a vital product page */
5189                         if (page_code & VPD_PAGE) {
5190                                 c->Request.CDB[1] = 0x01;
5191                                 c->Request.CDB[2] = (page_code & 0xff);
5192                         }
5193                         c->Request.CDBLen = 6;
5194                         c->Request.type_attr_dir =
5195                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5196                         c->Request.Timeout = 0;
5197                         c->Request.CDB[0] = HPSA_INQUIRY;
5198                         c->Request.CDB[4] = size & 0xFF;
5199                         break;
5200                 case HPSA_REPORT_LOG:
5201                 case HPSA_REPORT_PHYS:
5202                         /* Talking to the controller, so it's a physical
5203                          * command: mode = 00, target = 0.  Nothing to write.
5204                          */
5205                         c->Request.CDBLen = 12;
5206                         c->Request.type_attr_dir =
5207                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5208                         c->Request.Timeout = 0;
5209                         c->Request.CDB[0] = cmd;
5210                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5211                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5212                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5213                         c->Request.CDB[9] = size & 0xFF;
5214                         break;
5215                 case HPSA_CACHE_FLUSH:
5216                         c->Request.CDBLen = 12;
5217                         c->Request.type_attr_dir =
5218                                         TYPE_ATTR_DIR(cmd_type,
5219                                                 ATTR_SIMPLE, XFER_WRITE);
5220                         c->Request.Timeout = 0;
5221                         c->Request.CDB[0] = BMIC_WRITE;
5222                         c->Request.CDB[6] = BMIC_CACHE_FLUSH;
5223                         c->Request.CDB[7] = (size >> 8) & 0xFF;
5224                         c->Request.CDB[8] = size & 0xFF;
5225                         break;
5226                 case TEST_UNIT_READY:
5227                         c->Request.CDBLen = 6;
5228                         c->Request.type_attr_dir =
5229                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5230                         c->Request.Timeout = 0;
5231                         break;
5232                 case HPSA_GET_RAID_MAP:
5233                         c->Request.CDBLen = 12;
5234                         c->Request.type_attr_dir =
5235                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5236                         c->Request.Timeout = 0;
5237                         c->Request.CDB[0] = HPSA_CISS_READ;
5238                         c->Request.CDB[1] = cmd;
5239                         c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5240                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5241                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5242                         c->Request.CDB[9] = size & 0xFF;
5243                         break;
5244                 case BMIC_SENSE_CONTROLLER_PARAMETERS:
5245                         c->Request.CDBLen = 10;
5246                         c->Request.type_attr_dir =
5247                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5248                         c->Request.Timeout = 0;
5249                         c->Request.CDB[0] = BMIC_READ;
5250                         c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS;
5251                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5252                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5253                         break;
5254                 case BMIC_IDENTIFY_PHYSICAL_DEVICE:
5255                         c->Request.CDBLen = 10;
5256                         c->Request.type_attr_dir =
5257                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_READ);
5258                         c->Request.Timeout = 0;
5259                         c->Request.CDB[0] = BMIC_READ;
5260                         c->Request.CDB[6] = BMIC_IDENTIFY_PHYSICAL_DEVICE;
5261                         c->Request.CDB[7] = (size >> 16) & 0xFF;
5262                         c->Request.CDB[8] = (size >> 8) & 0xFF;
5263                         break;
5264                 default:
5265                         dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
5266                         BUG();
5267                         return -1;
5268                 }
5269         } else if (cmd_type == TYPE_MSG) {
5270                 switch (cmd) {
5272                 case HPSA_DEVICE_RESET_MSG:
5273                         c->Request.CDBLen = 16;
5274                         c->Request.type_attr_dir =
5275                                 TYPE_ATTR_DIR(cmd_type, ATTR_SIMPLE, XFER_NONE);
5276                         c->Request.Timeout = 0; /* Don't time out */
5277                         memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5278                         c->Request.CDB[0] = cmd;
5279                         c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
5280                         /* If bytes 4-7 are zero, it means reset the
5281                          * device identified by LunID. */
5282                         c->Request.CDB[4] = 0x00;
5283                         c->Request.CDB[5] = 0x00;
5284                         c->Request.CDB[6] = 0x00;
5285                         c->Request.CDB[7] = 0x00;
5286                         break;
5287                 case HPSA_ABORT_MSG:
5288                         a = buff;       /* point to command to be aborted */
5289                         dev_dbg(&h->pdev->dev,
5290                                 "Abort Tag:0x%016llx request Tag:0x%016llx\n",
5291                                 a->Header.tag, c->Header.tag);
5292                         c->Request.CDBLen = 16;
5293                         c->Request.type_attr_dir =
5294                                         TYPE_ATTR_DIR(cmd_type,
5295                                                 ATTR_SIMPLE, XFER_WRITE);
5296                         c->Request.Timeout = 0; /* Don't time out */
5297                         c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5298                         c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5299                         c->Request.CDB[2] = 0x00; /* reserved */
5300                         c->Request.CDB[3] = 0x00; /* reserved */
5301                         /* Tag to abort goes in CDB[4]-CDB[11] */
5302                         memcpy(&c->Request.CDB[4], &a->Header.tag,
5303                                 sizeof(a->Header.tag));
5304                         c->Request.CDB[12] = 0x00; /* reserved */
5305                         c->Request.CDB[13] = 0x00; /* reserved */
5306                         c->Request.CDB[14] = 0x00; /* reserved */
5307                         c->Request.CDB[15] = 0x00; /* reserved */
5308                         break;
5309                 default:
5310                         dev_warn(&h->pdev->dev, "unknown message type %d\n",
5311                                 cmd);
5312                         BUG();
5313                 }
5314         } else {
5315                 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5316                 BUG();
5317         }
5318
5319         switch (GET_DIR(c->Request.type_attr_dir)) {
5320         case XFER_READ:
5321                 pci_dir = PCI_DMA_FROMDEVICE;
5322                 break;
5323         case XFER_WRITE:
5324                 pci_dir = PCI_DMA_TODEVICE;
5325                 break;
5326         case XFER_NONE:
5327                 pci_dir = PCI_DMA_NONE;
5328                 break;
5329         default:
5330                 pci_dir = PCI_DMA_BIDIRECTIONAL;
5331         }
5332         if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5333                 return -1;
5334         return 0;
5335 }
5336
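/*
 * Example (illustrative): the reset path above builds its message with
 *
 *	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
 *		RAID_CTLR_LUNID, TYPE_MSG);
 *
 * With buff == NULL there is nothing to DMA-map, so the return value
 * can safely be ignored.
 */
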
5337 /*
5338  * Map (physical) PCI mem into (virtual) kernel space
5339  */
5340 static void __iomem *remap_pci_mem(ulong base, ulong size)
5341 {
5342         ulong page_base = ((ulong) base) & PAGE_MASK;
5343         ulong page_offs = ((ulong) base) - page_base;
5344         void __iomem *page_remapped = ioremap_nocache(page_base,
5345                 page_offs + size);
5346
5347         return page_remapped ? (page_remapped + page_offs) : NULL;
5348 }
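
/*
 * Example (illustrative addresses, assuming 4KiB pages): for
 * base = 0xfe001040 and size = 0x250, the enclosing page-aligned range
 * starting at 0xfe001000 is mapped and the mapped address + 0x40 is
 * returned, so the caller sees its exact register base.
 */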
5349
5350 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
5351 {
5352         return h->access.command_completed(h, q);
5353 }
5354
5355 static inline bool interrupt_pending(struct ctlr_info *h)
5356 {
5357         return h->access.intr_pending(h);
5358 }
5359
5360 static inline long interrupt_not_for_us(struct ctlr_info *h)
5361 {
5362         return (h->access.intr_pending(h) == 0) ||
5363                 (h->interrupts_enabled == 0);
5364 }
5365
5366 static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5367         u32 raw_tag)
5368 {
5369         if (unlikely(tag_index >= h->nr_cmds)) {
5370                 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5371                 return 1;
5372         }
5373         return 0;
5374 }
5375
5376 static inline void finish_cmd(struct CommandList *c)
5377 {
5378         dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
5379         if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5380                         || c->cmd_type == CMD_IOACCEL2))
5381                 complete_scsi_command(c);
5382         else if (c->cmd_type == CMD_IOCTL_PEND)
5383                 complete(c->waiting);
5384 }
5385
5386
5387 static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
5388 {
5389 #define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5390 #define HPSA_SIMPLE_ERROR_BITS 0x03
5391         if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
5392                 return tag & ~HPSA_SIMPLE_ERROR_BITS;
5393         return tag & ~HPSA_PERF_ERROR_BITS;
5394 }
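
/*
 * Worked example: in simple mode the low two bits of a tag carry error
 * status, so a raw tag of 0x1007 masks down to command tag 0x1004.
 * Performant mode instead reserves the low DIRECT_LOOKUP_SHIFT bits.
 */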
5395
5396 /* process completion of an indexed ("direct lookup") command */
5397 static inline void process_indexed_cmd(struct ctlr_info *h,
5398         u32 raw_tag)
5399 {
5400         u32 tag_index;
5401         struct CommandList *c;
5402
5403         tag_index = raw_tag >> DIRECT_LOOKUP_SHIFT;
5404         if (!bad_tag(h, tag_index, raw_tag)) {
5405                 c = h->cmd_pool + tag_index;
5406                 finish_cmd(c);
5407         }
5408 }
5409
5410 /* Some controllers, like p400, will give us one interrupt
5411  * after a soft reset, even if we turned interrupts off.
5412  * Only need to check for this in the hpsa_xxx_discard_completions
5413  * functions.
5414  */
5415 static int ignore_bogus_interrupt(struct ctlr_info *h)
5416 {
5417         if (likely(!reset_devices))
5418                 return 0;
5419
5420         if (likely(h->interrupts_enabled))
5421                 return 0;
5422
5423         dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
5424                 "(known firmware bug.)  Ignoring.\n");
5425
5426         return 1;
5427 }
5428
5429 /*
5430  * Convert &h->q[x] (passed to interrupt handlers) back to h.
5431  * Relies on (h->q[x] == x) being true for x such that
5432  * 0 <= x < MAX_REPLY_QUEUES.
5433  */
5434 static struct ctlr_info *queue_to_hba(u8 *queue)
5435 {
5436         return container_of((queue - *queue), struct ctlr_info, q[0]);
5437 }
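
/*
 * Example: hpsa_request_irqs() initializes h->q[x] = x, so if *queue == 3
 * then queue == &h->q[3], queue - 3 == &h->q[0], and container_of()
 * recovers the enclosing ctlr_info.
 */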
5438
5439 static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5440 {
5441         struct ctlr_info *h = queue_to_hba(queue);
5442         u8 q = *(u8 *) queue;
5443         u32 raw_tag;
5444
5445         if (ignore_bogus_interrupt(h))
5446                 return IRQ_NONE;
5447
5448         if (interrupt_not_for_us(h))
5449                 return IRQ_NONE;
5450         h->last_intr_timestamp = get_jiffies_64();
5451         while (interrupt_pending(h)) {
5452                 raw_tag = get_next_completion(h, q);
5453                 while (raw_tag != FIFO_EMPTY)
5454                         raw_tag = next_command(h, q);
5455         }
5456         return IRQ_HANDLED;
5457 }
5458
5459 static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
5460 {
5461         struct ctlr_info *h = queue_to_hba(queue);
5462         u32 raw_tag;
5463         u8 q = *(u8 *) queue;
5464
5465         if (ignore_bogus_interrupt(h))
5466                 return IRQ_NONE;
5467
5468         h->last_intr_timestamp = get_jiffies_64();
5469         raw_tag = get_next_completion(h, q);
5470         while (raw_tag != FIFO_EMPTY)
5471                 raw_tag = next_command(h, q);
5472         return IRQ_HANDLED;
5473 }
5474
5475 static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
5476 {
5477         struct ctlr_info *h = queue_to_hba((u8 *) queue);
5478         u32 raw_tag;
5479         u8 q = *(u8 *) queue;
5480
5481         if (interrupt_not_for_us(h))
5482                 return IRQ_NONE;
5483         h->last_intr_timestamp = get_jiffies_64();
5484         while (interrupt_pending(h)) {
5485                 raw_tag = get_next_completion(h, q);
5486                 while (raw_tag != FIFO_EMPTY) {
5487                         process_indexed_cmd(h, raw_tag);
5488                         raw_tag = next_command(h, q);
5489                 }
5490         }
5491         return IRQ_HANDLED;
5492 }
5493
5494 static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
5495 {
5496         struct ctlr_info *h = queue_to_hba(queue);
5497         u32 raw_tag;
5498         u8 q = *(u8 *) queue;
5499
5500         h->last_intr_timestamp = get_jiffies_64();
5501         raw_tag = get_next_completion(h, q);
5502         while (raw_tag != FIFO_EMPTY) {
5503                 process_indexed_cmd(h, raw_tag);
5504                 raw_tag = next_command(h, q);
5505         }
5506         return IRQ_HANDLED;
5507 }
5508
5509 /* Send a message CDB to the firmware. Careful, this only works
5510  * in simple mode, not performant mode due to the tag lookup.
5511  * We only ever use this immediately after a controller reset.
5512  */
5513 static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5514                         unsigned char type)
5515 {
5516         struct Command {
5517                 struct CommandListHeader CommandHeader;
5518                 struct RequestBlock Request;
5519                 struct ErrDescriptor ErrorDescriptor;
5520         };
5521         struct Command *cmd;
5522         static const size_t cmd_sz = sizeof(*cmd) +
5523                                         sizeof(cmd->ErrorDescriptor);
5524         dma_addr_t paddr64;
5525         __le32 paddr32;
5526         u32 tag;
5527         void __iomem *vaddr;
5528         int i, err;
5529
5530         vaddr = pci_ioremap_bar(pdev, 0);
5531         if (vaddr == NULL)
5532                 return -ENOMEM;
5533
5534         /* The Inbound Post Queue only accepts 32-bit physical addresses for the
5535          * CCISS commands, so they must be allocated from the lower 4GiB of
5536          * memory.
5537          */
5538         err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5539         if (err) {
5540                 iounmap(vaddr);
5541                 return err;
5542         }
5543
5544         cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
5545         if (cmd == NULL) {
5546                 iounmap(vaddr);
5547                 return -ENOMEM;
5548         }
5549
5550         /* This must fit, because of the 32-bit consistent DMA mask.  Also,
5551          * although there's no guarantee, we assume that the address is at
5552          * least 4-byte aligned (most likely, it's page-aligned).
5553          */
5554         paddr32 = cpu_to_le32(paddr64);
5555
5556         cmd->CommandHeader.ReplyQueue = 0;
5557         cmd->CommandHeader.SGList = 0;
5558         cmd->CommandHeader.SGTotal = cpu_to_le16(0);
5559         cmd->CommandHeader.tag = cpu_to_le64(paddr64);
5560         memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5561
5562         cmd->Request.CDBLen = 16;
5563         cmd->Request.type_attr_dir =
5564                         TYPE_ATTR_DIR(TYPE_MSG, ATTR_HEADOFQUEUE, XFER_NONE);
5565         cmd->Request.Timeout = 0; /* Don't time out */
5566         cmd->Request.CDB[0] = opcode;
5567         cmd->Request.CDB[1] = type;
5568         memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
5569         cmd->ErrorDescriptor.Addr =
5570                         cpu_to_le64((le32_to_cpu(paddr32) + sizeof(*cmd)));
5571         cmd->ErrorDescriptor.Len = cpu_to_le32(sizeof(struct ErrorInfo));
5572
5573         writel(le32_to_cpu(paddr32), vaddr + SA5_REQUEST_PORT_OFFSET);
5574
5575         for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
5576                 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
5577                 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr64)
5578                         break;
5579                 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
5580         }
5581
5582         iounmap(vaddr);
5583
5584         /* we leak the DMA buffer here ... no choice since the controller could
5585          *  still complete the command.
5586          */
5587         if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
5588                 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
5589                         opcode, type);
5590                 return -ETIMEDOUT;
5591         }
5592
5593         pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
5594
5595         if (tag & HPSA_ERROR_BIT) {
5596                 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
5597                         opcode, type);
5598                 return -EIO;
5599         }
5600
5601         dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
5602                 opcode, type);
5603         return 0;
5604 }
5605
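/* Opcode 3 / type 0 is the controller no-op message;
 * hpsa_init_reset_devices() uses it to check that the board responds
 * after a reset.
 */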
5606 #define hpsa_noop(p) hpsa_message(p, 3, 0)
5607
5608 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
5609         void __iomem *vaddr, u32 use_doorbell)
5610 {
5612         if (use_doorbell) {
5613                 /* For everything after the P600, the PCI power state method
5614                  * of resetting the controller doesn't work, so we have this
5615                  * other way using the doorbell register.
5616                  */
5617                 dev_info(&pdev->dev, "using doorbell to reset controller\n");
5618                 writel(use_doorbell, vaddr + SA5_DOORBELL);
5619
5620                 /* PMC hardware guys tell us we need a 10 second delay after
5621                  * doorbell reset and before any attempt to talk to the board
5622                  * at all to ensure that this actually works and doesn't fall
5623                  * over in some weird corner cases.
5624                  */
5625                 msleep(10000);
5626         } else { /* Try to do it the PCI power state way */
5627
5628                 /* Quoting from the Open CISS Specification: "The Power
5629                  * Management Control/Status Register (CSR) controls the power
5630                  * state of the device.  The normal operating state is D0,
5631                  * CSR=00h.  The software off state is D3, CSR=03h.  To reset
5632                  * the controller, place the interface device in D3 then to D0,
5633                  * this causes a secondary PCI reset which will reset the
5634                  * controller." */
5635
5636                 int rc = 0;
5637
5638                 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
5639
5640                 /* enter the D3hot power management state */
5641                 rc = pci_set_power_state(pdev, PCI_D3hot);
5642                 if (rc)
5643                         return rc;
5644
5645                 msleep(500);
5646
5647                 /* enter the D0 power management state */
5648                 rc = pci_set_power_state(pdev, PCI_D0);
5649                 if (rc)
5650                         return rc;
5651
5652                 /*
5653                  * The P600 requires a small delay when changing states.
5654                  * Otherwise we may think the board did not reset and we bail.
5655                  * This is for kdump only and is particular to the P600.
5656                  */
5657                 msleep(500);
5658         }
5659         return 0;
5660 }
5661
5662 static void init_driver_version(char *driver_version, int len)
5663 {
5664         memset(driver_version, 0, len);
5665         strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
5666 }
5667
5668 static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
5669 {
5670         char *driver_version;
5671         int i, size = sizeof(cfgtable->driver_version);
5672
5673         driver_version = kmalloc(size, GFP_KERNEL);
5674         if (!driver_version)
5675                 return -ENOMEM;
5676
5677         init_driver_version(driver_version, size);
5678         for (i = 0; i < size; i++)
5679                 writeb(driver_version[i], &cfgtable->driver_version[i]);
5680         kfree(driver_version);
5681         return 0;
5682 }
5683
5684 static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
5685                                           unsigned char *driver_ver)
5686 {
5687         int i;
5688
5689         for (i = 0; i < sizeof(cfgtable->driver_version); i++)
5690                 driver_ver[i] = readb(&cfgtable->driver_version[i]);
5691 }
5692
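/* Return 1 if the cfgtable still holds the driver-version string we wrote
 * before the reset (i.e. the reset did not take effect), 0 if it changed,
 * or -ENOMEM on allocation failure.
 */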
5693 static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
5694 {
5696         char *driver_ver, *old_driver_ver;
5697         int rc, size = sizeof(cfgtable->driver_version);
5698
5699         old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
5700         if (!old_driver_ver)
5701                 return -ENOMEM;
5702         driver_ver = old_driver_ver + size;
5703
5704         /* After a reset, the 32 bytes of "driver version" in the cfgtable
5705          * should have been changed, otherwise we know the reset failed.
5706          */
5707         init_driver_version(old_driver_ver, size);
5708         read_driver_ver_from_cfgtable(cfgtable, driver_ver);
5709         rc = !memcmp(driver_ver, old_driver_ver, size);
5710         kfree(old_driver_ver);
5711         return rc;
5712 }
5713 /* This does a hard reset of the controller using PCI power management
5714  * states or the doorbell register.
5715  */
5716 static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
5717 {
5718         u64 cfg_offset;
5719         u32 cfg_base_addr;
5720         u64 cfg_base_addr_index;
5721         void __iomem *vaddr;
5722         unsigned long paddr;
5723         u32 misc_fw_support;
5724         int rc;
5725         struct CfgTable __iomem *cfgtable;
5726         u32 use_doorbell;
5727         u32 board_id;
5728         u16 command_register;
5729
5730         /* For controllers as old as the P600, this is very nearly
5731          * the same thing as
5732          *
5733          * pci_save_state(pci_dev);
5734          * pci_set_power_state(pci_dev, PCI_D3hot);
5735          * pci_set_power_state(pci_dev, PCI_D0);
5736          * pci_restore_state(pci_dev);
5737          *
5738          * For controllers newer than the P600, the pci power state
5739          * method of resetting doesn't work so we have another way
5740          * using the doorbell register.
5741          */
5742
5743         rc = hpsa_lookup_board_id(pdev, &board_id);
5744         if (rc < 0) {
5745                 dev_warn(&pdev->dev, "Board ID not found\n");
5746                 return rc;
5747         }
5748         if (!ctlr_is_resettable(board_id)) {
5749                 dev_warn(&pdev->dev, "Controller not resettable\n");
5750                 return -ENODEV;
5751         }
5752
5753         /* if controller is soft- but not hard resettable... */
5754         if (!ctlr_is_hard_resettable(board_id))
5755                 return -ENOTSUPP; /* try soft reset later. */
5756
5757         /* Save the PCI command register */
5758         pci_read_config_word(pdev, PCI_COMMAND, &command_register);
5759         pci_save_state(pdev);
5760
5761         /* find the first memory BAR, so we can find the cfg table */
5762         rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
5763         if (rc)
5764                 return rc;
5765         vaddr = remap_pci_mem(paddr, 0x250);
5766         if (!vaddr)
5767                 return -ENOMEM;
5768
5769         /* find cfgtable in order to check if reset via doorbell is supported */
5770         rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
5771                                         &cfg_base_addr_index, &cfg_offset);
5772         if (rc)
5773                 goto unmap_vaddr;
5774         cfgtable = remap_pci_mem(pci_resource_start(pdev,
5775                        cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
5776         if (!cfgtable) {
5777                 rc = -ENOMEM;
5778                 goto unmap_vaddr;
5779         }
5780         rc = write_driver_ver_to_cfgtable(cfgtable);
5781         if (rc)
5782                 goto unmap_cfgtable;
5783
5784         /* If reset via doorbell register is supported, use that.
5785          * There are two such methods.  Favor the newest method.
5786          */
5787         misc_fw_support = readl(&cfgtable->misc_fw_support);
5788         use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
5789         if (use_doorbell) {
5790                 use_doorbell = DOORBELL_CTLR_RESET2;
5791         } else {
5792                 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
5793                 if (use_doorbell) {
5794                         dev_warn(&pdev->dev,
5795                                 "Soft reset not supported. Firmware update is required.\n");
5796                         rc = -ENOTSUPP; /* try soft reset */
5797                         goto unmap_cfgtable;
5798                 }
5799         }
5800
5801         rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
5802         if (rc)
5803                 goto unmap_cfgtable;
5804
5805         pci_restore_state(pdev);
5806         pci_write_config_word(pdev, PCI_COMMAND, command_register);
5807
5808         /* Some devices (notably the HP Smart Array 5i Controller)
5809            need a little pause here */
5810         msleep(HPSA_POST_RESET_PAUSE_MSECS);
5811
5812         rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
5813         if (rc) {
5814                 dev_warn(&pdev->dev,
5815                         "Failed waiting for board to become ready after hard reset\n");
5816                 goto unmap_cfgtable;
5817         }
5818
5819         rc = controller_reset_failed(cfgtable);
5820         if (rc < 0)
5821                 goto unmap_cfgtable;
5822         if (rc) {
5823                 dev_warn(&pdev->dev, "Unable to successfully reset "
5824                         "controller. Will try soft reset.\n");
5825                 rc = -ENOTSUPP;
5826         } else {
5827                 dev_info(&pdev->dev, "board ready after hard reset.\n");
5828         }
5829
5830 unmap_cfgtable:
5831         iounmap(cfgtable);
5832
5833 unmap_vaddr:
5834         iounmap(vaddr);
5835         return rc;
5836 }
5837
5838 /*
5839  * We cannot read the structure directly; for portability we must use
5840  * the I/O accessor functions.  This is for debug only.
5841  */
5843 static void print_cfg_table(struct device *dev, struct CfgTable __iomem *tb)
5844 {
5845 #ifdef HPSA_DEBUG
5846         int i;
5847         char temp_name[17];
5848
5849         dev_info(dev, "Controller Configuration information\n");
5850         dev_info(dev, "------------------------------------\n");
5851         for (i = 0; i < 4; i++)
5852                 temp_name[i] = readb(&(tb->Signature[i]));
5853         temp_name[4] = '\0';
5854         dev_info(dev, "   Signature = %s\n", temp_name);
5855         dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
5856         dev_info(dev, "   Transport methods supported = 0x%x\n",
5857                readl(&(tb->TransportSupport)));
5858         dev_info(dev, "   Transport methods active = 0x%x\n",
5859                readl(&(tb->TransportActive)));
5860         dev_info(dev, "   Requested transport Method = 0x%x\n",
5861                readl(&(tb->HostWrite.TransportRequest)));
5862         dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
5863                readl(&(tb->HostWrite.CoalIntDelay)));
5864         dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
5865                readl(&(tb->HostWrite.CoalIntCount)));
5866         dev_info(dev, "   Max outstanding commands = %d\n",
5867                readl(&(tb->CmdsOutMax)));
5868         dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
5869         for (i = 0; i < 16; i++)
5870                 temp_name[i] = readb(&(tb->ServerName[i]));
5871         temp_name[16] = '\0';
5872         dev_info(dev, "   Server Name = %s\n", temp_name);
5873         dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
5874                 readl(&(tb->HeartBeat)));
5875 #endif                          /* HPSA_DEBUG */
5876 }
5877
5878 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
5879 {
5880         int i, offset, mem_type, bar_type;
5881
5882         if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
5883                 return 0;
5884         offset = 0;
5885         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5886                 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
5887                 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
5888                         offset += 4;
5889                 else {
5890                         mem_type = pci_resource_flags(pdev, i) &
5891                             PCI_BASE_ADDRESS_MEM_TYPE_MASK;
5892                         switch (mem_type) {
5893                         case PCI_BASE_ADDRESS_MEM_TYPE_32:
5894                         case PCI_BASE_ADDRESS_MEM_TYPE_1M:
5895                                 offset += 4;    /* 32 bit */
5896                                 break;
5897                         case PCI_BASE_ADDRESS_MEM_TYPE_64:
5898                                 offset += 8;
5899                                 break;
5900                         default:        /* reserved in PCI 2.2 */
5901                                 dev_warn(&pdev->dev,
5902                                        "base address is invalid\n");
5903                                 return -1;
5905                         }
5906                 }
5907                 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
5908                         return i + 1;
5909         }
5910         return -1;
5911 }
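
/*
 * Example: each 32-bit BAR occupies 4 config-space bytes, each 64-bit BAR
 * occupies 8.  If resource 0 is a 64-bit memory BAR, then a target of
 * PCI_BASE_ADDRESS_0 + 8 matches after resource 0 is skipped and the
 * function returns resource index 1.
 */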
5912
5913 /* If MSI/MSI-X is supported by the kernel we will try to enable it on
5914  * controllers that are capable. If not, we use legacy INTx mode.
5915  */
5916
5917 static void hpsa_interrupt_mode(struct ctlr_info *h)
5918 {
5919 #ifdef CONFIG_PCI_MSI
5920         int err, i;
5921         struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
5922
5923         for (i = 0; i < MAX_REPLY_QUEUES; i++) {
5924                 hpsa_msix_entries[i].vector = 0;
5925                 hpsa_msix_entries[i].entry = i;
5926         }
5927
5928         /* Some boards advertise MSI but don't really support it */
5929         if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
5930             (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
5931                 goto default_int_mode;
5932         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
5933                 dev_info(&h->pdev->dev, "MSI-X capable controller\n");
5934                 h->msix_vector = MAX_REPLY_QUEUES;
5935                 if (h->msix_vector > num_online_cpus())
5936                         h->msix_vector = num_online_cpus();
5937                 err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
5938                                             1, h->msix_vector);
5939                 if (err < 0) {
5940                         dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
5941                         h->msix_vector = 0;
5942                         goto single_msi_mode;
5943                 } else if (err < h->msix_vector) {
5944                         dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
5945                                "available\n", err);
5946                 }
5947                 h->msix_vector = err;
5948                 for (i = 0; i < h->msix_vector; i++)
5949                         h->intr[i] = hpsa_msix_entries[i].vector;
5950                 return;
5951         }
5952 single_msi_mode:
5953         if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
5954                 dev_info(&h->pdev->dev, "MSI capable controller\n");
5955                 if (!pci_enable_msi(h->pdev))
5956                         h->msi_vector = 1;
5957                 else
5958                         dev_warn(&h->pdev->dev, "MSI init failed\n");
5959         }
5960 default_int_mode:
5961 #endif                          /* CONFIG_PCI_MSI */
5962         /* if we get here we're going to use the default interrupt mode */
5963         h->intr[h->intr_mode] = h->pdev->irq;
5964 }
5965
5966 static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
5967 {
5968         int i;
5969         u32 subsystem_vendor_id, subsystem_device_id;
5970
5971         subsystem_vendor_id = pdev->subsystem_vendor;
5972         subsystem_device_id = pdev->subsystem_device;
5973         *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
5974                     subsystem_vendor_id;
5975
5976         for (i = 0; i < ARRAY_SIZE(products); i++)
5977                 if (*board_id == products[i].board_id)
5978                         return i;
5979
5980         if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
5981                 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
5982                 !hpsa_allow_any) {
5983                 dev_warn(&pdev->dev, "unrecognized board ID: "
5984                         "0x%08x, ignoring.\n", *board_id);
5985                 return -ENODEV;
5986         }
5987         return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
5988 }
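
/*
 * Example: the P600 registers as board_id 0x3225103C, i.e. subsystem
 * device ID 0x3225 in the upper 16 bits and subsystem vendor ID 0x103C
 * (HP) in the lower 16.
 */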
5989
5990 static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
5991                                     unsigned long *memory_bar)
5992 {
5993         int i;
5994
5995         for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
5996                 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
5997                         /* addressing mode bits already removed */
5998                         *memory_bar = pci_resource_start(pdev, i);
5999                         dev_dbg(&pdev->dev, "memory BAR = %lx\n",
6000                                 *memory_bar);
6001                         return 0;
6002                 }
6003         dev_warn(&pdev->dev, "no memory BAR found\n");
6004         return -ENODEV;
6005 }
6006
6007 static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6008                                      int wait_for_ready)
6009 {
6010         int i, iterations;
6011         u32 scratchpad;
6012         if (wait_for_ready)
6013                 iterations = HPSA_BOARD_READY_ITERATIONS;
6014         else
6015                 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
6016
6017         for (i = 0; i < iterations; i++) {
6018                 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6019                 if (wait_for_ready) {
6020                         if (scratchpad == HPSA_FIRMWARE_READY)
6021                                 return 0;
6022                 } else {
6023                         if (scratchpad != HPSA_FIRMWARE_READY)
6024                                 return 0;
6025                 }
6026                 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6027         }
6028         dev_warn(&pdev->dev, "board not ready, timed out.\n");
6029         return -ENODEV;
6030 }
6031
6032 static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6033                                u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6034                                u64 *cfg_offset)
6035 {
6036         *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6037         *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6038         *cfg_base_addr &= (u32) 0x0000ffff;
6039         *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6040         if (*cfg_base_addr_index == -1) {
6041                 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6042                 return -ENODEV;
6043         }
6044         return 0;
6045 }
6046
6047 static int hpsa_find_cfgtables(struct ctlr_info *h)
6048 {
6049         u64 cfg_offset;
6050         u32 cfg_base_addr;
6051         u64 cfg_base_addr_index;
6052         u32 trans_offset;
6053         int rc;
6054
6055         rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6056                 &cfg_base_addr_index, &cfg_offset);
6057         if (rc)
6058                 return rc;
6059         h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
6060                        cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
6061         if (!h->cfgtable) {
6062                 dev_err(&h->pdev->dev, "Failed mapping cfgtable\n");
6063                 return -ENOMEM;
6064         }
6065         rc = write_driver_ver_to_cfgtable(h->cfgtable);
6066         if (rc)
6067                 return rc;
6068         /* Find performant mode table. */
6069         trans_offset = readl(&h->cfgtable->TransMethodOffset);
6070         h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6071                                 cfg_base_addr_index)+cfg_offset+trans_offset,
6072                                 sizeof(*h->transtable));
6073         if (!h->transtable)
6074                 return -ENOMEM;
6075         return 0;
6076 }
6077
6078 static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
6079 {
6080         h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
6081
6082         /* Limit commands in memory limited kdump scenario. */
6083         if (reset_devices && h->max_commands > 32)
6084                 h->max_commands = 32;
6085
6086         if (h->max_commands < 16) {
6087                 dev_warn(&h->pdev->dev, "Controller reports "
6088                         "max supported commands of %d, an obvious lie. "
6089                         "Using 16.  Ensure that firmware is up to date.\n",
6090                         h->max_commands);
6091                 h->max_commands = 16;
6092         }
6093 }
6094
6095 /* If the controller reports that the total max sg entries is greater than 512,
6096  * then we know that chained SG blocks work.  (Original smart arrays did not
6097  * support chained SG blocks and would return zero for max sg entries.)
6098  */
6099 static int hpsa_supports_chained_sg_blocks(struct ctlr_info *h)
6100 {
6101         return h->maxsgentries > 512;
6102 }
6103
6104 /* Interrogate the hardware for some limits:
6105  * max commands, max SG elements without chaining, and with chaining,
6106  * SG chain block size, etc.
6107  */
6108 static void hpsa_find_board_params(struct ctlr_info *h)
6109 {
6110         hpsa_get_max_perf_mode_cmds(h);
6111         h->nr_cmds = h->max_commands;
6112         h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
6113         h->fw_support = readl(&(h->cfgtable->misc_fw_support));
6114         if (hpsa_supports_chained_sg_blocks(h)) {
6115                 /* Limit in-command s/g elements to 32 to save DMA'able memory. */
6116                 h->max_cmd_sg_entries = 32;
6117                 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries;
6118                 h->maxsgentries--; /* save one for chain pointer */
6119         } else {
6120                 /*
6121                  * Original smart arrays supported at most 31 s/g entries
6122                  * embedded inline in the command (trying to use more
6123                  * would lock up the controller)
6124                  */
6125                 h->max_cmd_sg_entries = 31;
6126                 h->maxsgentries = 31; /* default to traditional values */
6127                 h->chainsize = 0;
6128         }
6129
6130         /* Find out what task management functions are supported and cache */
6131         h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
6132         if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6133                 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6134         if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6135                 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
6136 }
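
/*
 * Worked example: a controller reporting 1024 SG elements supports
 * chaining (> 512), so 32 entries stay inline in the command, chainsize
 * becomes 1024 - 32 = 992, and maxsgentries drops to 1023 after one
 * inline entry is reserved for the chain pointer.
 */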
6137
6138 static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
6139 {
6140         if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
6141                 dev_err(&h->pdev->dev, "not a valid CISS config table\n");
6142                 return false;
6143         }
6144         return true;
6145 }
6146
6147 static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
6148 {
6149         u32 driver_support;
6150
6151         driver_support = readl(&(h->cfgtable->driver_support));
6152         /* Need to enable prefetch in the SCSI core for 6400 in x86 */
6153 #ifdef CONFIG_X86
6154         driver_support |= ENABLE_SCSI_PREFETCH;
6155 #endif
6156         driver_support |= ENABLE_UNIT_ATTN;
6157         writel(driver_support, &(h->cfgtable->driver_support));
6158 }
6159
6160 /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
6161  * in a prefetch beyond physical memory.
6162  */
6163 static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
6164 {
6165         u32 dma_prefetch;
6166
6167         if (h->board_id != 0x3225103C)
6168                 return;
6169         dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
6170         dma_prefetch |= 0x8000;
6171         writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
6172 }
6173
6174 static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
6175 {
6176         int i;
6177         u32 doorbell_value;
6178         unsigned long flags;
6179         /* wait until the clear_event_notify bit 6 is cleared by controller. */
6180         for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6181                 spin_lock_irqsave(&h->lock, flags);
6182                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6183                 spin_unlock_irqrestore(&h->lock, flags);
6184                 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
6185                         break;
6186                 /* delay and try again */
6187                 msleep(20);
6188         }
6189 }
6190
6191 static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
6192 {
6193         int i;
6194         u32 doorbell_value;
6195         unsigned long flags;
6196
6197         /* Under certain very rare conditions, this can take a while.
6198          * (e.g. hot-replacing a failed 144GB drive in a RAID 5 set right
6199          * as we enter this code.)
6200          */
6201         for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6202                 spin_lock_irqsave(&h->lock, flags);
6203                 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6204                 spin_unlock_irqrestore(&h->lock, flags);
6205                 if (!(doorbell_value & CFGTBL_ChangeReq))
6206                         break;
6207                 /* delay and try again */
6208                 usleep_range(10000, 20000);
6209         }
6210 }
6211
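/* Ask the controller to switch to the simple transport method: write
 * CFGTBL_Trans_Simple into the host-write area, ring the config doorbell,
 * and confirm the change in TransportActive.
 */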
6212 static int hpsa_enter_simple_mode(struct ctlr_info *h)
6213 {
6214         u32 trans_support;
6215
6216         trans_support = readl(&(h->cfgtable->TransportSupport));
6217         if (!(trans_support & SIMPLE_MODE))
6218                 return -ENOTSUPP;
6219
6220         h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
6221
6222         /* Update the field, and then ring the doorbell */
6223         writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
6224         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
6225         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6226         hpsa_wait_for_mode_change_ack(h);
6227         print_cfg_table(&h->pdev->dev, h->cfgtable);
6228         if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6229                 goto error;
6230         h->transMethod = CFGTBL_Trans_Simple;
6231         return 0;
6232 error:
6233         dev_err(&h->pdev->dev, "failed to enter simple mode\n");
6234         return -ENODEV;
6235 }
6236
6237 static int hpsa_pci_init(struct ctlr_info *h)
6238 {
6239         int prod_index, err;
6240
6241         prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
6242         if (prod_index < 0)
6243                 return prod_index;
6244         h->product_name = products[prod_index].product_name;
6245         h->access = *(products[prod_index].access);
6246
6247         pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6248                                PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6249
6250         err = pci_enable_device(h->pdev);
6251         if (err) {
6252                 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
6253                 return err;
6254         }
6255
6256         err = pci_request_regions(h->pdev, HPSA);
6257         if (err) {
6258                 dev_err(&h->pdev->dev,
6259                         "cannot obtain PCI resources, aborting\n");
6260                 return err;
6261         }
6262
6263         pci_set_master(h->pdev);
6264
6265         hpsa_interrupt_mode(h);
6266         err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
6267         if (err)
6268                 goto err_out_free_res;
6269         h->vaddr = remap_pci_mem(h->paddr, 0x250);
6270         if (!h->vaddr) {
6271                 err = -ENOMEM;
6272                 goto err_out_free_res;
6273         }
6274         err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
6275         if (err)
6276                 goto err_out_free_res;
6277         err = hpsa_find_cfgtables(h);
6278         if (err)
6279                 goto err_out_free_res;
6280         hpsa_find_board_params(h);
6281
6282         if (!hpsa_CISS_signature_present(h)) {
6283                 err = -ENODEV;
6284                 goto err_out_free_res;
6285         }
6286         hpsa_set_driver_support_bits(h);
6287         hpsa_p600_dma_prefetch_quirk(h);
6288         err = hpsa_enter_simple_mode(h);
6289         if (err)
6290                 goto err_out_free_res;
6291         return 0;
6292
6293 err_out_free_res:
6294         if (h->transtable)
6295                 iounmap(h->transtable);
6296         if (h->cfgtable)
6297                 iounmap(h->cfgtable);
6298         if (h->vaddr)
6299                 iounmap(h->vaddr);
6300         pci_disable_device(h->pdev);
6301         pci_release_regions(h->pdev);
6302         return err;
6303 }
6304
6305 static void hpsa_hba_inquiry(struct ctlr_info *h)
6306 {
6307         int rc;
6308
6309 #define HBA_INQUIRY_BYTE_COUNT 64
6310         h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
6311         if (!h->hba_inquiry_data)
6312                 return;
6313         rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
6314                 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
6315         if (rc != 0) {
6316                 kfree(h->hba_inquiry_data);
6317                 h->hba_inquiry_data = NULL;
6318         }
6319 }
6320
6321 static int hpsa_init_reset_devices(struct pci_dev *pdev)
6322 {
6323         int rc, i;
6324         void __iomem *vaddr;
6325
6326         if (!reset_devices)
6327                 return 0;
6328
6329         /* The kdump kernel is loading; we don't know what state the
6330          * PCI interface is in.  dev->enable_cnt is zero, so we call
6331          * enable+disable, wait a while, and switch it on.
6332          */
6333         rc = pci_enable_device(pdev);
6334         if (rc) {
6335                 dev_warn(&pdev->dev, "Failed to enable PCI device\n");
6336                 return -ENODEV;
6337         }
6338         pci_disable_device(pdev);
6339         msleep(260);                    /* a randomly chosen number */
6340         rc = pci_enable_device(pdev);
6341         if (rc) {
6342                 dev_warn(&pdev->dev, "failed to enable device.\n");
6343                 return -ENODEV;
6344         }
6345
6346         pci_set_master(pdev);
6347
6348         vaddr = pci_ioremap_bar(pdev, 0);
6349         if (vaddr == NULL) {
6350                 rc = -ENOMEM;
6351                 goto out_disable;
6352         }
6353         writel(SA5_INTR_OFF, vaddr + SA5_REPLY_INTR_MASK_OFFSET);
6354         iounmap(vaddr);
6355
6356         /* Reset the controller with a PCI power-cycle or via doorbell */
6357         rc = hpsa_kdump_hard_reset_controller(pdev);
6358
6359         /* -ENOTSUPP here means we cannot reset the controller
6360          * but it's already (and still) up and running in
6361          * "performant mode".  Or, it might be 640x, which can't reset
6362          * due to concerns about shared bbwc between 6402/6404 pair.
6363          */
6364         if (rc)
6365                 goto out_disable;
6366
6367         /* Now try to get the controller to respond to a no-op */
6368         dev_info(&pdev->dev, "Waiting for controller to respond to no-op\n");
6369         for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
6370                 if (hpsa_noop(pdev) == 0)
6371                         break;
6372                 else
6373                         dev_warn(&pdev->dev, "no-op failed%s\n",
6374                                         (i < 11 ? "; re-trying" : ""));
6375         }
6376
6377 out_disable:
6378
6379         pci_disable_device(pdev);
6380         return rc;
6381 }
6382
6383 static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
6384 {
6385         h->cmd_pool_bits = kzalloc(
6386                 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
6387                 sizeof(unsigned long), GFP_KERNEL);
6388         h->cmd_pool = pci_alloc_consistent(h->pdev,
6389                     h->nr_cmds * sizeof(*h->cmd_pool),
6390                     &(h->cmd_pool_dhandle));
6391         h->errinfo_pool = pci_alloc_consistent(h->pdev,
6392                     h->nr_cmds * sizeof(*h->errinfo_pool),
6393                     &(h->errinfo_pool_dhandle));
6394         if ((h->cmd_pool_bits == NULL)
6395             || (h->cmd_pool == NULL)
6396             || (h->errinfo_pool == NULL)) {
6397                 dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
6398                 goto clean_up;
6399         }
6400         return 0;
6401 clean_up:
6402         hpsa_free_cmd_pool(h);
6403         return -ENOMEM;
6404 }
6405
6406 static void hpsa_free_cmd_pool(struct ctlr_info *h)
6407 {
6408         kfree(h->cmd_pool_bits);
6409         if (h->cmd_pool)
6410                 pci_free_consistent(h->pdev,
6411                             h->nr_cmds * sizeof(struct CommandList),
6412                             h->cmd_pool, h->cmd_pool_dhandle);
6413         if (h->ioaccel2_cmd_pool)
6414                 pci_free_consistent(h->pdev,
6415                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6416                         h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
6417         if (h->errinfo_pool)
6418                 pci_free_consistent(h->pdev,
6419                             h->nr_cmds * sizeof(struct ErrorInfo),
6420                             h->errinfo_pool,
6421                             h->errinfo_pool_dhandle);
6422         if (h->ioaccel_cmd_pool)
6423                 pci_free_consistent(h->pdev,
6424                         h->nr_cmds * sizeof(struct io_accel1_cmd),
6425                         h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
6426 }
6427
6428 static void hpsa_irq_affinity_hints(struct ctlr_info *h)
6429 {
6430         int i, cpu;
6431
6432         cpu = cpumask_first(cpu_online_mask);
6433         for (i = 0; i < h->msix_vector; i++) {
6434                 irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu));
6435                 cpu = cpumask_next(cpu, cpu_online_mask);
6436         }
6437 }
6438
6439 /* clear affinity hints and free MSI-X, MSI, or legacy INTx vectors */
6440 static void hpsa_free_irqs(struct ctlr_info *h)
6441 {
6442         int i;
6443
6444         if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6445                 /* Single reply queue, only one irq to free */
6446                 i = h->intr_mode;
6447                 irq_set_affinity_hint(h->intr[i], NULL);
6448                 free_irq(h->intr[i], &h->q[i]);
6449                 return;
6450         }
6451
6452         for (i = 0; i < h->msix_vector; i++) {
6453                 irq_set_affinity_hint(h->intr[i], NULL);
6454                 free_irq(h->intr[i], &h->q[i]);
6455         }
6456         for (; i < MAX_REPLY_QUEUES; i++)
6457                 h->q[i] = 0;
6458 }
6459
6460 /* returns 0 on success; cleans up and returns -Enn on error */
6461 static int hpsa_request_irqs(struct ctlr_info *h,
6462         irqreturn_t (*msixhandler)(int, void *),
6463         irqreturn_t (*intxhandler)(int, void *))
6464 {
6465         int rc, i;
6466
6467         /*
6468          * initialize h->q[x] = x so that interrupt handlers know which
6469          * queue to process.
6470          */
6471         for (i = 0; i < MAX_REPLY_QUEUES; i++)
6472                 h->q[i] = (u8) i;
6473
6474         if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
6475                 /* If performant mode and MSI-X, use multiple reply queues */
6476                 for (i = 0; i < h->msix_vector; i++) {
6477                         rc = request_irq(h->intr[i], msixhandler,
6478                                         0, h->devname,
6479                                         &h->q[i]);
6480                         if (rc) {
6481                                 int j;
6482
6483                                 dev_err(&h->pdev->dev,
6484                                         "failed to get irq %d for %s\n",
6485                                        h->intr[i], h->devname);
6486                                 for (j = 0; j < i; j++) {
6487                                         free_irq(h->intr[j], &h->q[j]);
6488                                         h->q[j] = 0;
6489                                 }
6490                                 for (; j < MAX_REPLY_QUEUES; j++)
6491                                         h->q[j] = 0;
6492                                 return rc;
6493                         }
6494                 }
6495                 hpsa_irq_affinity_hints(h);
6496         } else {
6497                 /* Use single reply pool */
6498                 if (h->msix_vector > 0 || h->msi_vector) {
6499                         rc = request_irq(h->intr[h->intr_mode],
6500                                 msixhandler, 0, h->devname,
6501                                 &h->q[h->intr_mode]);
6502                 } else {
6503                         rc = request_irq(h->intr[h->intr_mode],
6504                                 intxhandler, IRQF_SHARED, h->devname,
6505                                 &h->q[h->intr_mode]);
6506                 }
6507         }
6508         if (rc) {
6509                 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
6510                        h->intr[h->intr_mode], h->devname);
6511                 return -ENODEV;
6512         }
6513         return 0;
6514 }
6515
6516 static int hpsa_kdump_soft_reset(struct ctlr_info *h)
6517 {
6518         if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
6519                 HPSA_RESET_TYPE_CONTROLLER)) {
6520                 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
6521                 return -EIO;
6522         }
6523
6524         dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
6525         if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
6526                 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
6527                 return -1;
6528         }
6529
6530         dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
6531         if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
6532                 dev_warn(&h->pdev->dev, "Board failed to become ready "
6533                         "after soft reset.\n");
6534                 return -1;
6535         }
6536
6537         return 0;
6538 }
6539
6540 static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
6541 {
6542         hpsa_free_irqs(h);
6543 #ifdef CONFIG_PCI_MSI
6544         if (h->msix_vector) {
6545                 if (h->pdev->msix_enabled)
6546                         pci_disable_msix(h->pdev);
6547         } else if (h->msi_vector) {
6548                 if (h->pdev->msi_enabled)
6549                         pci_disable_msi(h->pdev);
6550         }
6551 #endif /* CONFIG_PCI_MSI */
6552 }
6553
6554 static void hpsa_free_reply_queues(struct ctlr_info *h)
6555 {
6556         int i;
6557
6558         for (i = 0; i < h->nreply_queues; i++) {
6559                 if (!h->reply_queue[i].head)
6560                         continue;
6561                 pci_free_consistent(h->pdev, h->reply_queue_size,
6562                         h->reply_queue[i].head, h->reply_queue[i].busaddr);
6563                 h->reply_queue[i].head = NULL;
6564                 h->reply_queue[i].busaddr = 0;
6565         }
6566 }
6567
6568 static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6569 {
6570         hpsa_free_irqs_and_disable_msix(h);
6571         hpsa_free_sg_chain_blocks(h);
6572         hpsa_free_cmd_pool(h);
6573         kfree(h->ioaccel1_blockFetchTable);
6574         kfree(h->blockFetchTable);
6575         hpsa_free_reply_queues(h);
6576         if (h->vaddr)
6577                 iounmap(h->vaddr);
6578         if (h->transtable)
6579                 iounmap(h->transtable);
6580         if (h->cfgtable)
6581                 iounmap(h->cfgtable);
6582         pci_disable_device(h->pdev);
6583         pci_release_regions(h->pdev);
6584         kfree(h);
6585 }
6586
6587 /* Called when controller lockup detected. */
6588 static void fail_all_outstanding_cmds(struct ctlr_info *h)
6589 {
6590         int i, refcount;
6591         struct CommandList *c;
6592
6593         flush_workqueue(h->resubmit_wq); /* ensure all cmds are fully built */
6594         for (i = 0; i < h->nr_cmds; i++) {
6595                 c = h->cmd_pool + i;
6596                 refcount = atomic_inc_return(&c->refcount);
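                /*
                 * A free command has refcount 0 and cmd_alloc() raises it
                 * to 1, so seeing > 1 after our own increment means the
                 * command is outstanding; complete it as a hardware error.
                 */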
6597                 if (refcount > 1) {
6598                         c->err_info->CommandStatus = CMD_HARDWARE_ERR;
6599                         finish_cmd(c);
6600                 }
6601                 cmd_free(h, c);
6602         }
6603 }
6604
6605 static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value)
6606 {
6607         int i, cpu;
6608
6609         cpu = cpumask_first(cpu_online_mask);
6610         for (i = 0; i < num_online_cpus(); i++) {
6611                 u32 *lockup_detected;
6612                 lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
6613                 *lockup_detected = value;
6614                 cpu = cpumask_next(cpu, cpu_online_mask);
6615         }
6616         wmb(); /* be sure the per-cpu variables are out to memory */
6617 }
6618
6619 static void controller_lockup_detected(struct ctlr_info *h)
6620 {
6621         unsigned long flags;
6622         u32 lockup_detected;
6623
6624         h->access.set_intr_mask(h, HPSA_INTR_OFF);
6625         spin_lock_irqsave(&h->lock, flags);
6626         lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6627         if (!lockup_detected) {
6628                 /* no heartbeat, but controller gave us a zero. */
6629                 dev_warn(&h->pdev->dev,
6630                         "lockup detected but scratchpad register is zero\n");
6631                 lockup_detected = 0xffffffff;
6632         }
6633         set_lockup_detected_for_all_cpus(h, lockup_detected);
6634         spin_unlock_irqrestore(&h->lock, flags);
6635         dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
6636                         lockup_detected);
6637         pci_disable_device(h->pdev);
6638         fail_all_outstanding_cmds(h);
6639 }
6640
6641 static void detect_controller_lockup(struct ctlr_info *h)
6642 {
6643         u64 now;
6644         u32 heartbeat;
6645         unsigned long flags;
6646
6647         now = get_jiffies_64();
6648         /* If we've received an interrupt recently, we're ok. */
6649         if (time_after64(h->last_intr_timestamp +
6650                                 (h->heartbeat_sample_interval), now))
6651                 return;
6652
6653         /*
6654          * If we've already checked the heartbeat recently, we're ok.
6655          * This could happen if someone sends us a signal. We
6656          * otherwise don't care about signals in this thread.
6657          */
6658         if (time_after64(h->last_heartbeat_timestamp +
6659                                 (h->heartbeat_sample_interval), now))
6660                 return;
6661
6662         /* If heartbeat has not changed since we last looked, we're not ok. */
6663         spin_lock_irqsave(&h->lock, flags);
6664         heartbeat = readl(&h->cfgtable->HeartBeat);
6665         spin_unlock_irqrestore(&h->lock, flags);
6666         if (h->last_heartbeat == heartbeat) {
6667                 controller_lockup_detected(h);
6668                 return;
6669         }
6670
6671         /* We're ok. */
6672         h->last_heartbeat = heartbeat;
6673         h->last_heartbeat_timestamp = now;
6674 }
6675
6676 static void hpsa_ack_ctlr_events(struct ctlr_info *h)
6677 {
6678         int i;
6679         char *event_type;
6680
6681         /* Ask the controller to clear the events we're handling. */
6682         if ((h->transMethod & (CFGTBL_Trans_io_accel1
6683                         | CFGTBL_Trans_io_accel2)) &&
6684                 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
6685                  h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
6686
6687                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
6688                         event_type = "state change";
6689                 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
6690                         event_type = "configuration change";
6691                 /* Stop sending new RAID offload reqs via the IO accelerator */
6692                 scsi_block_requests(h->scsi_host);
6693                 for (i = 0; i < h->ndevices; i++)
6694                         h->dev[i]->offload_enabled = 0;
6695                 hpsa_drain_accel_commands(h);
6696                 /* Set 'accelerator path config change' bit */
6697                 dev_warn(&h->pdev->dev,
6698                         "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
6699                         h->events, event_type);
6700                 writel(h->events, &(h->cfgtable->clear_event_notify));
6701                 /* Set the "clear event notify field update" bit 6 */
6702                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6703                 /* Wait until ctlr clears 'clear event notify field', bit 6 */
6704                 hpsa_wait_for_clear_event_notify_ack(h);
6705                 scsi_unblock_requests(h->scsi_host);
6706         } else {
6707                 /* Acknowledge controller notification events. */
6708                 writel(h->events, &(h->cfgtable->clear_event_notify));
6709                 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6710                 hpsa_wait_for_clear_event_notify_ack(h);
6711 #if 0
6712                 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6713                 hpsa_wait_for_mode_change_ack(h);
6714 #endif
6715         }
6716         return;
6717 }
6718
6719 /* Check a register on the controller to see if there are configuration
6720  * changes (added/changed/removed logical drives, etc.) which mean that
6721  * we should rescan the controller for devices.
6722  * Requires MISC_FW_EVENT_NOTIFY support in the controller firmware.
6723  */
6724 static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
6725 {
6726         if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
6727                 return 0;
6728
6729         h->events = readl(&(h->cfgtable->event_notify));
6730         return h->events & RESCAN_REQUIRED_EVENT_BITS;
6731 }
6732
6733 /*
6734  * Check if any of the offline devices have become ready
6735  */
6736 static int hpsa_offline_devices_ready(struct ctlr_info *h)
6737 {
6738         unsigned long flags;
6739         struct offline_device_entry *d;
6740         struct list_head *this, *tmp;
6741
6742         spin_lock_irqsave(&h->offline_device_lock, flags);
6743         list_for_each_safe(this, tmp, &h->offline_device_list) {
6744                 d = list_entry(this, struct offline_device_entry,
6745                                 offline_list);
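                /*
                 * Drop the lock while probing the device:
                 * hpsa_volume_offline() issues commands to the controller
                 * and may sleep.
                 */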
6746                 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6747                 if (!hpsa_volume_offline(h, d->scsi3addr)) {
6748                         spin_lock_irqsave(&h->offline_device_lock, flags);
6749                         list_del(&d->offline_list);
6750                         spin_unlock_irqrestore(&h->offline_device_lock, flags);
6751                         return 1;
6752                 }
6753                 spin_lock_irqsave(&h->offline_device_lock, flags);
6754         }
6755         spin_unlock_irqrestore(&h->offline_device_lock, flags);
6756         return 0;
6757 }
6758
6759
6760 static void hpsa_monitor_ctlr_worker(struct work_struct *work)
6761 {
6762         unsigned long flags;
6763         struct ctlr_info *h = container_of(to_delayed_work(work),
6764                                         struct ctlr_info, monitor_ctlr_work);
6765         detect_controller_lockup(h);
6766         if (lockup_detected(h))
6767                 return;
6768
6769         if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
6770                 scsi_host_get(h->scsi_host);
6771                 hpsa_ack_ctlr_events(h);
6772                 hpsa_scan_start(h->scsi_host);
6773                 scsi_host_put(h->scsi_host);
6774         }
6775
6776         spin_lock_irqsave(&h->lock, flags);
6777         if (h->remove_in_progress) {
6778                 spin_unlock_irqrestore(&h->lock, flags);
6779                 return;
6780         }
6781         schedule_delayed_work(&h->monitor_ctlr_work,
6782                                 h->heartbeat_sample_interval);
6783         spin_unlock_irqrestore(&h->lock, flags);
6784 }
6785
6786 static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6787 {
6788         int dac, rc;
6789         struct ctlr_info *h;
6790         int try_soft_reset = 0;
6791         unsigned long flags;
6792
6793         if (number_of_controllers == 0)
6794                 printk(KERN_INFO DRIVER_NAME "\n");
6795
6796         rc = hpsa_init_reset_devices(pdev);
6797         if (rc) {
6798                 if (rc != -ENOTSUPP)
6799                         return rc;
6800                 /* If the reset fails in a particular way (it has no way to do
6801                  * a proper hard reset, so returns -ENOTSUPP) we can try to do
6802                  * a soft reset once we get the controller configured up to the
6803                  * point that it can accept a command.
6804                  */
6805                 try_soft_reset = 1;
6806                 rc = 0;
6807         }
6808
6809 reinit_after_soft_reset:
6810
6811         /* Command structures must be aligned on a 32-byte boundary because
6812          * the 5 lower bits of the address are used by the hardware and by
6813          * the driver.  See comments in hpsa.h for more info.
6814          */
6815         BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
6816         h = kzalloc(sizeof(*h), GFP_KERNEL);
6817         if (!h)
6818                 return -ENOMEM;
6819
6820         h->pdev = pdev;
6821         h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
6822         INIT_LIST_HEAD(&h->offline_device_list);
6823         spin_lock_init(&h->lock);
6824         spin_lock_init(&h->offline_device_lock);
6825         spin_lock_init(&h->scan_lock);
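        /*
         * passthru_cmds_avail is an atomic count of free passthru command
         * slots, so the ioctl paths can throttle themselves without a lock.
         */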
6826         atomic_set(&h->passthru_cmds_avail, HPSA_MAX_CONCURRENT_PASSTHRUS);
6827
6828         h->resubmit_wq = alloc_workqueue("hpsa", WQ_MEM_RECLAIM, 0);
6829         if (!h->resubmit_wq) {
6830                 dev_err(&h->pdev->dev, "Failed to allocate work queue\n");
6831                 rc = -ENOMEM;
6832                 goto clean1;
6833         }
6834         /* Allocate and clear per-cpu variable lockup_detected */
6835         h->lockup_detected = alloc_percpu(u32);
6836         if (!h->lockup_detected) {
6837                 rc = -ENOMEM;
6838                 goto clean1;
6839         }
6840         set_lockup_detected_for_all_cpus(h, 0);
6841
6842         rc = hpsa_pci_init(h);
6843         if (rc != 0)
6844                 goto clean1;
6845
6846         sprintf(h->devname, HPSA "%d", number_of_controllers);
6847         h->ctlr = number_of_controllers;
6848         number_of_controllers++;
6849
6850         /* configure PCI DMA stuff */
6851         rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
6852         if (rc == 0) {
6853                 dac = 1;
6854         } else {
6855                 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6856                 if (rc == 0) {
6857                         dac = 0;
6858                 } else {
6859                         dev_err(&pdev->dev, "no suitable DMA available\n");
6860                         goto clean1;
6861                 }
6862         }
6863
6864         /* make sure the board interrupts are off */
6865         h->access.set_intr_mask(h, HPSA_INTR_OFF);
6866
6867         if (hpsa_request_irqs(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
6868                 goto clean2;
6869         dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
6870                h->devname, pdev->device,
6871                h->intr[h->intr_mode], dac ? "" : " not");
6872         rc = hpsa_allocate_cmd_pool(h);
6873         if (rc)
6874                 goto clean2_and_free_irqs;
6875         if (hpsa_allocate_sg_chain_blocks(h))
6876                 goto clean4;
6877         init_waitqueue_head(&h->scan_wait_queue);
6878         h->scan_finished = 1; /* no scan currently in progress */
6879
6880         pci_set_drvdata(pdev, h);
6881         h->ndevices = 0;
6882         h->hba_mode_enabled = 0;
6883         h->scsi_host = NULL;
6884         spin_lock_init(&h->devlock);
6885         hpsa_put_ctlr_into_performant_mode(h);
6886
6887         /* At this point, the controller is ready to take commands.
6888          * Now, if reset_devices and the hard reset didn't work, try
6889          * the soft reset and see if that works.
6890          */
6891         if (try_soft_reset) {
6892
6893                 /* This is kind of gross.  We may or may not get a completion
6894                  * from the soft reset command, and if we do, then the value
6895                  * from the fifo may or may not be valid.  So, we wait 10 secs
6896                  * after the reset throwing away any completions we get during
6897                  * that time.  Unregister the interrupt handler and register
6898                  * fake ones to scoop up any residual completions.
6899                  */
6900                 spin_lock_irqsave(&h->lock, flags);
6901                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6902                 spin_unlock_irqrestore(&h->lock, flags);
6903                 hpsa_free_irqs(h);
6904                 rc = hpsa_request_irqs(h, hpsa_msix_discard_completions,
6905                                         hpsa_intx_discard_completions);
6906                 if (rc) {
6907                         dev_warn(&h->pdev->dev,
6908                                 "Failed to request_irq after soft reset.\n");
6909                         goto clean4;
6910                 }
6911
6912                 rc = hpsa_kdump_soft_reset(h);
6913                 if (rc)
6914                         /* Neither hard nor soft reset worked, we're hosed. */
6915                         goto clean4;
6916
6917                 dev_info(&h->pdev->dev, "Board READY.\n");
6918                 dev_info(&h->pdev->dev,
6919                         "Waiting for stale completions to drain.\n");
6920                 h->access.set_intr_mask(h, HPSA_INTR_ON);
6921                 msleep(10000);
6922                 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6923
6924                 rc = controller_reset_failed(h->cfgtable);
6925                 if (rc)
6926                         dev_info(&h->pdev->dev,
6927                                 "Soft reset appears to have failed.\n");
6928
6929                 /* since the controller's reset, we have to go back and re-init
6930                  * everything.  Easiest to just forget what we've done and do it
6931                  * all over again.
6932                  */
6933                 hpsa_undo_allocations_after_kdump_soft_reset(h);
6934                 try_soft_reset = 0;
6935                 if (rc)
6936                         /* don't go to clean4, we already freed everything */
6937                         return -ENODEV;
6938
6939                 goto reinit_after_soft_reset;
6940         }
6941
6942         /* Enable Accelerated IO path at driver layer */
6943         h->acciopath_status = 1;
6944
6945
6946         /* Turn the interrupts on so we can service requests */
6947         h->access.set_intr_mask(h, HPSA_INTR_ON);
6948
6949         hpsa_hba_inquiry(h);
6950         hpsa_register_scsi(h);  /* hook ourselves into SCSI subsystem */
6951
6952         /* Monitor the controller for firmware lockups */
6953         h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
6954         INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
6955         schedule_delayed_work(&h->monitor_ctlr_work,
6956                                 h->heartbeat_sample_interval);
6957         return 0;
6958
6959 clean4:
6960         hpsa_free_sg_chain_blocks(h);
6961         hpsa_free_cmd_pool(h);
6962 clean2_and_free_irqs:
6963         hpsa_free_irqs(h);
6964 clean2:
6965 clean1:
6966         if (h->resubmit_wq)
6967                 destroy_workqueue(h->resubmit_wq);
6968         if (h->lockup_detected)
6969                 free_percpu(h->lockup_detected);
6970         kfree(h);
6971         return rc;
6972 }
6973
6974 static void hpsa_flush_cache(struct ctlr_info *h)
6975 {
6976         char *flush_buf;
6977         struct CommandList *c;
6978
6979         /* Don't bother trying to flush the cache if locked up */
6980         if (unlikely(lockup_detected(h)))
6981                 return;
6982         flush_buf = kzalloc(4, GFP_KERNEL);
6983         if (!flush_buf)
6984                 return;
6985
6986         c = cmd_alloc(h);
6987         if (!c) {
6988                 dev_warn(&h->pdev->dev, "cmd_alloc returned NULL!\n");
6989                 goto out_of_memory;
6990         }
6991         if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
6992                 RAID_CTLR_LUNID, TYPE_CMD)) {
6993                 goto out;
6994         }
6995         hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
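        /*
         * Note: the "out" label sits inside the if body below, so a
         * fill_cmd() failure jumps straight to the warning while a
         * non-zero CommandStatus simply falls into it.
         */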
6996         if (c->err_info->CommandStatus != 0)
6997 out:
6998                 dev_warn(&h->pdev->dev,
6999                         "error flushing cache on controller\n");
7000         cmd_free(h, c);
7001 out_of_memory:
7002         kfree(flush_buf);
7003 }
7004
7005 static void hpsa_shutdown(struct pci_dev *pdev)
7006 {
7007         struct ctlr_info *h;
7008
7009         h = pci_get_drvdata(pdev);
7010         /* Send the flush cache command to write all data in the
7011          * battery-backed cache out to disk, then turn the board
7012          * interrupts off and free the interrupt resources.
7013          */
7014         hpsa_flush_cache(h);
7015         h->access.set_intr_mask(h, HPSA_INTR_OFF);
7016         hpsa_free_irqs_and_disable_msix(h);
7017 }
7018
7019 static void hpsa_free_device_info(struct ctlr_info *h)
7020 {
7021         int i;
7022
7023         for (i = 0; i < h->ndevices; i++)
7024                 kfree(h->dev[i]);
7025 }
7026
7027 static void hpsa_remove_one(struct pci_dev *pdev)
7028 {
7029         struct ctlr_info *h;
7030         unsigned long flags;
7031
7032         if (pci_get_drvdata(pdev) == NULL) {
7033                 dev_err(&pdev->dev, "unable to remove device\n");
7034                 return;
7035         }
7036         h = pci_get_drvdata(pdev);
7037
7038         /* Get rid of any controller monitoring work items */
7039         spin_lock_irqsave(&h->lock, flags);
7040         h->remove_in_progress = 1;
7041         cancel_delayed_work(&h->monitor_ctlr_work);
7042         spin_unlock_irqrestore(&h->lock, flags);
7043         hpsa_unregister_scsi(h);        /* unhook from SCSI subsystem */
7044         hpsa_shutdown(pdev);
7045         destroy_workqueue(h->resubmit_wq);
7046         iounmap(h->vaddr);
7047         iounmap(h->transtable);
7048         iounmap(h->cfgtable);
7049         hpsa_free_device_info(h);
7050         hpsa_free_sg_chain_blocks(h);
7051         pci_free_consistent(h->pdev,
7052                 h->nr_cmds * sizeof(struct CommandList),
7053                 h->cmd_pool, h->cmd_pool_dhandle);
7054         pci_free_consistent(h->pdev,
7055                 h->nr_cmds * sizeof(struct ErrorInfo),
7056                 h->errinfo_pool, h->errinfo_pool_dhandle);
7057         hpsa_free_reply_queues(h);
7058         kfree(h->cmd_pool_bits);
7059         kfree(h->blockFetchTable);
7060         kfree(h->ioaccel1_blockFetchTable);
7061         kfree(h->ioaccel2_blockFetchTable);
7062         kfree(h->hba_inquiry_data);
7063         pci_disable_device(pdev);
7064         pci_release_regions(pdev);
7065         free_percpu(h->lockup_detected);
7066         kfree(h);
7067 }
7068
7069 static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
7070         __attribute__((unused)) pm_message_t state)
7071 {
7072         return -ENOSYS;
7073 }
7074
7075 static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
7076 {
7077         return -ENOSYS;
7078 }
7079
7080 static struct pci_driver hpsa_pci_driver = {
7081         .name = HPSA,
7082         .probe = hpsa_init_one,
7083         .remove = hpsa_remove_one,
7084         .id_table = hpsa_pci_device_id,
7085         .shutdown = hpsa_shutdown,
7086         .suspend = hpsa_suspend,
7087         .resume = hpsa_resume,
7088 };
7089
7090 /* Fill in bucket_map[], given nsgs (the max number of
7091  * scatter gather elements supported) and bucket[],
7092  * which is an array of 8 integers.  The bucket[] array
7093  * contains 8 different DMA transfer sizes (in 16
7094  * byte increments) which the controller uses to fetch
7095  * commands.  This function fills in bucket_map[], which
7096  * maps a given number of scatter gather elements to one of
7097  * the 8 DMA transfer sizes.  The point of it is to allow the
7098  * controller to only do as much DMA as needed to fetch the
7099  * command, with the DMA transfer size encoded in the lower
7100  * bits of the command address.
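 *
 * For example, with the bucket array used below by
 * hpsa_enter_performant_mode() ({5, 6, 8, 10, 12, 20, 28, ...},
 * min_blocks = 4), a command with 3 SG entries needs 3 + 4 = 7 blocks;
 * the first bucket holding at least 7 blocks is bucket 2 (8 blocks),
 * so bucket_map[3] = 2.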
7101  */
7102 static void  calc_bucket_map(int bucket[], int num_buckets,
7103         int nsgs, int min_blocks, u32 *bucket_map)
7104 {
7105         int i, j, b, size;
7106
7107         /* Note, bucket_map must have nsgs+1 entries. */
7108         for (i = 0; i <= nsgs; i++) {
7109                 /* Compute size of a command with i SG entries */
7110                 size = i + min_blocks;
7111                 b = num_buckets; /* Assume the biggest bucket */
7112                 /* Find the bucket that is just big enough */
7113                 for (j = 0; j < num_buckets; j++) {
7114                         if (bucket[j] >= size) {
7115                                 b = j;
7116                                 break;
7117                         }
7118                 }
7119                 /* for a command with i SG entries, use bucket b. */
7120                 bucket_map[i] = b;
7121         }
7122 }
7123
7124 static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
7125 {
7126         int i;
7127         unsigned long register_value;
7128         unsigned long transMethod = CFGTBL_Trans_Performant |
7129                         (trans_support & CFGTBL_Trans_use_short_tags) |
7130                                 CFGTBL_Trans_enable_directed_msix |
7131                         (trans_support & (CFGTBL_Trans_io_accel1 |
7132                                 CFGTBL_Trans_io_accel2));
7133         struct access_method access = SA5_performant_access;
7134
7135         /* This is a bit complicated.  There are 8 registers on
7136          * the controller which we write to in order to tell it the 8
7137          * different sizes of commands there may be.  It's a way of
7138          * reducing the DMA done to fetch each command.  Encoded into
7139          * each command's tag are 3 bits which communicate to the controller
7140          * which of the eight sizes that command fits within.  The size of
7141          * each command depends on how many scatter gather entries there are.
7142          * Each SG entry requires 16 bytes.  The eight registers are programmed
7143          * with the number of 16-byte blocks a command of that size requires.
7144          * The smallest command possible requires 5 such 16-byte blocks;
7145          * the largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
7146          * blocks.  Note, this only extends to the SG entries contained
7147          * within the command block, and does not extend to chained blocks
7148          * of SG elements.   bft[] contains the eight values we write to
7149          * the registers.  They are not evenly distributed, but have more
7150          * sizes for small commands, and fewer sizes for larger commands.
7151          */
7152         int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
7153 #define MIN_IOACCEL2_BFT_ENTRY 5
7154 #define HPSA_IOACCEL2_HEADER_SZ 4
7155         int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
7156                         13, 14, 15, 16, 17, 18, 19,
7157                         HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
7158         BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
7159         BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
7160         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
7161                                  16 * MIN_IOACCEL2_BFT_ENTRY);
7162         BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
7163         BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
7164         /*  5 = 1 s/g entry or 4k
7165          *  6 = 2 s/g entry or 8k
7166          *  8 = 4 s/g entry or 16k
7167          * 10 = 6 s/g entry or 24k
7168          */
7169
7170         /* If the controller supports either ioaccel method then
7171          * we can also use the RAID stack submit path that does not
7172          * perform the superfluous readl() after each command submission.
7173          */
7174         if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2))
7175                 access = SA5_performant_access_no_read;
7176
7177         /* Controller spec: zero out this buffer. */
7178         for (i = 0; i < h->nreply_queues; i++)
7179                 memset(h->reply_queue[i].head, 0, h->reply_queue_size);
7180
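        /* The last bucket must cover the largest possible command. */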
7181         bft[7] = SG_ENTRIES_IN_CMD + 4;
7182         calc_bucket_map(bft, ARRAY_SIZE(bft),
7183                                 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
7184         for (i = 0; i < 8; i++)
7185                 writel(bft[i], &h->transtable->BlockFetch[i]);
7186
7187         /* size of controller ring buffer */
7188         writel(h->max_commands, &h->transtable->RepQSize);
7189         writel(h->nreply_queues, &h->transtable->RepQCount);
7190         writel(0, &h->transtable->RepQCtrAddrLow32);
7191         writel(0, &h->transtable->RepQCtrAddrHigh32);
7192
7193         for (i = 0; i < h->nreply_queues; i++) {
7194                 writel(0, &h->transtable->RepQAddr[i].upper);
7195                 writel(h->reply_queue[i].busaddr,
7196                         &h->transtable->RepQAddr[i].lower);
7197         }
7198
7199         writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
7200         writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
7201         /*
7202          * Enable outbound interrupt coalescing in accelerator mode.
7203          */
7204         if (trans_support & CFGTBL_Trans_io_accel1) {
7205                 access = SA5_ioaccel_mode1_access;
7206                 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7207                 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7208         } else {
7209                 if (trans_support & CFGTBL_Trans_io_accel2) {
7210                         access = SA5_ioaccel_mode2_access;
7211                         writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7212                         writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7213                 }
7214         }
7215         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7216         hpsa_wait_for_mode_change_ack(h);
7217         register_value = readl(&(h->cfgtable->TransportActive));
7218         if (!(register_value & CFGTBL_Trans_Performant)) {
7219                 dev_err(&h->pdev->dev,
7220                         "performant mode problem - transport not active\n");
7221                 return;
7222         }
7223         /* Change the access methods to the performant access methods */
7224         h->access = access;
7225         h->transMethod = transMethod;
7226
7227         if (!((trans_support & CFGTBL_Trans_io_accel1) ||
7228                 (trans_support & CFGTBL_Trans_io_accel2)))
7229                 return;
7230
7231         if (trans_support & CFGTBL_Trans_io_accel1) {
7232                 /* Set up I/O accelerator mode */
7233                 for (i = 0; i < h->nreply_queues; i++) {
7234                         writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
7235                         h->reply_queue[i].current_entry =
7236                                 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
7237                 }
7238                 bft[7] = h->ioaccel_maxsg + 8;
7239                 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
7240                                 h->ioaccel1_blockFetchTable);
7241
7242                 /* initialize all reply queue entries to unused */
7243                 for (i = 0; i < h->nreply_queues; i++)
7244                         memset(h->reply_queue[i].head,
7245                                 (u8) IOACCEL_MODE1_REPLY_UNUSED,
7246                                 h->reply_queue_size);
7247
7248                 /* set all the constant fields in the accelerator command
7249                  * frames once at init time to save CPU cycles later.
7250                  */
7251                 for (i = 0; i < h->nr_cmds; i++) {
7252                         struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
7253
7254                         cp->function = IOACCEL1_FUNCTION_SCSIIO;
7255                         cp->err_info = (u32) (h->errinfo_pool_dhandle +
7256                                         (i * sizeof(struct ErrorInfo)));
7257                         cp->err_info_len = sizeof(struct ErrorInfo);
7258                         cp->sgl_offset = IOACCEL1_SGLOFFSET;
7259                         cp->host_context_flags =
7260                                 cpu_to_le16(IOACCEL1_HCFLAGS_CISS_FORMAT);
7261                         cp->timeout_sec = 0;
7262                         cp->ReplyQueue = 0;
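                        /*
                         * The command's pool index is encoded in the upper
                         * bits of the tag (above DIRECT_LOOKUP_SHIFT), so a
                         * completed tag maps straight back to its command.
                         */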
7263                         cp->tag =
7264                                 cpu_to_le64((i << DIRECT_LOOKUP_SHIFT));
7265                         cp->host_addr =
7266                                 cpu_to_le64(h->ioaccel_cmd_pool_dhandle +
7267                                         (i * sizeof(struct io_accel1_cmd)));
7268                 }
7269         } else if (trans_support & CFGTBL_Trans_io_accel2) {
7270                 u64 cfg_offset, cfg_base_addr_index;
7271                 u32 bft2_offset, cfg_base_addr;
7272                 int rc;
7273
7274                 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7275                         &cfg_base_addr_index, &cfg_offset);
7276                 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
7277                 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
7278                 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
7279                                 4, h->ioaccel2_blockFetchTable);
7280                 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
7281                 BUILD_BUG_ON(offsetof(struct CfgTable,
7282                                 io_accel_request_size_offset) != 0xb8);
7283                 h->ioaccel2_bft2_regs =
7284                         remap_pci_mem(pci_resource_start(h->pdev,
7285                                         cfg_base_addr_index) +
7286                                         cfg_offset + bft2_offset,
7287                                         ARRAY_SIZE(bft2) *
7288                                         sizeof(*h->ioaccel2_bft2_regs));
7289                 for (i = 0; i < ARRAY_SIZE(bft2); i++)
7290                         writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
7291         }
7292         writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7293         hpsa_wait_for_mode_change_ack(h);
7294 }
7295
7296 static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7297 {
7298         h->ioaccel_maxsg =
7299                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7300         if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
7301                 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
7302
7303         /* Command structures must be aligned on a 128-byte boundary
7304          * because the 7 lower bits of the address are used by the
7305          * hardware.
7306          */
7307         BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7308                         IOACCEL1_COMMANDLIST_ALIGNMENT);
7309         h->ioaccel_cmd_pool =
7310                 pci_alloc_consistent(h->pdev,
7311                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7312                         &(h->ioaccel_cmd_pool_dhandle));
7313
7314         h->ioaccel1_blockFetchTable =
7315                 kmalloc(((h->ioaccel_maxsg + 1) *
7316                                 sizeof(u32)), GFP_KERNEL);
7317
7318         if ((h->ioaccel_cmd_pool == NULL) ||
7319                 (h->ioaccel1_blockFetchTable == NULL))
7320                 goto clean_up;
7321
7322         memset(h->ioaccel_cmd_pool, 0,
7323                 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
7324         return 0;
7325
7326 clean_up:
7327         if (h->ioaccel_cmd_pool)
7328                 pci_free_consistent(h->pdev,
7329                         h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7330                         h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
7331         kfree(h->ioaccel1_blockFetchTable);
7332         return 1;
7333 }
7334
7335 static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7336 {
7337         /* Allocate ioaccel2 mode command blocks and block fetch table */
7338
7339         h->ioaccel_maxsg =
7340                 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7341         if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7342                 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7343
7344         BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7345                         IOACCEL2_COMMANDLIST_ALIGNMENT);
7346         h->ioaccel2_cmd_pool =
7347                 pci_alloc_consistent(h->pdev,
7348                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7349                         &(h->ioaccel2_cmd_pool_dhandle));
7350
7351         h->ioaccel2_blockFetchTable =
7352                 kmalloc(((h->ioaccel_maxsg + 1) *
7353                                 sizeof(u32)), GFP_KERNEL);
7354
7355         if ((h->ioaccel2_cmd_pool == NULL) ||
7356                 (h->ioaccel2_blockFetchTable == NULL))
7357                 goto clean_up;
7358
7359         memset(h->ioaccel2_cmd_pool, 0,
7360                 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
7361         return 0;
7362
7363 clean_up:
7364         if (h->ioaccel2_cmd_pool)
7365                 pci_free_consistent(h->pdev,
7366                         h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7367                         h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
7368         kfree(h->ioaccel2_blockFetchTable);
7369         return 1;
7370 }
7371
7372 static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
7373 {
7374         u32 trans_support;
7375         unsigned long transMethod = CFGTBL_Trans_Performant |
7376                                         CFGTBL_Trans_use_short_tags;
7377         int i;
7378
7379         if (hpsa_simple_mode)
7380                 return;
7381
7382         trans_support = readl(&(h->cfgtable->TransportSupport));
7383         if (!(trans_support & PERFORMANT_MODE))
7384                 return;
7385
7386         /* Check for I/O accelerator mode support */
7387         if (trans_support & CFGTBL_Trans_io_accel1) {
7388                 transMethod |= CFGTBL_Trans_io_accel1 |
7389                                 CFGTBL_Trans_enable_directed_msix;
7390                 if (hpsa_alloc_ioaccel_cmd_and_bft(h))
7391                         goto clean_up;
7392         } else {
7393                 if (trans_support & CFGTBL_Trans_io_accel2) {
7394                         transMethod |= CFGTBL_Trans_io_accel2 |
7395                                         CFGTBL_Trans_enable_directed_msix;
7396                         if (ioaccel2_alloc_cmds_and_bft(h))
7397                                 goto clean_up;
7398                 }
7399         }
7400
7401         h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
7402         hpsa_get_max_perf_mode_cmds(h);
7403         /* Performant mode ring buffer and supporting data structures */
7404         h->reply_queue_size = h->max_commands * sizeof(u64);
7405
7406         for (i = 0; i < h->nreply_queues; i++) {
7407                 h->reply_queue[i].head = pci_alloc_consistent(h->pdev,
7408                                                 h->reply_queue_size,
7409                                                 &(h->reply_queue[i].busaddr));
7410                 if (!h->reply_queue[i].head)
7411                         goto clean_up;
7412                 h->reply_queue[i].size = h->max_commands;
7413                 h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */
7414                 h->reply_queue[i].current_entry = 0;
7415         }
7416
7417         /* Need a block fetch table for performant mode */
7418         h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
7419                                 sizeof(u32)), GFP_KERNEL);
7420         if (!h->blockFetchTable)
7421                 goto clean_up;
7422
7423         hpsa_enter_performant_mode(h, trans_support);
7424         return;
7425
7426 clean_up:
7427         hpsa_free_reply_queues(h);
7428         kfree(h->blockFetchTable);
7429 }
7430
7431 static int is_accelerated_cmd(struct CommandList *c)
7432 {
7433         return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
7434 }
7435
7436 static void hpsa_drain_accel_commands(struct ctlr_info *h)
7437 {
7438         struct CommandList *c = NULL;
7439         int i, accel_cmds_out;
7440         int refcount;
7441
7442         do { /* wait for all outstanding ioaccel commands to drain out */
7443                 accel_cmds_out = 0;
7444                 for (i = 0; i < h->nr_cmds; i++) {
7445                         c = h->cmd_pool + i;
7446                         refcount = atomic_inc_return(&c->refcount);
7447                         if (refcount > 1) /* Command is allocated */
7448                                 accel_cmds_out += is_accelerated_cmd(c);
7449                         cmd_free(h, c);
7450                 }
7451                 if (accel_cmds_out <= 0)
7452                         break;
7453                 msleep(100);
7454         } while (1);
7455 }
7456
7457 /*
7458  *  This is it.  Register the PCI driver information for the cards we control;
7459  *  the OS will call our registered routines when it finds one of our cards.
7460  */
7461 static int __init hpsa_init(void)
7462 {
7463         return pci_register_driver(&hpsa_pci_driver);
7464 }
7465
7466 static void __exit hpsa_cleanup(void)
7467 {
7468         pci_unregister_driver(&hpsa_pci_driver);
7469 }
7470
7471 static void __attribute__((unused)) verify_offsets(void)
7472 {
7473 #define VERIFY_OFFSET(member, offset) \
7474         BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
7475
7476         VERIFY_OFFSET(structure_size, 0);
7477         VERIFY_OFFSET(volume_blk_size, 4);
7478         VERIFY_OFFSET(volume_blk_cnt, 8);
7479         VERIFY_OFFSET(phys_blk_shift, 16);
7480         VERIFY_OFFSET(parity_rotation_shift, 17);
7481         VERIFY_OFFSET(strip_size, 18);
7482         VERIFY_OFFSET(disk_starting_blk, 20);
7483         VERIFY_OFFSET(disk_blk_cnt, 28);
7484         VERIFY_OFFSET(data_disks_per_row, 36);
7485         VERIFY_OFFSET(metadata_disks_per_row, 38);
7486         VERIFY_OFFSET(row_cnt, 40);
7487         VERIFY_OFFSET(layout_map_count, 42);
7488         VERIFY_OFFSET(flags, 44);
7489         VERIFY_OFFSET(dekindex, 46);
7490         /* VERIFY_OFFSET(reserved, 48); */
7491         VERIFY_OFFSET(data, 64);
7492
7493 #undef VERIFY_OFFSET
7494
7495 #define VERIFY_OFFSET(member, offset) \
7496         BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
7497
7498         VERIFY_OFFSET(IU_type, 0);
7499         VERIFY_OFFSET(direction, 1);
7500         VERIFY_OFFSET(reply_queue, 2);
7501         /* VERIFY_OFFSET(reserved1, 3);  */
7502         VERIFY_OFFSET(scsi_nexus, 4);
7503         VERIFY_OFFSET(Tag, 8);
7504         VERIFY_OFFSET(cdb, 16);
7505         VERIFY_OFFSET(cciss_lun, 32);
7506         VERIFY_OFFSET(data_len, 40);
7507         VERIFY_OFFSET(cmd_priority_task_attr, 44);
7508         VERIFY_OFFSET(sg_count, 45);
7509         /* VERIFY_OFFSET(reserved3 */
7510         VERIFY_OFFSET(err_ptr, 48);
7511         VERIFY_OFFSET(err_len, 56);
7512         /* VERIFY_OFFSET(reserved4  */
7513         VERIFY_OFFSET(sg, 64);
7514
7515 #undef VERIFY_OFFSET
7516
7517 #define VERIFY_OFFSET(member, offset) \
7518         BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
7519
7520         VERIFY_OFFSET(dev_handle, 0x00);
7521         VERIFY_OFFSET(reserved1, 0x02);
7522         VERIFY_OFFSET(function, 0x03);
7523         VERIFY_OFFSET(reserved2, 0x04);
7524         VERIFY_OFFSET(err_info, 0x0C);
7525         VERIFY_OFFSET(reserved3, 0x10);
7526         VERIFY_OFFSET(err_info_len, 0x12);
7527         VERIFY_OFFSET(reserved4, 0x13);
7528         VERIFY_OFFSET(sgl_offset, 0x14);
7529         VERIFY_OFFSET(reserved5, 0x15);
7530         VERIFY_OFFSET(transfer_len, 0x1C);
7531         VERIFY_OFFSET(reserved6, 0x20);
7532         VERIFY_OFFSET(io_flags, 0x24);
7533         VERIFY_OFFSET(reserved7, 0x26);
7534         VERIFY_OFFSET(LUN, 0x34);
7535         VERIFY_OFFSET(control, 0x3C);
7536         VERIFY_OFFSET(CDB, 0x40);
7537         VERIFY_OFFSET(reserved8, 0x50);
7538         VERIFY_OFFSET(host_context_flags, 0x60);
7539         VERIFY_OFFSET(timeout_sec, 0x62);
7540         VERIFY_OFFSET(ReplyQueue, 0x64);
7541         VERIFY_OFFSET(reserved9, 0x65);
7542         VERIFY_OFFSET(tag, 0x68);
7543         VERIFY_OFFSET(host_addr, 0x70);
7544         VERIFY_OFFSET(CISS_LUN, 0x78);
7545         VERIFY_OFFSET(SG, 0x78 + 8);
7546 #undef VERIFY_OFFSET
7547 }
7548
7549 module_init(hpsa_init);
7550 module_exit(hpsa_cleanup);