/*
 *  ahci.c - AHCI SATA support
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2004-2005 Red Hat, Inc.
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  AHCI hardware documentation:
 *    http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 *    http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/dmi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"ahci"
#define DRV_VERSION	"3.0"
/* Enclosure Management Control */
#define EM_CTRL_MSG_TYPE		0x000f0000

/* Enclosure Management LED Message Type */
#define EM_MSG_LED_HBA_PORT		0x0000000f
#define EM_MSG_LED_PMP_SLOT		0x0000ff00
#define EM_MSG_LED_VALUE		0xffff0000
#define EM_MSG_LED_VALUE_ACTIVITY	0x00070000
#define EM_MSG_LED_VALUE_OFF		0xfff80000
#define EM_MSG_LED_VALUE_ON		0x00010000
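
/*
 * Note on the LED message layout, as implied by the masks above: the low
 * nibble selects the HBA port, bits 8-15 select the PM slot, and the
 * upper 16 bits carry the LED value itself, of which bits 16-18 are the
 * activity LED setting.
 */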
static int ahci_skip_host_reset;
static int ahci_ignore_sss;

module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");

module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
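
/*
 * Usage sketch: both parameters are read-only at runtime (mode 0444), so
 * they are set at load time, e.g. "modprobe ahci skip_host_reset=1", or,
 * with the driver built in, "ahci.ignore_sss=1" on the kernel command
 * line.
 */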
static int ahci_enable_alpm(struct ata_port *ap,
		enum link_pm policy);
static void ahci_disable_alpm(struct ata_port *ap);
static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
			      size_t size);
static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
					 ssize_t size);
86 AHCI_MAX_SG = 168, /* hardware max is 64K */
87 AHCI_DMA_BOUNDARY = 0xffffffff,
90 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
92 AHCI_CMD_TBL_CDB = 0x40,
93 AHCI_CMD_TBL_HDR_SZ = 0x80,
94 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
95 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
	AHCI_PORT_PRIV_DMA_SZ	= AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
				  AHCI_RX_FIS_SZ,
98 AHCI_IRQ_ON_SG = (1 << 31),
99 AHCI_CMD_ATAPI = (1 << 5),
100 AHCI_CMD_WRITE = (1 << 6),
101 AHCI_CMD_PREFETCH = (1 << 7),
102 AHCI_CMD_RESET = (1 << 8),
103 AHCI_CMD_CLR_BUSY = (1 << 10),
105 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
106 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
107 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
110 board_ahci_vt8251 = 1,
111 board_ahci_ign_iferr = 2,
112 board_ahci_sb600 = 3,
114 board_ahci_sb700 = 5, /* for SB700 and SB800 */
115 board_ahci_mcp65 = 6,
116 board_ahci_nopmp = 7,
118 /* global controller registers */
119 HOST_CAP = 0x00, /* host capabilities */
120 HOST_CTL = 0x04, /* global host control */
121 HOST_IRQ_STAT = 0x08, /* interrupt status */
122 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
123 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
124 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
125 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
128 HOST_RESET = (1 << 0), /* reset controller; self-clear */
129 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
130 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
133 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
134 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
135 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
136 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
137 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
138 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
139 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
140 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
141 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
143 /* registers for each SATA port */
144 PORT_LST_ADDR = 0x00, /* command list DMA addr */
145 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
146 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
147 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
148 PORT_IRQ_STAT = 0x10, /* interrupt status */
149 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
150 PORT_CMD = 0x18, /* port command */
151 PORT_TFDATA = 0x20, /* taskfile data */
152 PORT_SIG = 0x24, /* device TF signature */
153 PORT_CMD_ISSUE = 0x38, /* command issue */
154 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
155 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
156 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
157 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
158 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
160 /* PORT_IRQ_{STAT,MASK} bits */
161 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
162 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
163 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
164 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
165 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
166 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
167 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
168 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
170 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
171 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
172 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
173 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
174 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
175 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
176 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
177 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
178 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
	PORT_IRQ_FREEZE		= PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR |
				  PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY |
				  PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP,
	PORT_IRQ_ERROR		= PORT_IRQ_FREEZE | PORT_IRQ_TF_ERR |
				  PORT_IRQ_HBUS_DATA_ERR,
189 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
190 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
191 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
194 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
195 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
196 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
197 PORT_CMD_PMP = (1 << 17), /* PMP attached */
198 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
199 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
200 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
201 PORT_CMD_CLO = (1 << 3), /* Command list override */
202 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
203 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
204 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
206 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
207 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
208 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
209 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
211 /* hpriv->flags bits */
212 AHCI_HFLAG_NO_NCQ = (1 << 0),
213 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
214 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
215 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
216 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
217 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
218 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
219 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
220 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
221 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
	AHCI_FLAG_COMMON	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
				  ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
				  ATA_FLAG_IPM,
230 ICH_MAP = 0x90, /* ICH MAP register */
233 EM_CTL_RST = (1 << 9), /* Reset */
234 EM_CTL_TM = (1 << 8), /* Transmit Message */
235 EM_CTL_ALHD = (1 << 26), /* Activity LED */
238 struct ahci_cmd_hdr {
struct ahci_em_priv {
	enum sw_activity blink_policy;
	struct timer_list timer;
	unsigned long saved_activity;
	unsigned long activity;
	unsigned long led_state;
};
struct ahci_host_priv {
	unsigned int		flags;		/* AHCI_HFLAG_* */
	u32			cap;		/* cap to use */
	u32			port_map;	/* port map to use */
	u32			saved_cap;	/* saved initial cap */
	u32			saved_port_map;	/* saved initial port_map */
	u32			em_loc;		/* enclosure management location */
};
struct ahci_port_priv {
	struct ata_link		*active_link;
	struct ahci_cmd_hdr	*cmd_slot;
	dma_addr_t		cmd_slot_dma;
	void			*cmd_tbl;
	dma_addr_t		cmd_tbl_dma;
	void			*rx_fis;
	dma_addr_t		rx_fis_dma;
	/* for NCQ spurious interrupt analysis */
	unsigned int		ncq_saw_d2h:1;
	unsigned int		ncq_saw_dmas:1;
	unsigned int		ncq_saw_sdb:1;
	u32			intr_mask;	/* interrupts to enable */
	struct ahci_em_priv	em_priv[MAX_SLOTS]; /* enclosure management info
						     * per PM slot */
};
287 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
288 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
289 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
290 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
291 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
292 static int ahci_port_start(struct ata_port *ap);
293 static void ahci_port_stop(struct ata_port *ap);
294 static void ahci_qc_prep(struct ata_queued_cmd *qc);
295 static void ahci_freeze(struct ata_port *ap);
296 static void ahci_thaw(struct ata_port *ap);
297 static void ahci_pmp_attach(struct ata_port *ap);
298 static void ahci_pmp_detach(struct ata_port *ap);
299 static int ahci_softreset(struct ata_link *link, unsigned int *class,
300 unsigned long deadline);
301 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
302 unsigned long deadline);
303 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
304 unsigned long deadline);
305 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
306 unsigned long deadline);
307 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
308 unsigned long deadline);
309 static void ahci_postreset(struct ata_link *link, unsigned int *class);
310 static void ahci_error_handler(struct ata_port *ap);
311 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
312 static int ahci_port_resume(struct ata_port *ap);
313 static void ahci_dev_config(struct ata_device *dev);
314 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl);
315 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
318 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
319 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
320 static int ahci_pci_device_resume(struct pci_dev *pdev);
322 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
323 static ssize_t ahci_activity_store(struct ata_device *dev,
324 enum sw_activity val);
325 static void ahci_init_sw_activity(struct ata_link *link);
static struct device_attribute *ahci_shost_attrs[] = {
	&dev_attr_link_power_management_policy,
	&dev_attr_em_message_type,
	&dev_attr_em_message,
	NULL
};

static struct device_attribute *ahci_sdev_attrs[] = {
	&dev_attr_sw_activity,
	&dev_attr_unload_heads,
	NULL
};
static struct scsi_host_template ahci_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= AHCI_MAX_CMDS - 1,
	.sg_tablesize		= AHCI_MAX_SG,
	.dma_boundary		= AHCI_DMA_BOUNDARY,
	.shost_attrs		= ahci_shost_attrs,
	.sdev_attrs		= ahci_sdev_attrs,
};
static struct ata_port_operations ahci_ops = {
	.inherits		= &sata_pmp_port_ops,

	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.qc_prep		= ahci_qc_prep,
	.qc_issue		= ahci_qc_issue,
	.qc_fill_rtf		= ahci_qc_fill_rtf,

	.freeze			= ahci_freeze,
	.thaw			= ahci_thaw,
	.softreset		= ahci_softreset,
	.hardreset		= ahci_hardreset,
	.postreset		= ahci_postreset,
	.pmp_softreset		= ahci_softreset,
	.error_handler		= ahci_error_handler,
	.post_internal_cmd	= ahci_post_internal_cmd,
	.dev_config		= ahci_dev_config,

	.scr_read		= ahci_scr_read,
	.scr_write		= ahci_scr_write,
	.pmp_attach		= ahci_pmp_attach,
	.pmp_detach		= ahci_pmp_detach,

	.enable_pm		= ahci_enable_alpm,
	.disable_pm		= ahci_disable_alpm,
	.em_show		= ahci_led_show,
	.em_store		= ahci_led_store,
	.sw_activity_show	= ahci_activity_show,
	.sw_activity_store	= ahci_activity_store,
#ifdef CONFIG_PM
	.port_suspend		= ahci_port_suspend,
	.port_resume		= ahci_port_resume,
#endif
	.port_start		= ahci_port_start,
	.port_stop		= ahci_port_stop,
};
static struct ata_port_operations ahci_vt8251_ops = {
	.inherits		= &ahci_ops,
	.hardreset		= ahci_vt8251_hardreset,
};

static struct ata_port_operations ahci_p5wdh_ops = {
	.inherits		= &ahci_ops,
	.hardreset		= ahci_p5wdh_hardreset,
};

static struct ata_port_operations ahci_sb600_ops = {
	.inherits		= &ahci_ops,
	.softreset		= ahci_sb600_softreset,
	.pmp_softreset		= ahci_sb600_softreset,
};
402 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
404 static const struct ata_port_info ahci_port_info[] = {
407 .flags = AHCI_FLAG_COMMON,
408 .pio_mask = ATA_PIO4,
409 .udma_mask = ATA_UDMA6,
410 .port_ops = &ahci_ops,
412 /* board_ahci_vt8251 */
414 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
415 .flags = AHCI_FLAG_COMMON,
416 .pio_mask = ATA_PIO4,
417 .udma_mask = ATA_UDMA6,
418 .port_ops = &ahci_vt8251_ops,
420 /* board_ahci_ign_iferr */
422 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
423 .flags = AHCI_FLAG_COMMON,
424 .pio_mask = ATA_PIO4,
425 .udma_mask = ATA_UDMA6,
426 .port_ops = &ahci_ops,
428 /* board_ahci_sb600 */
430 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
431 AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
433 .flags = AHCI_FLAG_COMMON,
434 .pio_mask = ATA_PIO4,
435 .udma_mask = ATA_UDMA6,
436 .port_ops = &ahci_sb600_ops,
440 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
441 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
442 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
443 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
444 .pio_mask = ATA_PIO4,
445 .udma_mask = ATA_UDMA6,
446 .port_ops = &ahci_ops,
448 /* board_ahci_sb700, for SB700 and SB800 */
450 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
451 .flags = AHCI_FLAG_COMMON,
452 .pio_mask = ATA_PIO4,
453 .udma_mask = ATA_UDMA6,
454 .port_ops = &ahci_sb600_ops,
456 /* board_ahci_mcp65 */
458 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
459 .flags = AHCI_FLAG_COMMON,
460 .pio_mask = ATA_PIO4,
461 .udma_mask = ATA_UDMA6,
462 .port_ops = &ahci_ops,
464 /* board_ahci_nopmp */
466 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
467 .flags = AHCI_FLAG_COMMON,
468 .pio_mask = ATA_PIO4,
469 .udma_mask = ATA_UDMA6,
470 .port_ops = &ahci_ops,
474 static const struct pci_device_id ahci_pci_tbl[] = {
476 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
477 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
478 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
479 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
480 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
481 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
482 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
483 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
484 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
485 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
486 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
487 { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* ICH8 */
488 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
489 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
490 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
491 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
492 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
493 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
494 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
495 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
496 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
497 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
498 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
499 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
500 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
501 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
502 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
503 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
504 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
505 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
506 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
507 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
508 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
509 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
510 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
512 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
513 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
514 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
517 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
518 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
519 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
520 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
521 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
522 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
523 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
526 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
527 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
530 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
531 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
532 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
533 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
534 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
535 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
536 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
537 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
538 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
539 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
540 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
541 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci }, /* MCP67 */
542 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci }, /* MCP67 */
543 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci }, /* MCP67 */
544 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci }, /* MCP67 */
545 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci }, /* MCP67 */
546 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci }, /* MCP67 */
547 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci }, /* MCP67 */
548 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci }, /* MCP67 */
549 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci }, /* MCP67 */
550 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci }, /* MCP73 */
551 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci }, /* MCP73 */
552 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci }, /* MCP73 */
553 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci }, /* MCP73 */
554 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci }, /* MCP73 */
555 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci }, /* MCP73 */
556 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci }, /* MCP73 */
557 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci }, /* MCP73 */
558 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci }, /* MCP73 */
559 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci }, /* MCP73 */
560 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci }, /* MCP73 */
561 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci }, /* MCP73 */
562 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
563 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
564 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
565 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
566 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
567 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
568 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
569 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
570 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
571 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
572 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
573 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
574 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
575 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
576 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
577 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
578 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
579 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
580 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
581 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
582 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
583 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
584 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
585 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
586 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
587 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
588 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
589 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
590 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
591 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
592 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
593 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
594 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
595 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
596 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
597 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
600 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
601 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
602 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
605 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
606 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
609 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
611 /* Generic, PCI class code for AHCI */
612 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
613 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
615 { } /* terminate list */
static struct pci_driver ahci_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= ahci_pci_tbl,
	.probe			= ahci_init_one,
	.remove			= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend		= ahci_pci_device_suspend,
	.resume			= ahci_pci_device_resume,
#endif
};
static int ahci_em_messages = 1;
module_param(ahci_em_messages, int, 0444);
/* add other LED protocol types when they become supported */
MODULE_PARM_DESC(ahci_em_messages,
	"Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED)");

#if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
static int marvell_enable;
#else
static int marvell_enable = 1;
#endif
module_param(marvell_enable, int, 0644);
MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
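
/*
 * The helpers below locate a port's register bank: per the register
 * layout used in this driver, port registers start at ABAR offset 0x100
 * and each port occupies 0x80 bytes.
 */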
static inline int ahci_nr_ports(u32 cap)
{
	return (cap & 0x1f) + 1;
}

static inline void __iomem *__ahci_port_base(struct ata_host *host,
					     unsigned int port_no)
{
	void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
	return mmio + 0x100 + (port_no * 0x80);
}

static inline void __iomem *ahci_port_base(struct ata_port *ap)
{
	return __ahci_port_base(ap->host, ap->port_no);
}
663 static void ahci_enable_ahci(void __iomem *mmio)
668 /* turn on AHCI_EN */
669 tmp = readl(mmio + HOST_CTL);
670 if (tmp & HOST_AHCI_EN)
	/* Some controllers need AHCI_EN to be written multiple times.
	 * Try a few times before giving up.
	 */
676 for (i = 0; i < 5; i++) {
678 writel(tmp, mmio + HOST_CTL);
679 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
680 if (tmp & HOST_AHCI_EN)
/**
 *	ahci_save_initial_config - Save and fixup initial config values
 *	@pdev: target PCI device
 *	@hpriv: host private area to store config values
 *
 *	Some registers containing configuration info might be setup by
 *	BIOS and might be cleared on reset.  This function saves the
 *	initial values of those registers into @hpriv such that they
 *	can be restored after controller reset.
 *
 *	If inconsistent, config values are fixed up by this function.
 *
 *	LOCKING:
 *	None.
 */
703 static void ahci_save_initial_config(struct pci_dev *pdev,
704 struct ahci_host_priv *hpriv)
706 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
711 /* make sure AHCI mode is enabled before accessing CAP */
712 ahci_enable_ahci(mmio);
	/* Values prefixed with saved_ are written back to host after
	 * reset.  Values without are used for driver operation.
	 */
717 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
718 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
720 /* some chips have errata preventing 64bit use */
721 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
722 dev_printk(KERN_INFO, &pdev->dev,
723 "controller can't do 64bit DMA, forcing 32bit\n");
727 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
728 dev_printk(KERN_INFO, &pdev->dev,
729 "controller can't do NCQ, turning off CAP_NCQ\n");
730 cap &= ~HOST_CAP_NCQ;
733 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
734 dev_printk(KERN_INFO, &pdev->dev,
735 "controller can do NCQ, turning on CAP_NCQ\n");
739 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
740 dev_printk(KERN_INFO, &pdev->dev,
741 "controller can't do PMP, turning off CAP_PMP\n");
742 cap &= ~HOST_CAP_PMP;
745 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
747 dev_printk(KERN_INFO, &pdev->dev,
748 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
	/*
	 * Temporary Marvell 6145 hack: PATA port presence
	 * is asserted through the standard AHCI port
	 * presence register, as bit 4 (counting from 0)
	 */
758 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
759 if (pdev->device == 0x6121)
763 dev_printk(KERN_ERR, &pdev->dev,
764 "MV_AHCI HACK: port_map %x -> %x\n",
767 dev_printk(KERN_ERR, &pdev->dev,
768 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
773 /* cross check port_map and cap.n_ports */
777 for (i = 0; i < AHCI_MAX_PORTS; i++)
778 if (port_map & (1 << i))
781 /* If PI has more ports than n_ports, whine, clear
782 * port_map and let it be generated from n_ports.
784 if (map_ports > ahci_nr_ports(cap)) {
785 dev_printk(KERN_WARNING, &pdev->dev,
786 "implemented port map (0x%x) contains more "
787 "ports than nr_ports (%u), using nr_ports\n",
788 port_map, ahci_nr_ports(cap));
793 /* fabricate port_map from cap.nr_ports */
795 port_map = (1 << ahci_nr_ports(cap)) - 1;
796 dev_printk(KERN_WARNING, &pdev->dev,
797 "forcing PORTS_IMPL to 0x%x\n", port_map);
799 /* write the fixed up value to the PI register */
800 hpriv->saved_port_map = port_map;
803 /* record values to use during operation */
805 hpriv->port_map = port_map;
/**
 *	ahci_restore_initial_config - Restore initial config
 *	@host: target ATA host
 *
 *	Restore initial config stored by ahci_save_initial_config().
 */
817 static void ahci_restore_initial_config(struct ata_host *host)
819 struct ahci_host_priv *hpriv = host->private_data;
820 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
822 writel(hpriv->saved_cap, mmio + HOST_CAP);
823 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
824 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
827 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
829 static const int offset[] = {
830 [SCR_STATUS] = PORT_SCR_STAT,
831 [SCR_CONTROL] = PORT_SCR_CTL,
832 [SCR_ERROR] = PORT_SCR_ERR,
833 [SCR_ACTIVE] = PORT_SCR_ACT,
834 [SCR_NOTIFICATION] = PORT_SCR_NTF,
836 struct ahci_host_priv *hpriv = ap->host->private_data;
838 if (sc_reg < ARRAY_SIZE(offset) &&
839 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
840 return offset[sc_reg];
844 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
846 void __iomem *port_mmio = ahci_port_base(link->ap);
847 int offset = ahci_scr_offset(link->ap, sc_reg);
850 *val = readl(port_mmio + offset);
856 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
858 void __iomem *port_mmio = ahci_port_base(link->ap);
859 int offset = ahci_scr_offset(link->ap, sc_reg);
862 writel(val, port_mmio + offset);
868 static void ahci_start_engine(struct ata_port *ap)
870 void __iomem *port_mmio = ahci_port_base(ap);
874 tmp = readl(port_mmio + PORT_CMD);
875 tmp |= PORT_CMD_START;
876 writel(tmp, port_mmio + PORT_CMD);
877 readl(port_mmio + PORT_CMD); /* flush */
880 static int ahci_stop_engine(struct ata_port *ap)
882 void __iomem *port_mmio = ahci_port_base(ap);
885 tmp = readl(port_mmio + PORT_CMD);
887 /* check if the HBA is idle */
888 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
891 /* setting HBA to idle */
892 tmp &= ~PORT_CMD_START;
893 writel(tmp, port_mmio + PORT_CMD);
895 /* wait for engine to stop. This could be as long as 500 msec */
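	/* ata_wait_register() polls PORT_CMD every 1 ms, for up to 500 ms,
	 * until PORT_CMD_LIST_ON deasserts; the last value read is returned.
	 */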
896 tmp = ata_wait_register(port_mmio + PORT_CMD,
897 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
898 if (tmp & PORT_CMD_LIST_ON)
904 static void ahci_start_fis_rx(struct ata_port *ap)
906 void __iomem *port_mmio = ahci_port_base(ap);
907 struct ahci_host_priv *hpriv = ap->host->private_data;
908 struct ahci_port_priv *pp = ap->private_data;
911 /* set FIS registers */
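	/* Program the upper 32 address bits only on HBAs that advertise
	 * 64-bit addressing (HOST_CAP_64); the (x >> 16) >> 16 form avoids
	 * an undefined 32-bit shift when dma_addr_t is 32 bits wide.
	 */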
912 if (hpriv->cap & HOST_CAP_64)
913 writel((pp->cmd_slot_dma >> 16) >> 16,
914 port_mmio + PORT_LST_ADDR_HI);
915 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
917 if (hpriv->cap & HOST_CAP_64)
918 writel((pp->rx_fis_dma >> 16) >> 16,
919 port_mmio + PORT_FIS_ADDR_HI);
920 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
922 /* enable FIS reception */
923 tmp = readl(port_mmio + PORT_CMD);
924 tmp |= PORT_CMD_FIS_RX;
925 writel(tmp, port_mmio + PORT_CMD);
928 readl(port_mmio + PORT_CMD);
931 static int ahci_stop_fis_rx(struct ata_port *ap)
933 void __iomem *port_mmio = ahci_port_base(ap);
936 /* disable FIS reception */
937 tmp = readl(port_mmio + PORT_CMD);
938 tmp &= ~PORT_CMD_FIS_RX;
939 writel(tmp, port_mmio + PORT_CMD);
941 /* wait for completion, spec says 500ms, give it 1000 */
942 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
943 PORT_CMD_FIS_ON, 10, 1000);
944 if (tmp & PORT_CMD_FIS_ON)
950 static void ahci_power_up(struct ata_port *ap)
952 struct ahci_host_priv *hpriv = ap->host->private_data;
953 void __iomem *port_mmio = ahci_port_base(ap);
956 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
959 if (hpriv->cap & HOST_CAP_SSS) {
960 cmd |= PORT_CMD_SPIN_UP;
961 writel(cmd, port_mmio + PORT_CMD);
965 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
968 static void ahci_disable_alpm(struct ata_port *ap)
970 struct ahci_host_priv *hpriv = ap->host->private_data;
971 void __iomem *port_mmio = ahci_port_base(ap);
973 struct ahci_port_priv *pp = ap->private_data;
975 /* IPM bits should be disabled by libata-core */
976 /* get the existing command bits */
977 cmd = readl(port_mmio + PORT_CMD);
979 /* disable ALPM and ASP */
980 cmd &= ~PORT_CMD_ASP;
981 cmd &= ~PORT_CMD_ALPE;
983 /* force the interface back to active */
984 cmd |= PORT_CMD_ICC_ACTIVE;
986 /* write out new cmd value */
987 writel(cmd, port_mmio + PORT_CMD);
988 cmd = readl(port_mmio + PORT_CMD);
990 /* wait 10ms to be sure we've come out of any low power state */
993 /* clear out any PhyRdy stuff from interrupt status */
994 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
996 /* go ahead and clean out PhyRdy Change from Serror too */
997 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
	/*
	 * Clear flag to indicate that we should ignore all PhyRdy
	 * state changes
	 */
1003 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
	/* Enable interrupts on Phy Ready. */
1008 pp->intr_mask |= PORT_IRQ_PHYRDY;
1009 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
	/*
	 * don't change the link pm policy - we can be called
	 * just to turn off link pm temporarily
	 */
}
1017 static int ahci_enable_alpm(struct ata_port *ap,
1018 enum link_pm policy)
1020 struct ahci_host_priv *hpriv = ap->host->private_data;
1021 void __iomem *port_mmio = ahci_port_base(ap);
1023 struct ahci_port_priv *pp = ap->private_data;
1026 /* Make sure the host is capable of link power management */
1027 if (!(hpriv->cap & HOST_CAP_ALPM))
1031 case MAX_PERFORMANCE:
		/*
		 * if we came here with NOT_AVAILABLE,
		 * it just means this is the first time we
		 * have tried to enable - default to max performance,
		 * and let the user go to lower power modes on request.
		 */
1039 ahci_disable_alpm(ap);
1042 /* configure HBA to enter SLUMBER */
1046 /* configure HBA to enter PARTIAL */
	/*
	 * Disable interrupts on Phy Ready. This keeps us from
	 * getting woken up due to spurious phy ready interrupts
	 * TBD - Hot plug should be done via polling now, is
	 * that even supported?
	 */
1059 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1060 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
	/*
	 * Set a flag to indicate that we should ignore all PhyRdy
	 * state changes since these can happen now whenever we
	 * change link state
	 */
1067 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1069 /* get the existing command bits */
1070 cmd = readl(port_mmio + PORT_CMD);
	/*
	 * Set ASP based on Policy
	 */
	/*
	 * Setting this bit will instruct the HBA to aggressively
	 * enter a lower power link state when it's appropriate and
	 * based on the value set above for ASP
	 */
1082 cmd |= PORT_CMD_ALPE;
1084 /* write out new cmd value */
1085 writel(cmd, port_mmio + PORT_CMD);
1086 cmd = readl(port_mmio + PORT_CMD);
1088 /* IPM bits should be set by libata-core */
1093 static void ahci_power_down(struct ata_port *ap)
1095 struct ahci_host_priv *hpriv = ap->host->private_data;
1096 void __iomem *port_mmio = ahci_port_base(ap);
1099 if (!(hpriv->cap & HOST_CAP_SSS))
1102 /* put device into listen mode, first set PxSCTL.DET to 0 */
1103 scontrol = readl(port_mmio + PORT_SCR_CTL);
1105 writel(scontrol, port_mmio + PORT_SCR_CTL);
1107 /* then set PxCMD.SUD to 0 */
1108 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1109 cmd &= ~PORT_CMD_SPIN_UP;
1110 writel(cmd, port_mmio + PORT_CMD);
1114 static void ahci_start_port(struct ata_port *ap)
1116 struct ahci_port_priv *pp = ap->private_data;
1117 struct ata_link *link;
1118 struct ahci_em_priv *emp;
1122 /* enable FIS reception */
1123 ahci_start_fis_rx(ap);
1126 ahci_start_engine(ap);
1129 if (ap->flags & ATA_FLAG_EM) {
1130 ata_for_each_link(link, ap, EDGE) {
1131 emp = &pp->em_priv[link->pmp];
			/* EM Transmit bit may be busy during init */
1134 for (i = 0; i < MAX_RETRY; i++) {
1135 rc = ahci_transmit_led_message(ap,
1146 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1147 ata_for_each_link(link, ap, EDGE)
1148 ahci_init_sw_activity(link);
1152 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1157 rc = ahci_stop_engine(ap);
1159 *emsg = "failed to stop engine";
1163 /* disable FIS reception */
1164 rc = ahci_stop_fis_rx(ap);
1166 *emsg = "failed stop FIS RX";
1173 static int ahci_reset_controller(struct ata_host *host)
1175 struct pci_dev *pdev = to_pci_dev(host->dev);
1176 struct ahci_host_priv *hpriv = host->private_data;
1177 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1180 /* we must be in AHCI mode, before using anything
1181 * AHCI-specific, such as HOST_RESET.
1183 ahci_enable_ahci(mmio);
1185 /* global controller reset */
1186 if (!ahci_skip_host_reset) {
1187 tmp = readl(mmio + HOST_CTL);
1188 if ((tmp & HOST_RESET) == 0) {
1189 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1190 readl(mmio + HOST_CTL); /* flush */
1194 * to perform host reset, OS should set HOST_RESET
1195 * and poll until this bit is read to be "0".
1196 * reset must complete within 1 second, or
1197 * the hardware should be considered fried.
1199 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1200 HOST_RESET, 10, 1000);
1202 if (tmp & HOST_RESET) {
1203 dev_printk(KERN_ERR, host->dev,
1204 "controller reset failed (0x%x)\n", tmp);
1208 /* turn on AHCI mode */
1209 ahci_enable_ahci(mmio);
1211 /* Some registers might be cleared on reset. Restore
1214 ahci_restore_initial_config(host);
1216 dev_printk(KERN_INFO, host->dev,
1217 "skipping global host reset\n");
1219 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
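		/*
		 * Presumably this touches the Intel PCS (Port Control and
		 * Status) register at config offset 0x92: make sure the
		 * enable bits for all implemented ports are set.
		 */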
1223 pci_read_config_word(pdev, 0x92, &tmp16);
1224 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1225 tmp16 |= hpriv->port_map;
1226 pci_write_config_word(pdev, 0x92, tmp16);
1233 static void ahci_sw_activity(struct ata_link *link)
1235 struct ata_port *ap = link->ap;
1236 struct ahci_port_priv *pp = ap->private_data;
1237 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1239 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1243 if (!timer_pending(&emp->timer))
1244 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
1247 static void ahci_sw_activity_blink(unsigned long arg)
1249 struct ata_link *link = (struct ata_link *)arg;
1250 struct ata_port *ap = link->ap;
1251 struct ahci_port_priv *pp = ap->private_data;
1252 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1253 unsigned long led_message = emp->led_state;
1254 u32 activity_led_state;
1255 unsigned long flags;
1257 led_message &= EM_MSG_LED_VALUE;
1258 led_message |= ap->port_no | (link->pmp << 8);
1260 /* check to see if we've had activity. If so,
1261 * toggle state of LED and reset timer. If not,
	 * turn LED to desired idle state.
	 */
1264 spin_lock_irqsave(ap->lock, flags);
1265 if (emp->saved_activity != emp->activity) {
1266 emp->saved_activity = emp->activity;
1267 /* get the current LED state */
1268 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1270 if (activity_led_state)
1271 activity_led_state = 0;
1273 activity_led_state = 1;
1275 /* clear old state */
1276 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1279 led_message |= (activity_led_state << 16);
1280 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1282 /* switch to idle */
1283 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1284 if (emp->blink_policy == BLINK_OFF)
1285 led_message |= (1 << 16);
1287 spin_unlock_irqrestore(ap->lock, flags);
1288 ahci_transmit_led_message(ap, led_message, 4);
1291 static void ahci_init_sw_activity(struct ata_link *link)
1293 struct ata_port *ap = link->ap;
1294 struct ahci_port_priv *pp = ap->private_data;
1295 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1297 /* init activity stats, setup timer */
1298 emp->saved_activity = emp->activity = 0;
1299 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1301 /* check our blink policy and set flag for link if it's enabled */
1302 if (emp->blink_policy)
1303 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1306 static int ahci_reset_em(struct ata_host *host)
1308 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1311 em_ctl = readl(mmio + HOST_EM_CTL);
1312 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1315 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
1319 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1322 struct ahci_host_priv *hpriv = ap->host->private_data;
1323 struct ahci_port_priv *pp = ap->private_data;
1324 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
1326 u32 message[] = {0, 0};
1327 unsigned long flags;
1329 struct ahci_em_priv *emp;
1331 /* get the slot number from the message */
1332 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1333 if (pmp < MAX_SLOTS)
1334 emp = &pp->em_priv[pmp];
1338 spin_lock_irqsave(ap->lock, flags);
	/*
	 * if we are still busy transmitting a previous message,
	 * do not allow
	 */
1344 em_ctl = readl(mmio + HOST_EM_CTL);
1345 if (em_ctl & EM_CTL_TM) {
1346 spin_unlock_irqrestore(ap->lock, flags);
	/*
	 * create message header - this is all zero except for
	 * the message size, which is 4 bytes.
	 */
1354 message[0] |= (4 << 8);
1356 /* ignore 0:4 of byte zero, fill in port info yourself */
1357 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1359 /* write message to EM_LOC */
1360 writel(message[0], mmio + hpriv->em_loc);
1361 writel(message[1], mmio + hpriv->em_loc+4);
1363 /* save off new led state for port/slot */
1364 emp->led_state = state;
	/*
	 * tell hardware to transmit the message
	 */
1369 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1371 spin_unlock_irqrestore(ap->lock, flags);
1375 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1377 struct ahci_port_priv *pp = ap->private_data;
1378 struct ata_link *link;
1379 struct ahci_em_priv *emp;
1382 ata_for_each_link(link, ap, EDGE) {
1383 emp = &pp->em_priv[link->pmp];
1384 rc += sprintf(buf, "%lx\n", emp->led_state);
1389 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1394 struct ahci_port_priv *pp = ap->private_data;
1395 struct ahci_em_priv *emp;
1397 state = simple_strtoul(buf, NULL, 0);
1399 /* get the slot number from the message */
1400 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1401 if (pmp < MAX_SLOTS)
1402 emp = &pp->em_priv[pmp];
1406 /* mask off the activity bits if we are in sw_activity
1407 * mode, user should turn off sw_activity before setting
	 * activity led through em_message
	 */
1410 if (emp->blink_policy)
1411 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1413 return ahci_transmit_led_message(ap, state, size);
1416 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1418 struct ata_link *link = dev->link;
1419 struct ata_port *ap = link->ap;
1420 struct ahci_port_priv *pp = ap->private_data;
1421 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1422 u32 port_led_state = emp->led_state;
1424 /* save the desired Activity LED behavior */
1427 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1429 /* set the LED to OFF */
1430 port_led_state &= EM_MSG_LED_VALUE_OFF;
1431 port_led_state |= (ap->port_no | (link->pmp << 8));
1432 ahci_transmit_led_message(ap, port_led_state, 4);
1434 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1435 if (val == BLINK_OFF) {
1436 /* set LED to ON for idle */
1437 port_led_state &= EM_MSG_LED_VALUE_OFF;
1438 port_led_state |= (ap->port_no | (link->pmp << 8));
1439 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1440 ahci_transmit_led_message(ap, port_led_state, 4);
1443 emp->blink_policy = val;
1447 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1449 struct ata_link *link = dev->link;
1450 struct ata_port *ap = link->ap;
1451 struct ahci_port_priv *pp = ap->private_data;
1452 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
	/* display the saved value of activity behavior for this
	 * disk.
	 */
1457 return sprintf(buf, "%d\n", emp->blink_policy);
1460 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1461 int port_no, void __iomem *mmio,
1462 void __iomem *port_mmio)
1464 const char *emsg = NULL;
1468 /* make sure port is not active */
1469 rc = ahci_deinit_port(ap, &emsg);
1471 dev_printk(KERN_WARNING, &pdev->dev,
1472 "%s (%d)\n", emsg, rc);
1475 tmp = readl(port_mmio + PORT_SCR_ERR);
1476 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1477 writel(tmp, port_mmio + PORT_SCR_ERR);
1479 /* clear port IRQ */
1480 tmp = readl(port_mmio + PORT_IRQ_STAT);
1481 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1483 writel(tmp, port_mmio + PORT_IRQ_STAT);
1485 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1488 static void ahci_init_controller(struct ata_host *host)
1490 struct ahci_host_priv *hpriv = host->private_data;
1491 struct pci_dev *pdev = to_pci_dev(host->dev);
1492 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
1494 void __iomem *port_mmio;
1498 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1499 if (pdev->device == 0x6121)
1503 port_mmio = __ahci_port_base(host, mv);
1505 writel(0, port_mmio + PORT_IRQ_MASK);
1507 /* clear port IRQ */
1508 tmp = readl(port_mmio + PORT_IRQ_STAT);
1509 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1511 writel(tmp, port_mmio + PORT_IRQ_STAT);
1514 for (i = 0; i < host->n_ports; i++) {
1515 struct ata_port *ap = host->ports[i];
1517 port_mmio = ahci_port_base(ap);
1518 if (ata_port_is_dummy(ap))
1521 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1524 tmp = readl(mmio + HOST_CTL);
1525 VPRINTK("HOST_CTL 0x%x\n", tmp);
1526 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1527 tmp = readl(mmio + HOST_CTL);
1528 VPRINTK("HOST_CTL 0x%x\n", tmp);
1531 static void ahci_dev_config(struct ata_device *dev)
1533 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1535 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1536 dev->max_sectors = 255;
1537 ata_dev_printk(dev, KERN_INFO,
1538 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1542 static unsigned int ahci_dev_classify(struct ata_port *ap)
1544 void __iomem *port_mmio = ahci_port_base(ap);
1545 struct ata_taskfile tf;
1548 tmp = readl(port_mmio + PORT_SIG);
1549 tf.lbah = (tmp >> 24) & 0xff;
1550 tf.lbam = (tmp >> 16) & 0xff;
1551 tf.lbal = (tmp >> 8) & 0xff;
1552 tf.nsect = (tmp) & 0xff;
1554 return ata_dev_classify(&tf);
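
/*
 * Each command slot's header points at a fixed slice of the per-port
 * command-table buffer: slot N uses cmd_tbl + N * AHCI_CMD_TBL_SZ, and
 * the opts word carries the FIS length, flags, PMP port and S/G count.
 */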
1557 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1560 dma_addr_t cmd_tbl_dma;
1562 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1564 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1565 pp->cmd_slot[tag].status = 0;
1566 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1567 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1570 static int ahci_kick_engine(struct ata_port *ap, int force_restart)
1572 void __iomem *port_mmio = ahci_port_base(ap);
1573 struct ahci_host_priv *hpriv = ap->host->private_data;
1574 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1578 /* do we need to kick the port? */
1579 busy = status & (ATA_BUSY | ATA_DRQ);
1580 if (!busy && !force_restart)
1584 rc = ahci_stop_engine(ap);
1588 /* need to do CLO? */
1594 if (!(hpriv->cap & HOST_CAP_CLO)) {
1600 tmp = readl(port_mmio + PORT_CMD);
1601 tmp |= PORT_CMD_CLO;
1602 writel(tmp, port_mmio + PORT_CMD);
1605 tmp = ata_wait_register(port_mmio + PORT_CMD,
1606 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1607 if (tmp & PORT_CMD_CLO)
1610 /* restart engine */
1612 ahci_start_engine(ap);
1616 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1617 struct ata_taskfile *tf, int is_cmd, u16 flags,
1618 unsigned long timeout_msec)
1620 const u32 cmd_fis_len = 5; /* five dwords */
1621 struct ahci_port_priv *pp = ap->private_data;
1622 void __iomem *port_mmio = ahci_port_base(ap);
1623 u8 *fis = pp->cmd_tbl;
1626 /* prep the command */
1627 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1628 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1631 writel(1, port_mmio + PORT_CMD_ISSUE);
1634 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1637 ahci_kick_engine(ap, 1);
1641 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1646 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1647 int pmp, unsigned long deadline,
1648 int (*check_ready)(struct ata_link *link))
1650 struct ata_port *ap = link->ap;
1651 const char *reason = NULL;
1652 unsigned long now, msecs;
1653 struct ata_taskfile tf;
1658 /* prepare for SRST (AHCI-1.1 10.4.1) */
1659 rc = ahci_kick_engine(ap, 1);
1660 if (rc && rc != -EOPNOTSUPP)
1661 ata_link_printk(link, KERN_WARNING,
1662 "failed to reset engine (errno=%d)\n", rc);
1664 ata_tf_init(link->device, &tf);
1666 /* issue the first D2H Register FIS */
1669 if (time_after(now, deadline))
1670 msecs = jiffies_to_msecs(deadline - now);
1673 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1674 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1676 reason = "1st FIS failed";
1680 /* spec says at least 5us, but be generous and sleep for 1ms */
1683 /* issue the second D2H Register FIS */
1684 tf.ctl &= ~ATA_SRST;
1685 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1687 /* wait for link to become ready */
1688 rc = ata_wait_after_reset(link, deadline, check_ready);
1689 /* link occupied, -ENODEV too is an error */
1691 reason = "device not ready";
1694 *class = ahci_dev_classify(ap);
1696 DPRINTK("EXIT, class=%u\n", *class);
1700 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1704 static int ahci_check_ready(struct ata_link *link)
1706 void __iomem *port_mmio = ahci_port_base(link->ap);
1707 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1709 return ata_check_ready(status);
1712 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1713 unsigned long deadline)
1715 int pmp = sata_srst_pmp(link);
1719 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1722 static int ahci_sb600_check_ready(struct ata_link *link)
1724 void __iomem *port_mmio = ahci_port_base(link->ap);
1725 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1726 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
	/*
	 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
	 * which can save timeout delay.
	 */
1732 if (irq_status & PORT_IRQ_BAD_PMP)
1735 return ata_check_ready(status);
1738 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1739 unsigned long deadline)
1741 struct ata_port *ap = link->ap;
1742 void __iomem *port_mmio = ahci_port_base(ap);
1743 int pmp = sata_srst_pmp(link);
1749 rc = ahci_do_softreset(link, class, pmp, deadline,
1750 ahci_sb600_check_ready);
	/*
	 * Soft reset fails on some ATI chips with IPMS set when PMP
	 * is enabled but a SATA HDD/ODD is connected to the SATA port;
	 * in that case, retry the soft reset with pmp == 0.
	 */
1758 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1759 if (irq_sts & PORT_IRQ_BAD_PMP) {
1760 ata_link_printk(link, KERN_WARNING,
1761 "failed due to HW bug, retry pmp=0\n");
1762 rc = ahci_do_softreset(link, class, 0, deadline,
1770 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1771 unsigned long deadline)
1773 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1774 struct ata_port *ap = link->ap;
1775 struct ahci_port_priv *pp = ap->private_data;
1776 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1777 struct ata_taskfile tf;
1783 ahci_stop_engine(ap);
1785 /* clear D2H reception area to properly wait for D2H FIS */
1786 ata_tf_init(link->device, &tf);
1788 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1790 rc = sata_link_hardreset(link, timing, deadline, &online,
1793 ahci_start_engine(ap);
1796 *class = ahci_dev_classify(ap);
1798 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1802 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1803 unsigned long deadline)
1805 struct ata_port *ap = link->ap;
1811 ahci_stop_engine(ap);
1813 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1814 deadline, &online, NULL);
1816 ahci_start_engine(ap);
1818 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1820 /* vt8251 doesn't clear BSY on signature FIS reception,
1821 * request follow-up softreset.
1823 return online ? -EAGAIN : rc;
1826 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
1827 unsigned long deadline)
1829 struct ata_port *ap = link->ap;
1830 struct ahci_port_priv *pp = ap->private_data;
1831 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1832 struct ata_taskfile tf;
1836 ahci_stop_engine(ap);
1838 /* clear D2H reception area to properly wait for D2H FIS */
1839 ata_tf_init(link->device, &tf);
1841 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1843 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1844 deadline, &online, NULL);
1846 ahci_start_engine(ap);
1848 /* The pseudo configuration device on SIMG4726 attached to
1849 * ASUS P5W-DH Deluxe doesn't send signature FIS after
1850 * hardreset if no device is attached to the first downstream
1851 * port && the pseudo device locks up on SRST w/ PMP==0. To
1852 * work around this, wait for !BSY only briefly. If BSY isn't
1853 * cleared, perform CLO and proceed to IDENTIFY (achieved by
1854 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
1856 * Wait for two seconds. Devices attached to downstream port
1857 * which can't process the following IDENTIFY after this will
1858 * have to be reset again. For most cases, this should
	 * suffice while making probing snappish enough.
	 */
1862 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
1865 ahci_kick_engine(ap, 0);
1870 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1872 struct ata_port *ap = link->ap;
1873 void __iomem *port_mmio = ahci_port_base(ap);
1876 ata_std_postreset(link, class);
1878 /* Make sure port's ATAPI bit is set appropriately */
1879 new_tmp = tmp = readl(port_mmio + PORT_CMD);
1880 if (*class == ATA_DEV_ATAPI)
1881 new_tmp |= PORT_CMD_ATAPI;
1883 new_tmp &= ~PORT_CMD_ATAPI;
1884 if (new_tmp != tmp) {
1885 writel(new_tmp, port_mmio + PORT_CMD);
1886 readl(port_mmio + PORT_CMD); /* flush */
1890 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1892 struct scatterlist *sg;
1893 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
	unsigned int si;

	/*
	 * Next, the S/G list.
	 */
1901 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1902 dma_addr_t addr = sg_dma_address(sg);
1903 u32 sg_len = sg_dma_len(sg);
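
		/* Each PRD entry takes the DMA address split into low/high
		 * dwords and a byte count encoded as (length - 1), per the
		 * AHCI PRDT format.
		 */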
1905 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1906 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1907 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1913 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1915 struct ata_port *ap = qc->ap;
1916 struct ahci_port_priv *pp = ap->private_data;
1917 int is_atapi = ata_is_atapi(qc->tf.protocol);
1920 const u32 cmd_fis_len = 5; /* five dwords */
1921 unsigned int n_elem;
	/*
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
1927 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1929 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
1931 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1932 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
1936 if (qc->flags & ATA_QCFLAG_DMAMAP)
1937 n_elem = ahci_fill_sg(qc, cmd_tbl);
	/*
	 * Fill in command slot information.
	 */
1942 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
1943 if (qc->tf.flags & ATA_TFLAG_WRITE)
1944 opts |= AHCI_CMD_WRITE;
1946 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
1948 ahci_fill_cmd_slot(pp, qc->tag, opts);
1951 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1953 struct ahci_host_priv *hpriv = ap->host->private_data;
1954 struct ahci_port_priv *pp = ap->private_data;
1955 struct ata_eh_info *host_ehi = &ap->link.eh_info;
1956 struct ata_link *link = NULL;
1957 struct ata_queued_cmd *active_qc;
1958 struct ata_eh_info *active_ehi;
1961 /* determine active link */
1962 ata_for_each_link(link, ap, EDGE)
1963 if (ata_link_active(link))
1968 active_qc = ata_qc_from_tag(ap, link->active_tag);
1969 active_ehi = &link->eh_info;
1971 /* record irq stat */
1972 ata_ehi_clear_desc(host_ehi);
1973 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
1975 /* AHCI needs SError cleared; otherwise, it might lock up */
1976 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
1977 ahci_scr_write(&ap->link, SCR_ERROR, serror);
1978 host_ehi->serror |= serror;
1980 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
1981 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
1982 irq_stat &= ~PORT_IRQ_IF_ERR;
1984 if (irq_stat & PORT_IRQ_TF_ERR) {
1985 /* If qc is active, charge it; otherwise, the active
1986 * link. There's no active qc on NCQ errors. It will
		 * be determined by EH by reading log page 10h.
		 */
		if (active_qc)
			active_qc->err_mask |= AC_ERR_DEV;
		else
			active_ehi->err_mask |= AC_ERR_DEV;
1994 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
1995 host_ehi->serror &= ~SERR_INTERNAL;
1998 if (irq_stat & PORT_IRQ_UNK_FIS) {
1999 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2001 active_ehi->err_mask |= AC_ERR_HSM;
2002 active_ehi->action |= ATA_EH_RESET;
2003 ata_ehi_push_desc(active_ehi,
2004 "unknown FIS %08x %08x %08x %08x" ,
2005 unk[0], unk[1], unk[2], unk[3]);
2008 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2009 active_ehi->err_mask |= AC_ERR_HSM;
2010 active_ehi->action |= ATA_EH_RESET;
2011 ata_ehi_push_desc(active_ehi, "incorrect PMP");
2014 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2015 host_ehi->err_mask |= AC_ERR_HOST_BUS;
2016 host_ehi->action |= ATA_EH_RESET;
2017 ata_ehi_push_desc(host_ehi, "host bus error");
2020 if (irq_stat & PORT_IRQ_IF_ERR) {
2021 host_ehi->err_mask |= AC_ERR_ATA_BUS;
2022 host_ehi->action |= ATA_EH_RESET;
2023 ata_ehi_push_desc(host_ehi, "interface fatal error");
2026 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2027 ata_ehi_hotplugged(host_ehi);
2028 ata_ehi_push_desc(host_ehi, "%s",
2029 irq_stat & PORT_IRQ_CONNECT ?
2030 "connection status changed" : "PHY RDY changed");
2033 /* okay, let's hand over to EH */
2035 if (irq_stat & PORT_IRQ_FREEZE)
2036 ata_port_freeze(ap);
2041 static void ahci_port_intr(struct ata_port *ap)
2043 void __iomem *port_mmio = ahci_port_base(ap);
2044 struct ata_eh_info *ehi = &ap->link.eh_info;
2045 struct ahci_port_priv *pp = ap->private_data;
2046 struct ahci_host_priv *hpriv = ap->host->private_data;
2047 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2048 u32 status, qc_active;
2051 status = readl(port_mmio + PORT_IRQ_STAT);
2052 writel(status, port_mmio + PORT_IRQ_STAT);
2054 /* ignore BAD_PMP while resetting */
2055 if (unlikely(resetting))
2056 status &= ~PORT_IRQ_BAD_PMP;
2058 /* If we are getting PhyRdy, this is
2059 * just a power state change; clear the
2060 * interrupt bit here, plus the PhyRdy/Comm
2061 * Wake bits from SError
2063 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2064 (status & PORT_IRQ_PHYRDY)) {
2065 status &= ~PORT_IRQ_PHYRDY;
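/* bit 16 is SERR_PHYRDY_CHG, bit 18 is SERR_COMM_WAKE */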
2066 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2069 if (unlikely(status & PORT_IRQ_ERROR)) {
2070 ahci_error_intr(ap, status);
2074 if (status & PORT_IRQ_SDB_FIS) {
2075 /* If SNotification is available, leave notification
2076 * handling to sata_async_notification(). If not,
2077 * emulate it by snooping SDB FIS RX area.
2079 * Snooping FIS RX area is probably cheaper than
2080 * poking SNotification but some controllers which
2081 * implement SNotification, ICH9 for example, don't
2082 * store AN SDB FIS into receive area.
2084 if (hpriv->cap & HOST_CAP_SNTF)
2085 sata_async_notification(ap);
2087 /* If the 'N' bit in word 0 of the FIS is set,
2088 * we just received asynchronous notification.
2089 * Tell libata about it.
2091 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2092 u32 f0 = le32_to_cpu(f[0]);
2095 sata_async_notification(ap);
2099 /* pp->active_link is valid iff any command is in flight */
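/* NCQ completions are tracked via PORT_SCR_ACT, all others via PORT_CMD_ISSUE */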
2100 if (ap->qc_active && pp->active_link->sactive)
2101 qc_active = readl(port_mmio + PORT_SCR_ACT);
2103 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2105 rc = ata_qc_complete_multiple(ap, qc_active);
2107 /* while resetting, invalid completions are expected */
2108 if (unlikely(rc < 0 && !resetting)) {
2109 ehi->err_mask |= AC_ERR_HSM;
2110 ehi->action |= ATA_EH_RESET;
2111 ata_port_freeze(ap);
2115 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2117 struct ata_host *host = dev_instance;
2118 struct ahci_host_priv *hpriv;
2119 unsigned int i, handled = 0;
2121 u32 irq_stat, irq_masked;
2125 hpriv = host->private_data;
2126 mmio = host->iomap[AHCI_PCI_BAR];
2128 /* sigh. 0xffffffff is a valid return from h/w */
2129 irq_stat = readl(mmio + HOST_IRQ_STAT);
2133 irq_masked = irq_stat & hpriv->port_map;
2135 spin_lock(&host->lock);
2137 for (i = 0; i < host->n_ports; i++) {
2138 struct ata_port *ap;
2140 if (!(irq_masked & (1 << i)))
2143 ap = host->ports[i];
2146 VPRINTK("port %u\n", i);
2148 VPRINTK("port %u (no irq)\n", i);
2149 if (ata_ratelimit())
2150 dev_printk(KERN_WARNING, host->dev,
2151 "interrupt on disabled port %u\n", i);
2157 /* HOST_IRQ_STAT behaves as a level-triggered latch, meaning that
2158 * it should be cleared after all the port events are cleared;
2159 * otherwise, it will raise a spurious interrupt after each
2160 * valid one. Please read section 10.6.2 of AHCI 1.1 for more information.
2163 * Also, use the unmasked value to clear the interrupt, as a spurious
2164 * pending event on a dummy port might cause a screaming IRQ.
2166 writel(irq_stat, mmio + HOST_IRQ_STAT);
2168 spin_unlock(&host->lock);
2172 return IRQ_RETVAL(handled);
2175 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2177 struct ata_port *ap = qc->ap;
2178 void __iomem *port_mmio = ahci_port_base(ap);
2179 struct ahci_port_priv *pp = ap->private_data;
2181 /* Keep track of the currently active link. It will be used
2182 * in the completion path to determine whether NCQ phase is in progress.
2185 pp->active_link = qc->dev->link;
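/* For NCQ, the tag's bit in PORT_SCR_ACT must be set before the
 * command is issued through PORT_CMD_ISSUE.
 */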
2187 if (qc->tf.protocol == ATA_PROT_NCQ)
2188 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2189 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2191 ahci_sw_activity(qc->dev->link);
2196 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2198 struct ahci_port_priv *pp = qc->ap->private_data;
2199 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
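/* the HBA copies the received D2H Register FIS into the port's FIS
 * receive area; read the result taskfile back from there
 */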
2201 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2205 static void ahci_freeze(struct ata_port *ap)
2207 void __iomem *port_mmio = ahci_port_base(ap);
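/* turn IRQ off */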
2210 writel(0, port_mmio + PORT_IRQ_MASK);
2213 static void ahci_thaw(struct ata_port *ap)
2215 void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
2216 void __iomem *port_mmio = ahci_port_base(ap);
2218 struct ahci_port_priv *pp = ap->private_data;
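/* clear IRQ */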
2221 tmp = readl(port_mmio + PORT_IRQ_STAT);
2222 writel(tmp, port_mmio + PORT_IRQ_STAT);
2223 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2225 /* turn IRQ back on */
2226 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2229 static void ahci_error_handler(struct ata_port *ap)
2231 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2232 /* restart engine */
2233 ahci_stop_engine(ap);
2234 ahci_start_engine(ap);
2237 sata_pmp_error_handler(ap);
2240 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2242 struct ata_port *ap = qc->ap;
2244 /* make DMA engine forget about the failed command */
2245 if (qc->flags & ATA_QCFLAG_FAILED)
2246 ahci_kick_engine(ap, 1);
2249 static void ahci_pmp_attach(struct ata_port *ap)
2251 void __iomem *port_mmio = ahci_port_base(ap);
2252 struct ahci_port_priv *pp = ap->private_data;
2255 cmd = readl(port_mmio + PORT_CMD);
2256 cmd |= PORT_CMD_PMP;
2257 writel(cmd, port_mmio + PORT_CMD);
2259 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2260 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2263 static void ahci_pmp_detach(struct ata_port *ap)
2265 void __iomem *port_mmio = ahci_port_base(ap);
2266 struct ahci_port_priv *pp = ap->private_data;
2269 cmd = readl(port_mmio + PORT_CMD);
2270 cmd &= ~PORT_CMD_PMP;
2271 writel(cmd, port_mmio + PORT_CMD);
2273 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2274 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2277 static int ahci_port_resume(struct ata_port *ap)
2280 ahci_start_port(ap);
2282 if (sata_pmp_attached(ap))
2283 ahci_pmp_attach(ap);
2285 ahci_pmp_detach(ap);
2291 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2293 const char *emsg = NULL;
2296 rc = ahci_deinit_port(ap, &emsg);
2298 ahci_power_down(ap);
2300 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2301 ahci_start_port(ap);
2307 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2309 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2310 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2313 if (mesg.event & PM_EVENT_SLEEP) {
2314 /* AHCI spec rev1.1 section 8.3.3:
2315 * Software must disable interrupts prior to requesting a
2316 * transition of the HBA to D3 state.
2318 ctl = readl(mmio + HOST_CTL);
2319 ctl &= ~HOST_IRQ_EN;
2320 writel(ctl, mmio + HOST_CTL);
2321 readl(mmio + HOST_CTL); /* flush */
2324 return ata_pci_device_suspend(pdev, mesg);
2327 static int ahci_pci_device_resume(struct pci_dev *pdev)
2329 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2332 rc = ata_pci_device_do_resume(pdev);
2336 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2337 rc = ahci_reset_controller(host);
2341 ahci_init_controller(host);
2344 ata_host_resume(host);
2350 static int ahci_port_start(struct ata_port *ap)
2352 struct device *dev = ap->host->dev;
2353 struct ahci_port_priv *pp;
2357 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2361 mem = dmam_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma,
2365 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
2368 * First item in chunk of DMA memory: 32-slot command table,
2369 * each slot 32 bytes in size
2372 pp->cmd_slot_dma = mem_dma;
2374 mem += AHCI_CMD_SLOT_SZ;
2375 mem_dma += AHCI_CMD_SLOT_SZ;
2378 * Second item: Received-FIS area
2381 pp->rx_fis_dma = mem_dma;
2383 mem += AHCI_RX_FIS_SZ;
2384 mem_dma += AHCI_RX_FIS_SZ;
2387 * Third item: data area for storing a single command
2388 * and its scatter-gather table
2391 pp->cmd_tbl_dma = mem_dma;
2394 * Save off initial list of interrupts to be enabled.
2395 * This could be changed later
2397 pp->intr_mask = DEF_PORT_IRQ;
2399 ap->private_data = pp;
2401 /* engage engines, captain */
2402 return ahci_port_resume(ap);
2405 static void ahci_port_stop(struct ata_port *ap)
2407 const char *emsg = NULL;
2410 /* de-initialize port */
2411 rc = ahci_deinit_port(ap, &emsg);
2413 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
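/*
 * Prefer 64-bit DMA masks when the HBA advertises 64-bit addressing
 * (using_dac); otherwise, or if enabling 64-bit fails, fall back to
 * 32-bit streaming and consistent DMA masks.
 */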
2416 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2421 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2422 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2424 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2426 dev_printk(KERN_ERR, &pdev->dev,
2427 "64-bit DMA enable failed\n");
2432 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2434 dev_printk(KERN_ERR, &pdev->dev,
2435 "32-bit DMA enable failed\n");
2438 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2440 dev_printk(KERN_ERR, &pdev->dev,
2441 "32-bit consistent DMA enable failed\n");
2448 static void ahci_print_info(struct ata_host *host)
2450 struct ahci_host_priv *hpriv = host->private_data;
2451 struct pci_dev *pdev = to_pci_dev(host->dev);
2452 void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
2453 u32 vers, cap, impl, speed;
2454 const char *speed_s;
2458 vers = readl(mmio + HOST_VERSION);
2460 impl = hpriv->port_map;
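/* CAP.ISS (bits 23:20) encodes the highest supported interface
 * speed: 1 = 1.5 Gbps, 2 = 3 Gbps, 3 = 6 Gbps
 */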
2462 speed = (cap >> 20) & 0xf;
2465 else if (speed == 2)
2467 else if (speed == 3)
2472 pci_read_config_word(pdev, 0x0a, &cc);
2473 if (cc == PCI_CLASS_STORAGE_IDE)
2475 else if (cc == PCI_CLASS_STORAGE_SATA)
2477 else if (cc == PCI_CLASS_STORAGE_RAID)
2482 dev_printk(KERN_INFO, &pdev->dev,
2483 "AHCI %02x%02x.%02x%02x "
2484 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2487 (vers >> 24) & 0xff,
2488 (vers >> 16) & 0xff,
2492 ((cap >> 8) & 0x1f) + 1,
2498 dev_printk(KERN_INFO, &pdev->dev,
2505 cap & (1 << 31) ? "64bit " : "",
2506 cap & (1 << 30) ? "ncq " : "",
2507 cap & (1 << 29) ? "sntf " : "",
2508 cap & (1 << 28) ? "ilck " : "",
2509 cap & (1 << 27) ? "stag " : "",
2510 cap & (1 << 26) ? "pm " : "",
2511 cap & (1 << 25) ? "led " : "",
2513 cap & (1 << 24) ? "clo " : "",
2514 cap & (1 << 19) ? "nz " : "",
2515 cap & (1 << 18) ? "only " : "",
2516 cap & (1 << 17) ? "pmp " : "",
2517 cap & (1 << 15) ? "pio " : "",
2518 cap & (1 << 14) ? "slum " : "",
2519 cap & (1 << 13) ? "part " : "",
2520 cap & (1 << 6) ? "ems ": ""
2524 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2525 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2526 * support PMP and the 4726 either directly exports the device
2527 * attached to the first downstream port or acts as a hardware storage
2528 * controller and emulates a single ATA device (can be RAID 0/1 or some
2529 * other configuration).
2531 * When there's no device attached to the first downstream port of the
2532 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2533 * configure the 4726. However, ATA emulation of the device is very
2534 * lame. It doesn't send a signature D2H Reg FIS after the initial
2535 * hardreset, pukes on SRST w/ PMP==0 and has a bunch of other issues.
2537 * The following function works around the problem by always using
2538 * hardreset on the port and not depending on receiving a signature FIS
2539 * afterward. If a signature FIS isn't received soon, ATA class is
2540 * assumed without a follow-up softreset.
2542 static void ahci_p5wdh_workaround(struct ata_host *host)
2544 static struct dmi_system_id sysids[] = {
2546 .ident = "P5W DH Deluxe",
2548 DMI_MATCH(DMI_SYS_VENDOR,
2549 "ASUSTEK COMPUTER INC"),
2550 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2555 struct pci_dev *pdev = to_pci_dev(host->dev);
2557 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2558 dmi_check_system(sysids)) {
2559 struct ata_port *ap = host->ports[1];
2561 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2562 "Deluxe on-board SIMG4726 workaround\n");
2564 ap->ops = &ahci_p5wdh_ops;
2565 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2569 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
2571 static const struct dmi_system_id broken_systems[] = {
2573 .ident = "HP Compaq nx6310",
2575 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2576 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
2578 /* PCI slot number of the controller */
2579 .driver_data = (void *)0x1FUL,
2582 .ident = "HP Compaq 6720s",
2584 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
2585 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
2587 /* PCI slot number of the controller */
2588 .driver_data = (void *)0x1FUL,
2591 { } /* terminate list */
2593 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
2596 unsigned long slot = (unsigned long)dmi->driver_data;
2597 /* apply the quirk only to on-board controllers */
2598 return slot == PCI_SLOT(pdev->devfn);
2604 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2606 static int printed_version;
2607 unsigned int board_id = ent->driver_data;
2608 struct ata_port_info pi = ahci_port_info[board_id];
2609 const struct ata_port_info *ppi[] = { &pi, NULL };
2610 struct device *dev = &pdev->dev;
2611 struct ahci_host_priv *hpriv;
2612 struct ata_host *host;
2617 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
2619 if (!printed_version++)
2620 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2622 /* The AHCI driver can only drive the SATA ports; the PATA driver
2623 can drive them all, so if both drivers are selected make sure
2624 AHCI stays out of the way */
2625 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
2628 /* acquire resources */
2629 rc = pcim_enable_device(pdev);
2633 /* AHCI controllers often implement an SFF-compatible interface.
2634 * Grab all PCI BARs just in case.
2636 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
2638 pcim_pin_device(pdev);
2642 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
2643 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
2646 /* ICH6s share the same PCI ID for both piix and ahci
2647 * modes. Enabling ahci mode while MAP indicates
2648 * combined mode is a bad idea. Yield to ata_piix.
2650 pci_read_config_byte(pdev, ICH_MAP, &map);
2652 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
2653 "combined mode, can't enable AHCI mode\n");
2658 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
2661 hpriv->flags |= (unsigned long)pi.private_data;
2663 /* MCP65 revision A1 and A2 can't do MSI */
2664 if (board_id == board_ahci_mcp65 &&
2665 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
2666 hpriv->flags |= AHCI_HFLAG_NO_MSI;
2668 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
2669 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
2670 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
2672 if (!(hpriv->flags & AHCI_HFLAG_NO_MSI))
2673 pci_enable_msi(pdev);
2675 /* save initial config */
2676 ahci_save_initial_config(pdev, hpriv);
2679 if (hpriv->cap & HOST_CAP_NCQ)
2680 pi.flags |= ATA_FLAG_NCQ;
2682 if (hpriv->cap & HOST_CAP_PMP)
2683 pi.flags |= ATA_FLAG_PMP;
2685 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
2687 void __iomem *mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
2688 u32 em_loc = readl(mmio + HOST_EM_LOC);
2689 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2691 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2693 /* we only support LED message type right now */
2694 if ((messages & 0x01) && (ahci_em_messages == 1)) {
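/* HOST_EM_LOC's upper 16 bits give the message buffer offset in
 * dwords from the start of ABAR; convert it to a byte offset
 */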
2696 hpriv->em_loc = ((em_loc >> 16) * 4);
2697 pi.flags |= ATA_FLAG_EM;
2698 if (!(em_ctl & EM_CTL_ALHD))
2699 pi.flags |= ATA_FLAG_SW_ACTIVITY;
2703 if (ahci_broken_system_poweroff(pdev)) {
2704 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
2705 dev_info(&pdev->dev,
2706 "quirky BIOS, skipping spindown on poweroff\n");
2709 /* CAP.NP sometimes indicates the index of the last enabled
2710 * port, at other times, that of the last possible port, so
2711 * determining the maximum port number requires looking at
2712 * both CAP.NP and port_map.
2714 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
2716 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2719 host->iomap = pcim_iomap_table(pdev);
2720 host->private_data = hpriv;
2722 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
2723 host->flags |= ATA_HOST_PARALLEL_SCAN;
2725 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
2727 if (pi.flags & ATA_FLAG_EM)
2728 ahci_reset_em(host);
2730 for (i = 0; i < host->n_ports; i++) {
2731 struct ata_port *ap = host->ports[i];
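/* each port's register block is 0x80 bytes, starting at ABAR offset 0x100 */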
2733 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
2734 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
2735 0x100 + ap->port_no * 0x80, "port");
2737 /* set initial link pm policy */
2738 ap->pm_policy = NOT_AVAILABLE;
2740 /* set enclosure management message type */
2741 if (ap->flags & ATA_FLAG_EM)
2742 ap->em_message_type = ahci_em_messages;
2745 /* disabled/not-implemented port */
2746 if (!(hpriv->port_map & (1 << i)))
2747 ap->ops = &ata_dummy_port_ops;
2750 /* apply workaround for ASUS P5W DH Deluxe mainboard */
2751 ahci_p5wdh_workaround(host);
2753 /* initialize adapter */
2754 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
2758 rc = ahci_reset_controller(host);
2762 ahci_init_controller(host);
2763 ahci_print_info(host);
2765 pci_set_master(pdev);
2766 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
2770 static int __init ahci_init(void)
2772 return pci_register_driver(&ahci_pci_driver);
2775 static void __exit ahci_exit(void)
2777 pci_unregister_driver(&ahci_pci_driver);
2781 MODULE_AUTHOR("Jeff Garzik");
2782 MODULE_DESCRIPTION("AHCI SATA low-level driver");
2783 MODULE_LICENSE("GPL");
2784 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
2785 MODULE_VERSION(DRV_VERSION);
2787 module_init(ahci_init);
2788 module_exit(ahci_exit);