2 * libahci.c - Common AHCI SATA low-level routines
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2004-2005 Red Hat, Inc.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
35 #include <linux/kernel.h>
36 #include <linux/gfp.h>
37 #include <linux/module.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <scsi/scsi_host.h>
45 #include <scsi/scsi_cmnd.h>
46 #include <linux/libata.h>
49 static int ahci_skip_host_reset;
51 EXPORT_SYMBOL_GPL(ahci_ignore_sss);
53 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
54 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
56 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
57 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
59 static int ahci_enable_alpm(struct ata_port *ap,
61 static void ahci_disable_alpm(struct ata_port *ap);
62 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
63 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
65 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
70 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
71 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
72 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
73 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
74 static int ahci_port_start(struct ata_port *ap);
75 static void ahci_port_stop(struct ata_port *ap);
76 static void ahci_qc_prep(struct ata_queued_cmd *qc);
77 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
78 static void ahci_freeze(struct ata_port *ap);
79 static void ahci_thaw(struct ata_port *ap);
80 static void ahci_enable_fbs(struct ata_port *ap);
81 static void ahci_disable_fbs(struct ata_port *ap);
82 static void ahci_pmp_attach(struct ata_port *ap);
83 static void ahci_pmp_detach(struct ata_port *ap);
84 static int ahci_softreset(struct ata_link *link, unsigned int *class,
85 unsigned long deadline);
86 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
87 unsigned long deadline);
88 static void ahci_postreset(struct ata_link *link, unsigned int *class);
89 static void ahci_error_handler(struct ata_port *ap);
90 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
91 static int ahci_port_resume(struct ata_port *ap);
92 static void ahci_dev_config(struct ata_device *dev);
93 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
96 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
98 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
99 static ssize_t ahci_activity_store(struct ata_device *dev,
100 enum sw_activity val);
101 static void ahci_init_sw_activity(struct ata_link *link);
103 static ssize_t ahci_show_host_caps(struct device *dev,
104 struct device_attribute *attr, char *buf);
105 static ssize_t ahci_show_host_cap2(struct device *dev,
106 struct device_attribute *attr, char *buf);
107 static ssize_t ahci_show_host_version(struct device *dev,
108 struct device_attribute *attr, char *buf);
109 static ssize_t ahci_show_port_cmd(struct device *dev,
110 struct device_attribute *attr, char *buf);
111 static ssize_t ahci_read_em_buffer(struct device *dev,
112 struct device_attribute *attr, char *buf);
113 static ssize_t ahci_store_em_buffer(struct device *dev,
114 struct device_attribute *attr,
115 const char *buf, size_t size);
117 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
118 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
119 static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
120 static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
121 static DEVICE_ATTR(em_buffer, S_IWUSR | S_IRUGO,
122 ahci_read_em_buffer, ahci_store_em_buffer);
124 static struct device_attribute *ahci_shost_attrs[] = {
125 &dev_attr_link_power_management_policy,
126 &dev_attr_em_message_type,
127 &dev_attr_em_message,
128 &dev_attr_ahci_host_caps,
129 &dev_attr_ahci_host_cap2,
130 &dev_attr_ahci_host_version,
131 &dev_attr_ahci_port_cmd,
136 static struct device_attribute *ahci_sdev_attrs[] = {
137 &dev_attr_sw_activity,
138 &dev_attr_unload_heads,
142 struct scsi_host_template ahci_sht = {
144 .can_queue = AHCI_MAX_CMDS - 1,
145 .sg_tablesize = AHCI_MAX_SG,
146 .dma_boundary = AHCI_DMA_BOUNDARY,
147 .shost_attrs = ahci_shost_attrs,
148 .sdev_attrs = ahci_sdev_attrs,
150 EXPORT_SYMBOL_GPL(ahci_sht);
152 struct ata_port_operations ahci_ops = {
153 .inherits = &sata_pmp_port_ops,
155 .qc_defer = ahci_pmp_qc_defer,
156 .qc_prep = ahci_qc_prep,
157 .qc_issue = ahci_qc_issue,
158 .qc_fill_rtf = ahci_qc_fill_rtf,
160 .freeze = ahci_freeze,
162 .softreset = ahci_softreset,
163 .hardreset = ahci_hardreset,
164 .postreset = ahci_postreset,
165 .pmp_softreset = ahci_softreset,
166 .error_handler = ahci_error_handler,
167 .post_internal_cmd = ahci_post_internal_cmd,
168 .dev_config = ahci_dev_config,
170 .scr_read = ahci_scr_read,
171 .scr_write = ahci_scr_write,
172 .pmp_attach = ahci_pmp_attach,
173 .pmp_detach = ahci_pmp_detach,
175 .enable_pm = ahci_enable_alpm,
176 .disable_pm = ahci_disable_alpm,
177 .em_show = ahci_led_show,
178 .em_store = ahci_led_store,
179 .sw_activity_show = ahci_activity_show,
180 .sw_activity_store = ahci_activity_store,
182 .port_suspend = ahci_port_suspend,
183 .port_resume = ahci_port_resume,
185 .port_start = ahci_port_start,
186 .port_stop = ahci_port_stop,
188 EXPORT_SYMBOL_GPL(ahci_ops);
190 int ahci_em_messages = 1;
191 EXPORT_SYMBOL_GPL(ahci_em_messages);
192 module_param(ahci_em_messages, int, 0444);
193 /* add other LED protocol types when they become supported */
194 MODULE_PARM_DESC(ahci_em_messages,
195 "AHCI Enclosure Management Message control (0 = off, 1 = on)");
197 static void ahci_enable_ahci(void __iomem *mmio)
202 /* turn on AHCI_EN */
203 tmp = readl(mmio + HOST_CTL);
204 if (tmp & HOST_AHCI_EN)
207 /* Some controllers need AHCI_EN to be written multiple times.
208 * Try a few times before giving up.
210 for (i = 0; i < 5; i++) {
212 writel(tmp, mmio + HOST_CTL);
213 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
214 if (tmp & HOST_AHCI_EN)
222 static ssize_t ahci_show_host_caps(struct device *dev,
223 struct device_attribute *attr, char *buf)
225 struct Scsi_Host *shost = class_to_shost(dev);
226 struct ata_port *ap = ata_shost_to_port(shost);
227 struct ahci_host_priv *hpriv = ap->host->private_data;
229 return sprintf(buf, "%x\n", hpriv->cap);
232 static ssize_t ahci_show_host_cap2(struct device *dev,
233 struct device_attribute *attr, char *buf)
235 struct Scsi_Host *shost = class_to_shost(dev);
236 struct ata_port *ap = ata_shost_to_port(shost);
237 struct ahci_host_priv *hpriv = ap->host->private_data;
239 return sprintf(buf, "%x\n", hpriv->cap2);
242 static ssize_t ahci_show_host_version(struct device *dev,
243 struct device_attribute *attr, char *buf)
245 struct Scsi_Host *shost = class_to_shost(dev);
246 struct ata_port *ap = ata_shost_to_port(shost);
247 struct ahci_host_priv *hpriv = ap->host->private_data;
248 void __iomem *mmio = hpriv->mmio;
250 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
253 static ssize_t ahci_show_port_cmd(struct device *dev,
254 struct device_attribute *attr, char *buf)
256 struct Scsi_Host *shost = class_to_shost(dev);
257 struct ata_port *ap = ata_shost_to_port(shost);
258 void __iomem *port_mmio = ahci_port_base(ap);
260 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
263 static ssize_t ahci_read_em_buffer(struct device *dev,
264 struct device_attribute *attr, char *buf)
266 struct Scsi_Host *shost = class_to_shost(dev);
267 struct ata_port *ap = ata_shost_to_port(shost);
268 struct ahci_host_priv *hpriv = ap->host->private_data;
269 void __iomem *mmio = hpriv->mmio;
270 void __iomem *em_mmio = mmio + hpriv->em_loc;
276 spin_lock_irqsave(ap->lock, flags);
278 em_ctl = readl(mmio + HOST_EM_CTL);
279 if (!(ap->flags & ATA_FLAG_EM) || em_ctl & EM_CTL_XMT ||
280 !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO)) {
281 spin_unlock_irqrestore(ap->lock, flags);
285 if (!(em_ctl & EM_CTL_MR)) {
286 spin_unlock_irqrestore(ap->lock, flags);
290 if (!(em_ctl & EM_CTL_SMB))
291 em_mmio += hpriv->em_buf_sz;
293 count = hpriv->em_buf_sz;
295 /* the count should not be larger than PAGE_SIZE */
296 if (count > PAGE_SIZE) {
297 if (printk_ratelimit())
298 ata_port_printk(ap, KERN_WARNING,
299 "EM read buffer size too large: "
300 "buffer size %u, page size %lu\n",
301 hpriv->em_buf_sz, PAGE_SIZE);
305 for (i = 0; i < count; i += 4) {
306 msg = readl(em_mmio + i);
308 buf[i + 1] = (msg >> 8) & 0xff;
309 buf[i + 2] = (msg >> 16) & 0xff;
310 buf[i + 3] = (msg >> 24) & 0xff;
313 spin_unlock_irqrestore(ap->lock, flags);
318 static ssize_t ahci_store_em_buffer(struct device *dev,
319 struct device_attribute *attr,
320 const char *buf, size_t size)
322 struct Scsi_Host *shost = class_to_shost(dev);
323 struct ata_port *ap = ata_shost_to_port(shost);
324 struct ahci_host_priv *hpriv = ap->host->private_data;
325 void __iomem *mmio = hpriv->mmio;
326 void __iomem *em_mmio = mmio + hpriv->em_loc;
327 const unsigned char *msg_buf = buf;
332 /* check size validity */
333 if (!(ap->flags & ATA_FLAG_EM) ||
334 !(hpriv->em_msg_type & EM_MSG_TYPE_SGPIO) ||
335 size % 4 || size > hpriv->em_buf_sz)
338 spin_lock_irqsave(ap->lock, flags);
340 em_ctl = readl(mmio + HOST_EM_CTL);
341 if (em_ctl & EM_CTL_TM) {
342 spin_unlock_irqrestore(ap->lock, flags);
346 for (i = 0; i < size; i += 4) {
347 msg = msg_buf[i] | msg_buf[i + 1] << 8 |
348 msg_buf[i + 2] << 16 | msg_buf[i + 3] << 24;
349 writel(msg, em_mmio + i);
352 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
354 spin_unlock_irqrestore(ap->lock, flags);
360 * ahci_save_initial_config - Save and fixup initial config values
361 * @dev: target AHCI device
362 * @hpriv: host private area to store config values
363 * @force_port_map: force port map to a specified value
364 * @mask_port_map: mask out particular bits from port map
366 * Some registers containing configuration info might be setup by
367 * BIOS and might be cleared on reset. This function saves the
368 * initial values of those registers into @hpriv such that they
369 * can be restored after controller reset.
371 * If inconsistent, config values are fixed up by this function.
376 void ahci_save_initial_config(struct device *dev,
377 struct ahci_host_priv *hpriv,
378 unsigned int force_port_map,
379 unsigned int mask_port_map)
381 void __iomem *mmio = hpriv->mmio;
382 u32 cap, cap2, vers, port_map;
385 /* make sure AHCI mode is enabled before accessing CAP */
386 ahci_enable_ahci(mmio);
388 /* Values prefixed with saved_ are written back to host after
389 * reset. Values without are used for driver operation.
391 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
392 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
394 /* CAP2 register is only defined for AHCI 1.2 and later */
395 vers = readl(mmio + HOST_VERSION);
396 if ((vers >> 16) > 1 ||
397 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
398 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
400 hpriv->saved_cap2 = cap2 = 0;
402 /* some chips have errata preventing 64bit use */
403 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
404 dev_printk(KERN_INFO, dev,
405 "controller can't do 64bit DMA, forcing 32bit\n");
409 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
410 dev_printk(KERN_INFO, dev,
411 "controller can't do NCQ, turning off CAP_NCQ\n");
412 cap &= ~HOST_CAP_NCQ;
415 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
416 dev_printk(KERN_INFO, dev,
417 "controller can do NCQ, turning on CAP_NCQ\n");
421 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
422 dev_printk(KERN_INFO, dev,
423 "controller can't do PMP, turning off CAP_PMP\n");
424 cap &= ~HOST_CAP_PMP;
427 if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
428 dev_printk(KERN_INFO, dev,
429 "controller can't do SNTF, turning off CAP_SNTF\n");
430 cap &= ~HOST_CAP_SNTF;
433 if (!(cap & HOST_CAP_FBS) && (hpriv->flags & AHCI_HFLAG_YES_FBS)) {
434 dev_printk(KERN_INFO, dev,
435 "controller can do FBS, turning on CAP_FBS\n");
439 if (force_port_map && port_map != force_port_map) {
440 dev_printk(KERN_INFO, dev, "forcing port_map 0x%x -> 0x%x\n",
441 port_map, force_port_map);
442 port_map = force_port_map;
446 dev_printk(KERN_ERR, dev, "masking port_map 0x%x -> 0x%x\n",
448 port_map & mask_port_map);
449 port_map &= mask_port_map;
452 /* cross check port_map and cap.n_ports */
456 for (i = 0; i < AHCI_MAX_PORTS; i++)
457 if (port_map & (1 << i))
460 /* If PI has more ports than n_ports, whine, clear
461 * port_map and let it be generated from n_ports.
463 if (map_ports > ahci_nr_ports(cap)) {
464 dev_printk(KERN_WARNING, dev,
465 "implemented port map (0x%x) contains more "
466 "ports than nr_ports (%u), using nr_ports\n",
467 port_map, ahci_nr_ports(cap));
472 /* fabricate port_map from cap.nr_ports */
474 port_map = (1 << ahci_nr_ports(cap)) - 1;
475 dev_printk(KERN_WARNING, dev,
476 "forcing PORTS_IMPL to 0x%x\n", port_map);
478 /* write the fixed up value to the PI register */
479 hpriv->saved_port_map = port_map;
482 /* record values to use during operation */
485 hpriv->port_map = port_map;
487 EXPORT_SYMBOL_GPL(ahci_save_initial_config);
490 * ahci_restore_initial_config - Restore initial config
491 * @host: target ATA host
493 * Restore initial config stored by ahci_save_initial_config().
498 static void ahci_restore_initial_config(struct ata_host *host)
500 struct ahci_host_priv *hpriv = host->private_data;
501 void __iomem *mmio = hpriv->mmio;
503 writel(hpriv->saved_cap, mmio + HOST_CAP);
504 if (hpriv->saved_cap2)
505 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
506 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL);
507 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
510 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
512 static const int offset[] = {
513 [SCR_STATUS] = PORT_SCR_STAT,
514 [SCR_CONTROL] = PORT_SCR_CTL,
515 [SCR_ERROR] = PORT_SCR_ERR,
516 [SCR_ACTIVE] = PORT_SCR_ACT,
517 [SCR_NOTIFICATION] = PORT_SCR_NTF,
519 struct ahci_host_priv *hpriv = ap->host->private_data;
521 if (sc_reg < ARRAY_SIZE(offset) &&
522 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
523 return offset[sc_reg];
527 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
529 void __iomem *port_mmio = ahci_port_base(link->ap);
530 int offset = ahci_scr_offset(link->ap, sc_reg);
533 *val = readl(port_mmio + offset);
539 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
541 void __iomem *port_mmio = ahci_port_base(link->ap);
542 int offset = ahci_scr_offset(link->ap, sc_reg);
545 writel(val, port_mmio + offset);
551 void ahci_start_engine(struct ata_port *ap)
553 void __iomem *port_mmio = ahci_port_base(ap);
557 tmp = readl(port_mmio + PORT_CMD);
558 tmp |= PORT_CMD_START;
559 writel(tmp, port_mmio + PORT_CMD);
560 readl(port_mmio + PORT_CMD); /* flush */
562 EXPORT_SYMBOL_GPL(ahci_start_engine);
564 int ahci_stop_engine(struct ata_port *ap)
566 void __iomem *port_mmio = ahci_port_base(ap);
569 tmp = readl(port_mmio + PORT_CMD);
571 /* check if the HBA is idle */
572 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
575 /* setting HBA to idle */
576 tmp &= ~PORT_CMD_START;
577 writel(tmp, port_mmio + PORT_CMD);
579 /* wait for engine to stop. This could be as long as 500 msec */
580 tmp = ata_wait_register(port_mmio + PORT_CMD,
581 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
582 if (tmp & PORT_CMD_LIST_ON)
587 EXPORT_SYMBOL_GPL(ahci_stop_engine);
589 static void ahci_start_fis_rx(struct ata_port *ap)
591 void __iomem *port_mmio = ahci_port_base(ap);
592 struct ahci_host_priv *hpriv = ap->host->private_data;
593 struct ahci_port_priv *pp = ap->private_data;
596 /* set FIS registers */
597 if (hpriv->cap & HOST_CAP_64)
598 writel((pp->cmd_slot_dma >> 16) >> 16,
599 port_mmio + PORT_LST_ADDR_HI);
600 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
602 if (hpriv->cap & HOST_CAP_64)
603 writel((pp->rx_fis_dma >> 16) >> 16,
604 port_mmio + PORT_FIS_ADDR_HI);
605 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
607 /* enable FIS reception */
608 tmp = readl(port_mmio + PORT_CMD);
609 tmp |= PORT_CMD_FIS_RX;
610 writel(tmp, port_mmio + PORT_CMD);
613 readl(port_mmio + PORT_CMD);
616 static int ahci_stop_fis_rx(struct ata_port *ap)
618 void __iomem *port_mmio = ahci_port_base(ap);
621 /* disable FIS reception */
622 tmp = readl(port_mmio + PORT_CMD);
623 tmp &= ~PORT_CMD_FIS_RX;
624 writel(tmp, port_mmio + PORT_CMD);
626 /* wait for completion, spec says 500ms, give it 1000 */
627 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
628 PORT_CMD_FIS_ON, 10, 1000);
629 if (tmp & PORT_CMD_FIS_ON)
635 static void ahci_power_up(struct ata_port *ap)
637 struct ahci_host_priv *hpriv = ap->host->private_data;
638 void __iomem *port_mmio = ahci_port_base(ap);
641 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
644 if (hpriv->cap & HOST_CAP_SSS) {
645 cmd |= PORT_CMD_SPIN_UP;
646 writel(cmd, port_mmio + PORT_CMD);
650 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
653 static void ahci_disable_alpm(struct ata_port *ap)
655 struct ahci_host_priv *hpriv = ap->host->private_data;
656 void __iomem *port_mmio = ahci_port_base(ap);
658 struct ahci_port_priv *pp = ap->private_data;
660 /* IPM bits should be disabled by libata-core */
661 /* get the existing command bits */
662 cmd = readl(port_mmio + PORT_CMD);
664 /* disable ALPM and ASP */
665 cmd &= ~PORT_CMD_ASP;
666 cmd &= ~PORT_CMD_ALPE;
668 /* force the interface back to active */
669 cmd |= PORT_CMD_ICC_ACTIVE;
671 /* write out new cmd value */
672 writel(cmd, port_mmio + PORT_CMD);
673 cmd = readl(port_mmio + PORT_CMD);
675 /* wait 10ms to be sure we've come out of any low power state */
678 /* clear out any PhyRdy stuff from interrupt status */
679 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
681 /* go ahead and clean out PhyRdy Change from Serror too */
682 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
685 * Clear flag to indicate that we should ignore all PhyRdy
688 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
691 * Enable interrupts on Phy Ready.
693 pp->intr_mask |= PORT_IRQ_PHYRDY;
694 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
697 * don't change the link pm policy - we can be called
698 * just to turn of link pm temporarily
702 static int ahci_enable_alpm(struct ata_port *ap,
705 struct ahci_host_priv *hpriv = ap->host->private_data;
706 void __iomem *port_mmio = ahci_port_base(ap);
708 struct ahci_port_priv *pp = ap->private_data;
711 /* Make sure the host is capable of link power management */
712 if (!(hpriv->cap & HOST_CAP_ALPM))
716 case MAX_PERFORMANCE:
719 * if we came here with NOT_AVAILABLE,
720 * it just means this is the first time we
721 * have tried to enable - default to max performance,
722 * and let the user go to lower power modes on request.
724 ahci_disable_alpm(ap);
727 /* configure HBA to enter SLUMBER */
731 /* configure HBA to enter PARTIAL */
739 * Disable interrupts on Phy Ready. This keeps us from
740 * getting woken up due to spurious phy ready interrupts
741 * TBD - Hot plug should be done via polling now, is
742 * that even supported?
744 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
745 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
748 * Set a flag to indicate that we should ignore all PhyRdy
749 * state changes since these can happen now whenever we
752 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
754 /* get the existing command bits */
755 cmd = readl(port_mmio + PORT_CMD);
758 * Set ASP based on Policy
763 * Setting this bit will instruct the HBA to aggressively
764 * enter a lower power link state when it's appropriate and
765 * based on the value set above for ASP
767 cmd |= PORT_CMD_ALPE;
769 /* write out new cmd value */
770 writel(cmd, port_mmio + PORT_CMD);
771 cmd = readl(port_mmio + PORT_CMD);
773 /* IPM bits should be set by libata-core */
778 static void ahci_power_down(struct ata_port *ap)
780 struct ahci_host_priv *hpriv = ap->host->private_data;
781 void __iomem *port_mmio = ahci_port_base(ap);
784 if (!(hpriv->cap & HOST_CAP_SSS))
787 /* put device into listen mode, first set PxSCTL.DET to 0 */
788 scontrol = readl(port_mmio + PORT_SCR_CTL);
790 writel(scontrol, port_mmio + PORT_SCR_CTL);
792 /* then set PxCMD.SUD to 0 */
793 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
794 cmd &= ~PORT_CMD_SPIN_UP;
795 writel(cmd, port_mmio + PORT_CMD);
799 static void ahci_start_port(struct ata_port *ap)
801 struct ahci_port_priv *pp = ap->private_data;
802 struct ata_link *link;
803 struct ahci_em_priv *emp;
807 /* enable FIS reception */
808 ahci_start_fis_rx(ap);
811 ahci_start_engine(ap);
814 if (ap->flags & ATA_FLAG_EM) {
815 ata_for_each_link(link, ap, EDGE) {
816 emp = &pp->em_priv[link->pmp];
818 /* EM Transmit bit maybe busy during init */
819 for (i = 0; i < EM_MAX_RETRY; i++) {
820 rc = ahci_transmit_led_message(ap,
831 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
832 ata_for_each_link(link, ap, EDGE)
833 ahci_init_sw_activity(link);
837 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
842 rc = ahci_stop_engine(ap);
844 *emsg = "failed to stop engine";
848 /* disable FIS reception */
849 rc = ahci_stop_fis_rx(ap);
851 *emsg = "failed stop FIS RX";
858 int ahci_reset_controller(struct ata_host *host)
860 struct ahci_host_priv *hpriv = host->private_data;
861 void __iomem *mmio = hpriv->mmio;
864 /* we must be in AHCI mode, before using anything
865 * AHCI-specific, such as HOST_RESET.
867 ahci_enable_ahci(mmio);
869 /* global controller reset */
870 if (!ahci_skip_host_reset) {
871 tmp = readl(mmio + HOST_CTL);
872 if ((tmp & HOST_RESET) == 0) {
873 writel(tmp | HOST_RESET, mmio + HOST_CTL);
874 readl(mmio + HOST_CTL); /* flush */
878 * to perform host reset, OS should set HOST_RESET
879 * and poll until this bit is read to be "0".
880 * reset must complete within 1 second, or
881 * the hardware should be considered fried.
883 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
884 HOST_RESET, 10, 1000);
886 if (tmp & HOST_RESET) {
887 dev_printk(KERN_ERR, host->dev,
888 "controller reset failed (0x%x)\n", tmp);
892 /* turn on AHCI mode */
893 ahci_enable_ahci(mmio);
895 /* Some registers might be cleared on reset. Restore
898 ahci_restore_initial_config(host);
900 dev_printk(KERN_INFO, host->dev,
901 "skipping global host reset\n");
905 EXPORT_SYMBOL_GPL(ahci_reset_controller);
907 static void ahci_sw_activity(struct ata_link *link)
909 struct ata_port *ap = link->ap;
910 struct ahci_port_priv *pp = ap->private_data;
911 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
913 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
917 if (!timer_pending(&emp->timer))
918 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
921 static void ahci_sw_activity_blink(unsigned long arg)
923 struct ata_link *link = (struct ata_link *)arg;
924 struct ata_port *ap = link->ap;
925 struct ahci_port_priv *pp = ap->private_data;
926 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
927 unsigned long led_message = emp->led_state;
928 u32 activity_led_state;
931 led_message &= EM_MSG_LED_VALUE;
932 led_message |= ap->port_no | (link->pmp << 8);
934 /* check to see if we've had activity. If so,
935 * toggle state of LED and reset timer. If not,
936 * turn LED to desired idle state.
938 spin_lock_irqsave(ap->lock, flags);
939 if (emp->saved_activity != emp->activity) {
940 emp->saved_activity = emp->activity;
941 /* get the current LED state */
942 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
944 if (activity_led_state)
945 activity_led_state = 0;
947 activity_led_state = 1;
949 /* clear old state */
950 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
953 led_message |= (activity_led_state << 16);
954 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
957 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
958 if (emp->blink_policy == BLINK_OFF)
959 led_message |= (1 << 16);
961 spin_unlock_irqrestore(ap->lock, flags);
962 ahci_transmit_led_message(ap, led_message, 4);
965 static void ahci_init_sw_activity(struct ata_link *link)
967 struct ata_port *ap = link->ap;
968 struct ahci_port_priv *pp = ap->private_data;
969 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
971 /* init activity stats, setup timer */
972 emp->saved_activity = emp->activity = 0;
973 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
975 /* check our blink policy and set flag for link if it's enabled */
976 if (emp->blink_policy)
977 link->flags |= ATA_LFLAG_SW_ACTIVITY;
980 int ahci_reset_em(struct ata_host *host)
982 struct ahci_host_priv *hpriv = host->private_data;
983 void __iomem *mmio = hpriv->mmio;
986 em_ctl = readl(mmio + HOST_EM_CTL);
987 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
990 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
993 EXPORT_SYMBOL_GPL(ahci_reset_em);
995 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
998 struct ahci_host_priv *hpriv = ap->host->private_data;
999 struct ahci_port_priv *pp = ap->private_data;
1000 void __iomem *mmio = hpriv->mmio;
1002 u32 message[] = {0, 0};
1003 unsigned long flags;
1005 struct ahci_em_priv *emp;
1007 /* get the slot number from the message */
1008 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1009 if (pmp < EM_MAX_SLOTS)
1010 emp = &pp->em_priv[pmp];
1014 spin_lock_irqsave(ap->lock, flags);
1017 * if we are still busy transmitting a previous message,
1020 em_ctl = readl(mmio + HOST_EM_CTL);
1021 if (em_ctl & EM_CTL_TM) {
1022 spin_unlock_irqrestore(ap->lock, flags);
1026 if (hpriv->em_msg_type & EM_MSG_TYPE_LED) {
1028 * create message header - this is all zero except for
1029 * the message size, which is 4 bytes.
1031 message[0] |= (4 << 8);
1033 /* ignore 0:4 of byte zero, fill in port info yourself */
1034 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1036 /* write message to EM_LOC */
1037 writel(message[0], mmio + hpriv->em_loc);
1038 writel(message[1], mmio + hpriv->em_loc+4);
1041 * tell hardware to transmit the message
1043 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1046 /* save off new led state for port/slot */
1047 emp->led_state = state;
1049 spin_unlock_irqrestore(ap->lock, flags);
1053 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1055 struct ahci_port_priv *pp = ap->private_data;
1056 struct ata_link *link;
1057 struct ahci_em_priv *emp;
1060 ata_for_each_link(link, ap, EDGE) {
1061 emp = &pp->em_priv[link->pmp];
1062 rc += sprintf(buf, "%lx\n", emp->led_state);
1067 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1072 struct ahci_port_priv *pp = ap->private_data;
1073 struct ahci_em_priv *emp;
1075 state = simple_strtoul(buf, NULL, 0);
1077 /* get the slot number from the message */
1078 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1079 if (pmp < EM_MAX_SLOTS)
1080 emp = &pp->em_priv[pmp];
1084 /* mask off the activity bits if we are in sw_activity
1085 * mode, user should turn off sw_activity before setting
1086 * activity led through em_message
1088 if (emp->blink_policy)
1089 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1091 return ahci_transmit_led_message(ap, state, size);
1094 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1096 struct ata_link *link = dev->link;
1097 struct ata_port *ap = link->ap;
1098 struct ahci_port_priv *pp = ap->private_data;
1099 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1100 u32 port_led_state = emp->led_state;
1102 /* save the desired Activity LED behavior */
1105 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1107 /* set the LED to OFF */
1108 port_led_state &= EM_MSG_LED_VALUE_OFF;
1109 port_led_state |= (ap->port_no | (link->pmp << 8));
1110 ahci_transmit_led_message(ap, port_led_state, 4);
1112 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1113 if (val == BLINK_OFF) {
1114 /* set LED to ON for idle */
1115 port_led_state &= EM_MSG_LED_VALUE_OFF;
1116 port_led_state |= (ap->port_no | (link->pmp << 8));
1117 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1118 ahci_transmit_led_message(ap, port_led_state, 4);
1121 emp->blink_policy = val;
1125 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1127 struct ata_link *link = dev->link;
1128 struct ata_port *ap = link->ap;
1129 struct ahci_port_priv *pp = ap->private_data;
1130 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1132 /* display the saved value of activity behavior for this
1135 return sprintf(buf, "%d\n", emp->blink_policy);
1138 static void ahci_port_init(struct device *dev, struct ata_port *ap,
1139 int port_no, void __iomem *mmio,
1140 void __iomem *port_mmio)
1142 const char *emsg = NULL;
1146 /* make sure port is not active */
1147 rc = ahci_deinit_port(ap, &emsg);
1149 dev_warn(dev, "%s (%d)\n", emsg, rc);
1152 tmp = readl(port_mmio + PORT_SCR_ERR);
1153 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1154 writel(tmp, port_mmio + PORT_SCR_ERR);
1156 /* clear port IRQ */
1157 tmp = readl(port_mmio + PORT_IRQ_STAT);
1158 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1160 writel(tmp, port_mmio + PORT_IRQ_STAT);
1162 writel(1 << port_no, mmio + HOST_IRQ_STAT);
1165 void ahci_init_controller(struct ata_host *host)
1167 struct ahci_host_priv *hpriv = host->private_data;
1168 void __iomem *mmio = hpriv->mmio;
1170 void __iomem *port_mmio;
1173 for (i = 0; i < host->n_ports; i++) {
1174 struct ata_port *ap = host->ports[i];
1176 port_mmio = ahci_port_base(ap);
1177 if (ata_port_is_dummy(ap))
1180 ahci_port_init(host->dev, ap, i, mmio, port_mmio);
1183 tmp = readl(mmio + HOST_CTL);
1184 VPRINTK("HOST_CTL 0x%x\n", tmp);
1185 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1186 tmp = readl(mmio + HOST_CTL);
1187 VPRINTK("HOST_CTL 0x%x\n", tmp);
1189 EXPORT_SYMBOL_GPL(ahci_init_controller);
1191 static void ahci_dev_config(struct ata_device *dev)
1193 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1195 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1196 dev->max_sectors = 255;
1197 ata_dev_printk(dev, KERN_INFO,
1198 "SB600 AHCI: limiting to 255 sectors per cmd\n");
1202 static unsigned int ahci_dev_classify(struct ata_port *ap)
1204 void __iomem *port_mmio = ahci_port_base(ap);
1205 struct ata_taskfile tf;
1208 tmp = readl(port_mmio + PORT_SIG);
1209 tf.lbah = (tmp >> 24) & 0xff;
1210 tf.lbam = (tmp >> 16) & 0xff;
1211 tf.lbal = (tmp >> 8) & 0xff;
1212 tf.nsect = (tmp) & 0xff;
1214 return ata_dev_classify(&tf);
1217 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1220 dma_addr_t cmd_tbl_dma;
1222 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1224 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1225 pp->cmd_slot[tag].status = 0;
1226 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1227 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
1230 int ahci_kick_engine(struct ata_port *ap)
1232 void __iomem *port_mmio = ahci_port_base(ap);
1233 struct ahci_host_priv *hpriv = ap->host->private_data;
1234 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1239 rc = ahci_stop_engine(ap);
1244 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1246 busy = status & (ATA_BUSY | ATA_DRQ);
1247 if (!busy && !sata_pmp_attached(ap)) {
1252 if (!(hpriv->cap & HOST_CAP_CLO)) {
1258 tmp = readl(port_mmio + PORT_CMD);
1259 tmp |= PORT_CMD_CLO;
1260 writel(tmp, port_mmio + PORT_CMD);
1263 tmp = ata_wait_register(port_mmio + PORT_CMD,
1264 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1265 if (tmp & PORT_CMD_CLO)
1268 /* restart engine */
1270 ahci_start_engine(ap);
1273 EXPORT_SYMBOL_GPL(ahci_kick_engine);
1275 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1276 struct ata_taskfile *tf, int is_cmd, u16 flags,
1277 unsigned long timeout_msec)
1279 const u32 cmd_fis_len = 5; /* five dwords */
1280 struct ahci_port_priv *pp = ap->private_data;
1281 void __iomem *port_mmio = ahci_port_base(ap);
1282 u8 *fis = pp->cmd_tbl;
1285 /* prep the command */
1286 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1287 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1290 writel(1, port_mmio + PORT_CMD_ISSUE);
1293 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1296 ahci_kick_engine(ap);
1300 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
1305 int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1306 int pmp, unsigned long deadline,
1307 int (*check_ready)(struct ata_link *link))
1309 struct ata_port *ap = link->ap;
1310 struct ahci_host_priv *hpriv = ap->host->private_data;
1311 const char *reason = NULL;
1312 unsigned long now, msecs;
1313 struct ata_taskfile tf;
1318 /* prepare for SRST (AHCI-1.1 10.4.1) */
1319 rc = ahci_kick_engine(ap);
1320 if (rc && rc != -EOPNOTSUPP)
1321 ata_link_printk(link, KERN_WARNING,
1322 "failed to reset engine (errno=%d)\n", rc);
1324 ata_tf_init(link->device, &tf);
1326 /* issue the first D2H Register FIS */
1329 if (time_after(now, deadline))
1330 msecs = jiffies_to_msecs(deadline - now);
1333 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1334 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1336 reason = "1st FIS failed";
1340 /* spec says at least 5us, but be generous and sleep for 1ms */
1343 /* issue the second D2H Register FIS */
1344 tf.ctl &= ~ATA_SRST;
1345 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1347 /* wait for link to become ready */
1348 rc = ata_wait_after_reset(link, deadline, check_ready);
1349 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1351 * Workaround for cases where link online status can't
1352 * be trusted. Treat device readiness timeout as link
1355 ata_link_printk(link, KERN_INFO,
1356 "device not ready, treating as offline\n");
1357 *class = ATA_DEV_NONE;
1359 /* link occupied, -ENODEV too is an error */
1360 reason = "device not ready";
1363 *class = ahci_dev_classify(ap);
1365 DPRINTK("EXIT, class=%u\n", *class);
1369 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
1373 int ahci_check_ready(struct ata_link *link)
1375 void __iomem *port_mmio = ahci_port_base(link->ap);
1376 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1378 return ata_check_ready(status);
1380 EXPORT_SYMBOL_GPL(ahci_check_ready);
1382 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1383 unsigned long deadline)
1385 int pmp = sata_srst_pmp(link);
1389 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
1391 EXPORT_SYMBOL_GPL(ahci_do_softreset);
1393 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1394 unsigned long deadline)
1396 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1397 struct ata_port *ap = link->ap;
1398 struct ahci_port_priv *pp = ap->private_data;
1399 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1400 struct ata_taskfile tf;
1406 ahci_stop_engine(ap);
1408 /* clear D2H reception area to properly wait for D2H FIS */
1409 ata_tf_init(link->device, &tf);
1411 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1413 rc = sata_link_hardreset(link, timing, deadline, &online,
1416 ahci_start_engine(ap);
1419 *class = ahci_dev_classify(ap);
1421 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
1425 static void ahci_postreset(struct ata_link *link, unsigned int *class)
1427 struct ata_port *ap = link->ap;
1428 void __iomem *port_mmio = ahci_port_base(ap);
1431 ata_std_postreset(link, class);
1433 /* Make sure port's ATAPI bit is set appropriately */
1434 new_tmp = tmp = readl(port_mmio + PORT_CMD);
1435 if (*class == ATA_DEV_ATAPI)
1436 new_tmp |= PORT_CMD_ATAPI;
1438 new_tmp &= ~PORT_CMD_ATAPI;
1439 if (new_tmp != tmp) {
1440 writel(new_tmp, port_mmio + PORT_CMD);
1441 readl(port_mmio + PORT_CMD); /* flush */
1445 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
1447 struct scatterlist *sg;
1448 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
1454 * Next, the S/G list.
1456 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1457 dma_addr_t addr = sg_dma_address(sg);
1458 u32 sg_len = sg_dma_len(sg);
1460 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
1461 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
1462 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
1468 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
1470 struct ata_port *ap = qc->ap;
1471 struct ahci_port_priv *pp = ap->private_data;
1473 if (!sata_pmp_attached(ap) || pp->fbs_enabled)
1474 return ata_std_qc_defer(qc);
1476 return sata_pmp_qc_defer_cmd_switch(qc);
1479 static void ahci_qc_prep(struct ata_queued_cmd *qc)
1481 struct ata_port *ap = qc->ap;
1482 struct ahci_port_priv *pp = ap->private_data;
1483 int is_atapi = ata_is_atapi(qc->tf.protocol);
1486 const u32 cmd_fis_len = 5; /* five dwords */
1487 unsigned int n_elem;
1490 * Fill in command table information. First, the header,
1491 * a SATA Register - Host to Device command FIS.
1493 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
1495 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
1497 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
1498 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
1502 if (qc->flags & ATA_QCFLAG_DMAMAP)
1503 n_elem = ahci_fill_sg(qc, cmd_tbl);
1506 * Fill in command slot information.
1508 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
1509 if (qc->tf.flags & ATA_TFLAG_WRITE)
1510 opts |= AHCI_CMD_WRITE;
1512 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
1514 ahci_fill_cmd_slot(pp, qc->tag, opts);
1517 static void ahci_fbs_dec_intr(struct ata_port *ap)
1519 struct ahci_port_priv *pp = ap->private_data;
1520 void __iomem *port_mmio = ahci_port_base(ap);
1521 u32 fbs = readl(port_mmio + PORT_FBS);
1525 BUG_ON(!pp->fbs_enabled);
1527 /* time to wait for DEC is not specified by AHCI spec,
1528 * add a retry loop for safety.
1530 writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
1531 fbs = readl(port_mmio + PORT_FBS);
1532 while ((fbs & PORT_FBS_DEC) && retries--) {
1534 fbs = readl(port_mmio + PORT_FBS);
1537 if (fbs & PORT_FBS_DEC)
1538 dev_printk(KERN_ERR, ap->host->dev,
1539 "failed to clear device error\n");
1542 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
1544 struct ahci_host_priv *hpriv = ap->host->private_data;
1545 struct ahci_port_priv *pp = ap->private_data;
1546 struct ata_eh_info *host_ehi = &ap->link.eh_info;
1547 struct ata_link *link = NULL;
1548 struct ata_queued_cmd *active_qc;
1549 struct ata_eh_info *active_ehi;
1550 bool fbs_need_dec = false;
1553 /* determine active link with error */
1554 if (pp->fbs_enabled) {
1555 void __iomem *port_mmio = ahci_port_base(ap);
1556 u32 fbs = readl(port_mmio + PORT_FBS);
1557 int pmp = fbs >> PORT_FBS_DWE_OFFSET;
1559 if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
1560 ata_link_online(&ap->pmp_link[pmp])) {
1561 link = &ap->pmp_link[pmp];
1562 fbs_need_dec = true;
1566 ata_for_each_link(link, ap, EDGE)
1567 if (ata_link_active(link))
1573 active_qc = ata_qc_from_tag(ap, link->active_tag);
1574 active_ehi = &link->eh_info;
1576 /* record irq stat */
1577 ata_ehi_clear_desc(host_ehi);
1578 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
1580 /* AHCI needs SError cleared; otherwise, it might lock up */
1581 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
1582 ahci_scr_write(&ap->link, SCR_ERROR, serror);
1583 host_ehi->serror |= serror;
1585 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
1586 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
1587 irq_stat &= ~PORT_IRQ_IF_ERR;
1589 if (irq_stat & PORT_IRQ_TF_ERR) {
1590 /* If qc is active, charge it; otherwise, the active
1591 * link. There's no active qc on NCQ errors. It will
1592 * be determined by EH by reading log page 10h.
1595 active_qc->err_mask |= AC_ERR_DEV;
1597 active_ehi->err_mask |= AC_ERR_DEV;
1599 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
1600 host_ehi->serror &= ~SERR_INTERNAL;
1603 if (irq_stat & PORT_IRQ_UNK_FIS) {
1604 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
1606 active_ehi->err_mask |= AC_ERR_HSM;
1607 active_ehi->action |= ATA_EH_RESET;
1608 ata_ehi_push_desc(active_ehi,
1609 "unknown FIS %08x %08x %08x %08x" ,
1610 unk[0], unk[1], unk[2], unk[3]);
1613 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
1614 active_ehi->err_mask |= AC_ERR_HSM;
1615 active_ehi->action |= ATA_EH_RESET;
1616 ata_ehi_push_desc(active_ehi, "incorrect PMP");
1619 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
1620 host_ehi->err_mask |= AC_ERR_HOST_BUS;
1621 host_ehi->action |= ATA_EH_RESET;
1622 ata_ehi_push_desc(host_ehi, "host bus error");
1625 if (irq_stat & PORT_IRQ_IF_ERR) {
1627 active_ehi->err_mask |= AC_ERR_DEV;
1629 host_ehi->err_mask |= AC_ERR_ATA_BUS;
1630 host_ehi->action |= ATA_EH_RESET;
1633 ata_ehi_push_desc(host_ehi, "interface fatal error");
1636 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
1637 ata_ehi_hotplugged(host_ehi);
1638 ata_ehi_push_desc(host_ehi, "%s",
1639 irq_stat & PORT_IRQ_CONNECT ?
1640 "connection status changed" : "PHY RDY changed");
1643 /* okay, let's hand over to EH */
1645 if (irq_stat & PORT_IRQ_FREEZE)
1646 ata_port_freeze(ap);
1647 else if (fbs_need_dec) {
1648 ata_link_abort(link);
1649 ahci_fbs_dec_intr(ap);
1654 static void ahci_port_intr(struct ata_port *ap)
1656 void __iomem *port_mmio = ahci_port_base(ap);
1657 struct ata_eh_info *ehi = &ap->link.eh_info;
1658 struct ahci_port_priv *pp = ap->private_data;
1659 struct ahci_host_priv *hpriv = ap->host->private_data;
1660 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
1661 u32 status, qc_active = 0;
1664 status = readl(port_mmio + PORT_IRQ_STAT);
1665 writel(status, port_mmio + PORT_IRQ_STAT);
1667 /* ignore BAD_PMP while resetting */
1668 if (unlikely(resetting))
1669 status &= ~PORT_IRQ_BAD_PMP;
1671 /* If we are getting PhyRdy, this is
1672 * just a power state change, we should
1673 * clear out this, plus the PhyRdy/Comm
1674 * Wake bits from Serror
1676 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
1677 (status & PORT_IRQ_PHYRDY)) {
1678 status &= ~PORT_IRQ_PHYRDY;
1679 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1682 if (unlikely(status & PORT_IRQ_ERROR)) {
1683 ahci_error_intr(ap, status);
1687 if (status & PORT_IRQ_SDB_FIS) {
1688 /* If SNotification is available, leave notification
1689 * handling to sata_async_notification(). If not,
1690 * emulate it by snooping SDB FIS RX area.
1692 * Snooping FIS RX area is probably cheaper than
1693 * poking SNotification but some constrollers which
1694 * implement SNotification, ICH9 for example, don't
1695 * store AN SDB FIS into receive area.
1697 if (hpriv->cap & HOST_CAP_SNTF)
1698 sata_async_notification(ap);
1700 /* If the 'N' bit in word 0 of the FIS is set,
1701 * we just received asynchronous notification.
1702 * Tell libata about it.
1704 * Lack of SNotification should not appear in
1705 * ahci 1.2, so the workaround is unnecessary
1706 * when FBS is enabled.
1708 if (pp->fbs_enabled)
1711 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
1712 u32 f0 = le32_to_cpu(f[0]);
1714 sata_async_notification(ap);
1719 /* pp->active_link is not reliable once FBS is enabled, both
1720 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
1721 * NCQ and non-NCQ commands may be in flight at the same time.
1723 if (pp->fbs_enabled) {
1724 if (ap->qc_active) {
1725 qc_active = readl(port_mmio + PORT_SCR_ACT);
1726 qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
1729 /* pp->active_link is valid iff any command is in flight */
1730 if (ap->qc_active && pp->active_link->sactive)
1731 qc_active = readl(port_mmio + PORT_SCR_ACT);
1733 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
1737 rc = ata_qc_complete_multiple(ap, qc_active);
1739 /* while resetting, invalid completions are expected */
1740 if (unlikely(rc < 0 && !resetting)) {
1741 ehi->err_mask |= AC_ERR_HSM;
1742 ehi->action |= ATA_EH_RESET;
1743 ata_port_freeze(ap);
1747 irqreturn_t ahci_interrupt(int irq, void *dev_instance)
1749 struct ata_host *host = dev_instance;
1750 struct ahci_host_priv *hpriv;
1751 unsigned int i, handled = 0;
1753 u32 irq_stat, irq_masked;
1757 hpriv = host->private_data;
1760 /* sigh. 0xffffffff is a valid return from h/w */
1761 irq_stat = readl(mmio + HOST_IRQ_STAT);
1765 irq_masked = irq_stat & hpriv->port_map;
1767 spin_lock(&host->lock);
1769 for (i = 0; i < host->n_ports; i++) {
1770 struct ata_port *ap;
1772 if (!(irq_masked & (1 << i)))
1775 ap = host->ports[i];
1778 VPRINTK("port %u\n", i);
1780 VPRINTK("port %u (no irq)\n", i);
1781 if (ata_ratelimit())
1782 dev_printk(KERN_WARNING, host->dev,
1783 "interrupt on disabled port %u\n", i);
1789 /* HOST_IRQ_STAT behaves as level triggered latch meaning that
1790 * it should be cleared after all the port events are cleared;
1791 * otherwise, it will raise a spurious interrupt after each
1792 * valid one. Please read section 10.6.2 of ahci 1.1 for more
1795 * Also, use the unmasked value to clear interrupt as spurious
1796 * pending event on a dummy port might cause screaming IRQ.
1798 writel(irq_stat, mmio + HOST_IRQ_STAT);
1800 spin_unlock(&host->lock);
1804 return IRQ_RETVAL(handled);
1806 EXPORT_SYMBOL_GPL(ahci_interrupt);
1808 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1810 struct ata_port *ap = qc->ap;
1811 void __iomem *port_mmio = ahci_port_base(ap);
1812 struct ahci_port_priv *pp = ap->private_data;
1814 /* Keep track of the currently active link. It will be used
1815 * in completion path to determine whether NCQ phase is in
1818 pp->active_link = qc->dev->link;
1820 if (qc->tf.protocol == ATA_PROT_NCQ)
1821 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1823 if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
1824 u32 fbs = readl(port_mmio + PORT_FBS);
1825 fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
1826 fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
1827 writel(fbs, port_mmio + PORT_FBS);
1828 pp->fbs_last_dev = qc->dev->link->pmp;
1831 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1833 ahci_sw_activity(qc->dev->link);
1838 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
1840 struct ahci_port_priv *pp = qc->ap->private_data;
1841 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1843 if (pp->fbs_enabled)
1844 d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
1846 ata_tf_from_fis(d2h_fis, &qc->result_tf);
1850 static void ahci_freeze(struct ata_port *ap)
1852 void __iomem *port_mmio = ahci_port_base(ap);
1855 writel(0, port_mmio + PORT_IRQ_MASK);
1858 static void ahci_thaw(struct ata_port *ap)
1860 struct ahci_host_priv *hpriv = ap->host->private_data;
1861 void __iomem *mmio = hpriv->mmio;
1862 void __iomem *port_mmio = ahci_port_base(ap);
1864 struct ahci_port_priv *pp = ap->private_data;
1867 tmp = readl(port_mmio + PORT_IRQ_STAT);
1868 writel(tmp, port_mmio + PORT_IRQ_STAT);
1869 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
1871 /* turn IRQ back on */
1872 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1875 static void ahci_error_handler(struct ata_port *ap)
1877 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1878 /* restart engine */
1879 ahci_stop_engine(ap);
1880 ahci_start_engine(ap);
1883 sata_pmp_error_handler(ap);
1885 if (!ata_dev_enabled(ap->link.device))
1886 ahci_stop_engine(ap);
1889 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1891 struct ata_port *ap = qc->ap;
1893 /* make DMA engine forget about the failed command */
1894 if (qc->flags & ATA_QCFLAG_FAILED)
1895 ahci_kick_engine(ap);
1898 static void ahci_enable_fbs(struct ata_port *ap)
1900 struct ahci_port_priv *pp = ap->private_data;
1901 void __iomem *port_mmio = ahci_port_base(ap);
1905 if (!pp->fbs_supported)
1908 fbs = readl(port_mmio + PORT_FBS);
1909 if (fbs & PORT_FBS_EN) {
1910 pp->fbs_enabled = true;
1911 pp->fbs_last_dev = -1; /* initialization */
1915 rc = ahci_stop_engine(ap);
1919 writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
1920 fbs = readl(port_mmio + PORT_FBS);
1921 if (fbs & PORT_FBS_EN) {
1922 dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
1923 pp->fbs_enabled = true;
1924 pp->fbs_last_dev = -1; /* initialization */
1926 dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
1928 ahci_start_engine(ap);
1931 static void ahci_disable_fbs(struct ata_port *ap)
1933 struct ahci_port_priv *pp = ap->private_data;
1934 void __iomem *port_mmio = ahci_port_base(ap);
1938 if (!pp->fbs_supported)
1941 fbs = readl(port_mmio + PORT_FBS);
1942 if ((fbs & PORT_FBS_EN) == 0) {
1943 pp->fbs_enabled = false;
1947 rc = ahci_stop_engine(ap);
1951 writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
1952 fbs = readl(port_mmio + PORT_FBS);
1953 if (fbs & PORT_FBS_EN)
1954 dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
1956 dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
1957 pp->fbs_enabled = false;
1960 ahci_start_engine(ap);
1963 static void ahci_pmp_attach(struct ata_port *ap)
1965 void __iomem *port_mmio = ahci_port_base(ap);
1966 struct ahci_port_priv *pp = ap->private_data;
1969 cmd = readl(port_mmio + PORT_CMD);
1970 cmd |= PORT_CMD_PMP;
1971 writel(cmd, port_mmio + PORT_CMD);
1973 ahci_enable_fbs(ap);
1975 pp->intr_mask |= PORT_IRQ_BAD_PMP;
1976 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1979 static void ahci_pmp_detach(struct ata_port *ap)
1981 void __iomem *port_mmio = ahci_port_base(ap);
1982 struct ahci_port_priv *pp = ap->private_data;
1985 ahci_disable_fbs(ap);
1987 cmd = readl(port_mmio + PORT_CMD);
1988 cmd &= ~PORT_CMD_PMP;
1989 writel(cmd, port_mmio + PORT_CMD);
1991 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
1992 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
/*
 * ahci_port_resume - power up and (re)start the port, then sync the
 * PMP attach state with what EH currently believes.
 */
static int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}
2009 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2011 const char *emsg = NULL;
2014 rc = ahci_deinit_port(ap, &emsg);
2016 ahci_power_down(ap);
2018 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2019 ahci_start_port(ap);
2026 static int ahci_port_start(struct ata_port *ap)
2028 struct ahci_host_priv *hpriv = ap->host->private_data;
2029 struct device *dev = ap->host->dev;
2030 struct ahci_port_priv *pp;
2033 size_t dma_sz, rx_fis_sz;
2035 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2039 /* check FBS capability */
2040 if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
2041 void __iomem *port_mmio = ahci_port_base(ap);
2042 u32 cmd = readl(port_mmio + PORT_CMD);
2043 if (cmd & PORT_CMD_FBSCP)
2044 pp->fbs_supported = true;
2045 else if (hpriv->flags & AHCI_HFLAG_YES_FBS) {
2046 dev_printk(KERN_INFO, dev,
2047 "port %d can do FBS, forcing FBSCP\n",
2049 pp->fbs_supported = true;
2051 dev_printk(KERN_WARNING, dev,
2052 "port %d is not capable of FBS\n",
2056 if (pp->fbs_supported) {
2057 dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
2058 rx_fis_sz = AHCI_RX_FIS_SZ * 16;
2060 dma_sz = AHCI_PORT_PRIV_DMA_SZ;
2061 rx_fis_sz = AHCI_RX_FIS_SZ;
2064 mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
2067 memset(mem, 0, dma_sz);
2070 * First item in chunk of DMA memory: 32-slot command table,
2071 * 32 bytes each in size
2074 pp->cmd_slot_dma = mem_dma;
2076 mem += AHCI_CMD_SLOT_SZ;
2077 mem_dma += AHCI_CMD_SLOT_SZ;
2080 * Second item: Received-FIS area
2083 pp->rx_fis_dma = mem_dma;
2086 mem_dma += rx_fis_sz;
2089 * Third item: data area for storing a single command
2090 * and its scatter-gather table
2093 pp->cmd_tbl_dma = mem_dma;
2096 * Save off initial list of interrupts to be enabled.
2097 * This could be changed later
2099 pp->intr_mask = DEF_PORT_IRQ;
2101 ap->private_data = pp;
2103 /* engage engines, captain */
2104 return ahci_port_resume(ap);
2107 static void ahci_port_stop(struct ata_port *ap)
2109 const char *emsg = NULL;
2112 /* de-initialize port */
2113 rc = ahci_deinit_port(ap, &emsg);
2115 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2118 void ahci_print_info(struct ata_host *host, const char *scc_s)
2120 struct ahci_host_priv *hpriv = host->private_data;
2121 void __iomem *mmio = hpriv->mmio;
2122 u32 vers, cap, cap2, impl, speed;
2123 const char *speed_s;
2125 vers = readl(mmio + HOST_VERSION);
2128 impl = hpriv->port_map;
2130 speed = (cap >> 20) & 0xf;
2133 else if (speed == 2)
2135 else if (speed == 3)
2141 "AHCI %02x%02x.%02x%02x "
2142 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2145 (vers >> 24) & 0xff,
2146 (vers >> 16) & 0xff,
2150 ((cap >> 8) & 0x1f) + 1,
2163 cap & HOST_CAP_64 ? "64bit " : "",
2164 cap & HOST_CAP_NCQ ? "ncq " : "",
2165 cap & HOST_CAP_SNTF ? "sntf " : "",
2166 cap & HOST_CAP_MPS ? "ilck " : "",
2167 cap & HOST_CAP_SSS ? "stag " : "",
2168 cap & HOST_CAP_ALPM ? "pm " : "",
2169 cap & HOST_CAP_LED ? "led " : "",
2170 cap & HOST_CAP_CLO ? "clo " : "",
2171 cap & HOST_CAP_ONLY ? "only " : "",
2172 cap & HOST_CAP_PMP ? "pmp " : "",
2173 cap & HOST_CAP_FBS ? "fbs " : "",
2174 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2175 cap & HOST_CAP_SSC ? "slum " : "",
2176 cap & HOST_CAP_PART ? "part " : "",
2177 cap & HOST_CAP_CCC ? "ccc " : "",
2178 cap & HOST_CAP_EMS ? "ems " : "",
2179 cap & HOST_CAP_SXS ? "sxs " : "",
2180 cap2 & HOST_CAP2_APST ? "apst " : "",
2181 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2182 cap2 & HOST_CAP2_BOH ? "boh " : ""
2185 EXPORT_SYMBOL_GPL(ahci_print_info);
2187 void ahci_set_em_messages(struct ahci_host_priv *hpriv,
2188 struct ata_port_info *pi)
2191 void __iomem *mmio = hpriv->mmio;
2192 u32 em_loc = readl(mmio + HOST_EM_LOC);
2193 u32 em_ctl = readl(mmio + HOST_EM_CTL);
2195 if (!ahci_em_messages || !(hpriv->cap & HOST_CAP_EMS))
2198 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
2202 hpriv->em_loc = ((em_loc >> 16) * 4);
2203 hpriv->em_buf_sz = ((em_loc & 0xff) * 4);
2204 hpriv->em_msg_type = messages;
2205 pi->flags |= ATA_FLAG_EM;
2206 if (!(em_ctl & EM_CTL_ALHD))
2207 pi->flags |= ATA_FLAG_SW_ACTIVITY;
2210 EXPORT_SYMBOL_GPL(ahci_set_em_messages);
2212 MODULE_AUTHOR("Jeff Garzik");
2213 MODULE_DESCRIPTION("Common AHCI SATA low-level routines");
2214 MODULE_LICENSE("GPL");