2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
43 #include <linux/kernel.h>
44 #include <linux/module.h>
45 #include <linux/pci.h>
46 #include <linux/init.h>
47 #include <linux/list.h>
49 #include <linux/highmem.h>
50 #include <linux/spinlock.h>
51 #include <linux/blkdev.h>
52 #include <linux/delay.h>
53 #include <linux/timer.h>
54 #include <linux/interrupt.h>
55 #include <linux/completion.h>
56 #include <linux/suspend.h>
57 #include <linux/workqueue.h>
58 #include <linux/jiffies.h>
59 #include <linux/scatterlist.h>
61 #include <scsi/scsi.h>
62 #include <scsi/scsi_cmnd.h>
63 #include <scsi/scsi_host.h>
64 #include <linux/libata.h>
65 #include <asm/semaphore.h>
66 #include <asm/byteorder.h>
67 #include <linux/cdrom.h>
72 /* debounce timing parameters in msecs { interval, duration, timeout } */
73 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
74 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
75 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
77 static unsigned int ata_dev_init_params(struct ata_device *dev,
78 u16 heads, u16 sectors);
79 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
80 static unsigned int ata_dev_set_feature(struct ata_device *dev,
81 u8 enable, u8 feature);
82 static void ata_dev_xfermask(struct ata_device *dev);
83 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
85 unsigned int ata_print_id = 1;
86 static struct workqueue_struct *ata_wq;
88 struct workqueue_struct *ata_aux_wq;
90 struct ata_force_param {
94 unsigned long xfer_mask;
95 unsigned int horkage_on;
96 unsigned int horkage_off;
99 struct ata_force_ent {
102 struct ata_force_param param;
105 static struct ata_force_ent *ata_force_tbl;
106 static int ata_force_tbl_size;
108 static char ata_force_param_buf[PAGE_SIZE] __initdata;
109 /* param_buf is thrown away after initialization, disallow read */
110 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
111 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
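/*
 * For instance (an illustrative combination, not a recommendation), booting
 * with "libata.force=1.00:udma4,2:noncq" would cap device 1.00 at UDMA/66
 * and disable NCQ on every device attached to port 2.
 */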
113 int atapi_enabled = 1;
114 module_param(atapi_enabled, int, 0444);
115 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
117 static int atapi_dmadir = 0;
118 module_param(atapi_dmadir, int, 0444);
119 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
121 int atapi_passthru16 = 1;
122 module_param(atapi_passthru16, int, 0444);
123 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
126 module_param_named(fua, libata_fua, int, 0444);
127 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
129 static int ata_ignore_hpa;
130 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
131 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
133 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
134 module_param_named(dma, libata_dma_mask, int, 0444);
135 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
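/*
 * As an example, "libata.dma=3" (ATA_DMA_MASK_ATA | ATA_DMA_MASK_ATAPI)
 * keeps DMA enabled for ATA and ATAPI devices while forcing CompactFlash
 * devices down to PIO.
 */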
137 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
138 module_param(ata_probe_timeout, int, 0444);
139 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
141 int libata_noacpi = 0;
142 module_param_named(noacpi, libata_noacpi, int, 0444);
143 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
145 int libata_allow_tpm = 0;
146 module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
147 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
149 MODULE_AUTHOR("Jeff Garzik");
150 MODULE_DESCRIPTION("Library module for ATA devices");
151 MODULE_LICENSE("GPL");
152 MODULE_VERSION(DRV_VERSION);
156 * ata_force_cbl - force cable type according to libata.force
157 * @ap: ATA port of interest
159 * Force cable type according to libata.force and whine about it.
160 * The last entry which has matching port number is used, so it
161 * can be specified as part of device force parameters. For
162 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the same effect.
168 void ata_force_cbl(struct ata_port *ap)
172 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
173 const struct ata_force_ent *fe = &ata_force_tbl[i];
175 if (fe->port != -1 && fe->port != ap->print_id)
178 if (fe->param.cbl == ATA_CBL_NONE)
181 ap->cbl = fe->param.cbl;
182 ata_port_printk(ap, KERN_NOTICE,
183 "FORCE: cable set to %s\n", fe->param.name);
189 * ata_force_spd_limit - force SATA spd limit according to libata.force
190 * @link: ATA link of interest
192 * Force SATA spd limit according to libata.force and whine about
193 * it. When only the port part is specified (e.g. 1:), the limit
194 * applies to all links connected to both the host link and all
195 * fan-out ports connected via PMP. If the device part is
196 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
197 * link, not the host link. Device number 15 always points to the
198 * host link whether PMP is attached or not.
203 static void ata_force_spd_limit(struct ata_link *link)
207 if (ata_is_host_link(link))
212 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
213 const struct ata_force_ent *fe = &ata_force_tbl[i];
215 if (fe->port != -1 && fe->port != link->ap->print_id)
218 if (fe->device != -1 && fe->device != linkno)
221 if (!fe->param.spd_limit)
224 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
225 ata_link_printk(link, KERN_NOTICE,
226 "FORCE: PHY spd limit set to %s\n", fe->param.name);
232 * ata_force_xfermask - force xfermask according to libata.force
233 * @dev: ATA device of interest
235 * Force xfer_mask according to libata.force and whine about it.
236 * For consistency with link selection, device number 15 selects
237 * the first device connected to the host link.
242 static void ata_force_xfermask(struct ata_device *dev)
244 int devno = dev->link->pmp + dev->devno;
245 int alt_devno = devno;
248 /* allow n.15 for the first device attached to host port */
249 if (ata_is_host_link(dev->link) && devno == 0)
252 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
253 const struct ata_force_ent *fe = &ata_force_tbl[i];
254 unsigned long pio_mask, mwdma_mask, udma_mask;
256 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
259 if (fe->device != -1 && fe->device != devno &&
260 fe->device != alt_devno)
263 if (!fe->param.xfer_mask)
266 ata_unpack_xfermask(fe->param.xfer_mask,
267 &pio_mask, &mwdma_mask, &udma_mask);
269 dev->udma_mask = udma_mask;
270 else if (mwdma_mask) {
272 dev->mwdma_mask = mwdma_mask;
276 dev->pio_mask = pio_mask;
279 ata_dev_printk(dev, KERN_NOTICE,
280 "FORCE: xfer_mask set to %s\n", fe->param.name);
286 * ata_force_horkage - force horkage according to libata.force
287 * @dev: ATA device of interest
289 * Force horkage according to libata.force and whine about it.
290 * For consistency with link selection, device number 15 selects
291 * the first device connected to the host link.
296 static void ata_force_horkage(struct ata_device *dev)
298 int devno = dev->link->pmp + dev->devno;
299 int alt_devno = devno;
302 /* allow n.15 for the first device attached to host port */
303 if (ata_is_host_link(dev->link) && devno == 0)
306 for (i = 0; i < ata_force_tbl_size; i++) {
307 const struct ata_force_ent *fe = &ata_force_tbl[i];
309 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
312 if (fe->device != -1 && fe->device != devno &&
313 fe->device != alt_devno)
316 if (!(~dev->horkage & fe->param.horkage_on) &&
317 !(dev->horkage & fe->param.horkage_off))
320 dev->horkage |= fe->param.horkage_on;
321 dev->horkage &= ~fe->param.horkage_off;
323 ata_dev_printk(dev, KERN_NOTICE,
324 "FORCE: horkage modified (%s)\n", fe->param.name);
329 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
330 * @opcode: SCSI opcode
332 * Determine ATAPI command type from @opcode.
338 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
340 int atapi_cmd_type(u8 opcode)
349 case GPCMD_WRITE_AND_VERIFY_10:
353 case GPCMD_READ_CD_MSF:
354 return ATAPI_READ_CD;
362 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
363 * @tf: Taskfile to convert
364 * @pmp: Port multiplier port
365 * @is_cmd: This FIS is for command
366 * @fis: Buffer into which data will be output
368 * Converts a standard ATA taskfile to a Serial ATA
369 * FIS structure (Register - Host to Device).
372 * Inherited from caller.
374 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
376 fis[0] = 0x27; /* Register - Host to Device FIS */
377 fis[1] = pmp & 0xf; /* Port multiplier number */
379 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
381 fis[2] = tf->command;
382 fis[3] = tf->feature;
389 fis[8] = tf->hob_lbal;
390 fis[9] = tf->hob_lbam;
391 fis[10] = tf->hob_lbah;
392 fis[11] = tf->hob_feature;
395 fis[13] = tf->hob_nsect;
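/*
 * Worked example (values illustrative): a READ DMA EXT (ATA_CMD_READ_EXT)
 * command FIS for LBA 256, count 8, PMP port 0 comes out as fis[0] = 0x27,
 * fis[1] = 0x80 (command bit set), fis[2] = 0x25, fis[4..6] = 0x00/0x01/0x00
 * for lbal/lbam/lbah, fis[8..10] = 0 for the HOB address bytes and
 * fis[12] = 0x08 for the sector count.
 */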
406 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
407 * @fis: Buffer from which data will be input
408 * @tf: Taskfile to output
410 * Converts a serial ATA FIS structure to a standard ATA taskfile.
413 * Inherited from caller.
416 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
418 tf->command = fis[2]; /* status */
419 tf->feature = fis[3]; /* error */
426 tf->hob_lbal = fis[8];
427 tf->hob_lbam = fis[9];
428 tf->hob_lbah = fis[10];
431 tf->hob_nsect = fis[13];
434 static const u8 ata_rw_cmds[] = {
438 ATA_CMD_READ_MULTI_EXT,
439 ATA_CMD_WRITE_MULTI_EXT,
443 ATA_CMD_WRITE_MULTI_FUA_EXT,
447 ATA_CMD_PIO_READ_EXT,
448 ATA_CMD_PIO_WRITE_EXT,
461 ATA_CMD_WRITE_FUA_EXT
465 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
466 * @tf: command to examine and configure
467 * @dev: device tf belongs to
469 * Examine the device configuration and tf->flags to calculate
470 * the proper read/write commands and protocol to use.
475 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
479 int index, fua, lba48, write;
481 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
482 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
483 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
485 if (dev->flags & ATA_DFLAG_PIO) {
486 tf->protocol = ATA_PROT_PIO;
487 index = dev->multi_count ? 0 : 8;
488 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
489 /* Unable to use DMA due to host limitation */
490 tf->protocol = ATA_PROT_PIO;
491 index = dev->multi_count ? 0 : 8;
493 tf->protocol = ATA_PROT_DMA;
497 cmd = ata_rw_cmds[index + fua + lba48 + write];
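/*
 * For example, an LBA48 FUA write offsets its group base by
 * fua (4) + lba48 (2) + write (1) = 7, i.e. the last slot of the group,
 * which in the DMA case resolves to ATA_CMD_WRITE_FUA_EXT above.
 */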
506 * ata_tf_read_block - Read block address from ATA taskfile
507 * @tf: ATA taskfile of interest
508 * @dev: ATA device @tf belongs to
513 * Read block address from @tf. This function can handle all
514 * three address formats - LBA, LBA48 and CHS. tf->protocol and
515 * flags select the address format to use.
518 * Block address read from @tf.
520 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
524 if (tf->flags & ATA_TFLAG_LBA) {
525 if (tf->flags & ATA_TFLAG_LBA48) {
526 block |= (u64)tf->hob_lbah << 40;
527 block |= (u64)tf->hob_lbam << 32;
528 block |= tf->hob_lbal << 24;
530 block |= (tf->device & 0xf) << 24;
532 block |= tf->lbah << 16;
533 block |= tf->lbam << 8;
538 cyl = tf->lbam | (tf->lbah << 8);
539 head = tf->device & 0xf;
542 block = (cyl * dev->heads + head) * dev->sectors + sect;
549 * ata_build_rw_tf - Build ATA taskfile for given read/write request
550 * @tf: Target ATA taskfile
551 * @dev: ATA device @tf belongs to
552 * @block: Block address
553 * @n_block: Number of blocks
554 * @tf_flags: RW/FUA etc...
560 * Build ATA taskfile @tf for read/write request described by
561 * @block, @n_block, @tf_flags and @tag on @dev.
565 * 0 on success, -ERANGE if the request is too large for @dev,
566 * -EINVAL if the request is invalid.
568 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
569 u64 block, u32 n_block, unsigned int tf_flags,
572 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
573 tf->flags |= tf_flags;
575 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
577 if (!lba_48_ok(block, n_block))
580 tf->protocol = ATA_PROT_NCQ;
581 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
583 if (tf->flags & ATA_TFLAG_WRITE)
584 tf->command = ATA_CMD_FPDMA_WRITE;
586 tf->command = ATA_CMD_FPDMA_READ;
588 tf->nsect = tag << 3;
589 tf->hob_feature = (n_block >> 8) & 0xff;
590 tf->feature = n_block & 0xff;
592 tf->hob_lbah = (block >> 40) & 0xff;
593 tf->hob_lbam = (block >> 32) & 0xff;
594 tf->hob_lbal = (block >> 24) & 0xff;
595 tf->lbah = (block >> 16) & 0xff;
596 tf->lbam = (block >> 8) & 0xff;
597 tf->lbal = block & 0xff;
600 if (tf->flags & ATA_TFLAG_FUA)
601 tf->device |= 1 << 7;
602 } else if (dev->flags & ATA_DFLAG_LBA) {
603 tf->flags |= ATA_TFLAG_LBA;
605 if (lba_28_ok(block, n_block)) {
607 tf->device |= (block >> 24) & 0xf;
608 } else if (lba_48_ok(block, n_block)) {
609 if (!(dev->flags & ATA_DFLAG_LBA48))
613 tf->flags |= ATA_TFLAG_LBA48;
615 tf->hob_nsect = (n_block >> 8) & 0xff;
617 tf->hob_lbah = (block >> 40) & 0xff;
618 tf->hob_lbam = (block >> 32) & 0xff;
619 tf->hob_lbal = (block >> 24) & 0xff;
621 /* request too large even for LBA48 */
624 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
627 tf->nsect = n_block & 0xff;
629 tf->lbah = (block >> 16) & 0xff;
630 tf->lbam = (block >> 8) & 0xff;
631 tf->lbal = block & 0xff;
633 tf->device |= ATA_LBA;
636 u32 sect, head, cyl, track;
638 /* The request -may- be too large for CHS addressing. */
639 if (!lba_28_ok(block, n_block))
642 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
645 /* Convert LBA to CHS */
646 track = (u32)block / dev->sectors;
647 cyl = track / dev->heads;
648 head = track % dev->heads;
649 sect = (u32)block % dev->sectors + 1;
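/*
 * Worked example, assuming a 16-head, 63-sector geometry: block 2215
 * gives track = 2215 / 63 = 35, cyl = 35 / 16 = 2, head = 35 % 16 = 3
 * and sect = 2215 % 63 + 1 = 11.
 */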
651 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
652 (u32)block, track, cyl, head, sect);
654 /* Check whether the converted CHS can fit.
658 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
661 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
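/*
 * The SCSI translation layer drives this helper roughly as in the sketch
 * below (locals and error handling trimmed):
 *
 *	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block,
 *			     tf_flags, qc->tag);
 *	if (rc)
 *		... fail the request (-ERANGE: too large, -EINVAL: invalid) ...
 */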
672 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
673 * @pio_mask: pio_mask
674 * @mwdma_mask: mwdma_mask
675 * @udma_mask: udma_mask
677 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
678 * unsigned int xfer_mask.
686 unsigned long ata_pack_xfermask(unsigned long pio_mask,
687 unsigned long mwdma_mask,
688 unsigned long udma_mask)
690 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
691 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
692 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
696 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
697 * @xfer_mask: xfer_mask to unpack
698 * @pio_mask: resulting pio_mask
699 * @mwdma_mask: resulting mwdma_mask
700 * @udma_mask: resulting udma_mask
702 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
703 * Any NULL destination masks will be ignored.
705 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
706 unsigned long *mwdma_mask, unsigned long *udma_mask)
709 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
711 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
713 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
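/*
 * A minimal usage sketch (mode constants illustrative): a driver supporting
 * PIO0-4, MWDMA0-2 and UDMA0-5 could build a combined mask and split it
 * again with
 *
 *	unsigned long xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA5);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 */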
716 static const struct ata_xfer_ent {
720 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
721 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
722 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
727 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
728 * @xfer_mask: xfer_mask of interest
730 * Return matching XFER_* value for @xfer_mask. Only the highest
731 * bit of @xfer_mask is considered.
737 * Matching XFER_* value, 0xff if no match found.
739 u8 ata_xfer_mask2mode(unsigned long xfer_mask)
741 int highbit = fls(xfer_mask) - 1;
742 const struct ata_xfer_ent *ent;
744 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
745 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
746 return ent->base + highbit - ent->shift;
751 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
752 * @xfer_mode: XFER_* of interest
754 * Return matching xfer_mask for @xfer_mode.
760 * Matching xfer_mask, 0 if no match found.
762 unsigned long ata_xfer_mode2mask(u8 xfer_mode)
764 const struct ata_xfer_ent *ent;
766 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
767 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
768 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
769 & ~((1 << ent->shift) - 1);
774 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
775 * @xfer_mode: XFER_* of interest
777 * Return matching xfer_shift for @xfer_mode.
783 * Matching xfer_shift, -1 if no match found.
785 int ata_xfer_mode2shift(unsigned long xfer_mode)
787 const struct ata_xfer_ent *ent;
789 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
790 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
796 * ata_mode_string - convert xfer_mask to string
797 * @xfer_mask: mask of bits supported; only highest bit counts.
799 * Determine string which represents the highest speed
800 * (highest bit in @xfer_mask).
806 * Constant C string representing highest speed listed in
807 * @xfer_mask, or the constant C string "<n/a>".
809 const char *ata_mode_string(unsigned long xfer_mask)
811 static const char * const xfer_mode_str[] = {
835 highbit = fls(xfer_mask) - 1;
836 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
837 return xfer_mode_str[highbit];
841 static const char *sata_spd_string(unsigned int spd)
843 static const char * const spd_str[] = {
848 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
850 return spd_str[spd - 1];
853 void ata_dev_disable(struct ata_device *dev)
855 if (ata_dev_enabled(dev)) {
856 if (ata_msg_drv(dev->link->ap))
857 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
858 ata_acpi_on_disable(dev);
859 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
865 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
867 struct ata_link *link = dev->link;
868 struct ata_port *ap = link->ap;
870 unsigned int err_mask;
874 * disallow DIPM for drivers which haven't set
875 * ATA_FLAG_IPM. This is because when DIPM is enabled,
876 * phy ready will be set in the interrupt status on
877 * state changes, which will cause some drivers to
878 * think there are errors - additionally drivers will
879 * need to disable hot plug.
881 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
882 ap->pm_policy = NOT_AVAILABLE;
887 * For DIPM, we will only enable it for the min_power setting.
890 * Why? Because disks are too stupid to know that,
891 * if the host rejects a request to go to SLUMBER,
892 * they should retry at PARTIAL; instead they
893 * just give up. So, for medium_power to
894 * work at all, we need to only allow HIPM.
896 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
902 /* no restrictions on IPM transitions */
903 scontrol &= ~(0x3 << 8);
904 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
909 if (dev->flags & ATA_DFLAG_DIPM)
910 err_mask = ata_dev_set_feature(dev,
911 SETFEATURES_SATA_ENABLE, SATA_DIPM);
914 /* allow IPM to PARTIAL */
915 scontrol &= ~(0x1 << 8);
916 scontrol |= (0x2 << 8);
917 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
922 * we don't have to disable DIPM since IPM flags
923 * disallow transitions to SLUMBER, which effectively
924 * disables DIPM if the device does not support PARTIAL
928 case MAX_PERFORMANCE:
929 /* disable all IPM transitions */
930 scontrol |= (0x3 << 8);
931 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
936 * we don't have to disable DIPM since IPM flags
937 * disallow all transitions, which effectively
938 * disables DIPM anyway.
943 /* FIXME: handle SET FEATURES failure */
950 * ata_dev_enable_pm - enable SATA interface power management
951 * @dev: device to enable power management
952 * @policy: the link power management policy
954 * Enable SATA Interface power management. This will enable
955 * Device Interface Power Management (DIPM) for min_power
956 * policy, and then call driver specific callbacks for
957 * enabling Host Initiated Power management.
960 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
962 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
965 struct ata_port *ap = dev->link->ap;
967 /* set HIPM first, then DIPM */
968 if (ap->ops->enable_pm)
969 rc = ap->ops->enable_pm(ap, policy);
972 rc = ata_dev_set_dipm(dev, policy);
976 ap->pm_policy = MAX_PERFORMANCE;
978 ap->pm_policy = policy;
979 return /* rc */; /* hopefully we can use 'rc' eventually */
984 * ata_dev_disable_pm - disable SATA interface power management
985 * @dev: device to disable power management
987 * Disable SATA Interface power management. This will disable
988 * Device Interface Power Management (DIPM) without changing
989 * policy, call driver specific callbacks for disabling Host
990 * Initiated Power management.
995 static void ata_dev_disable_pm(struct ata_device *dev)
997 struct ata_port *ap = dev->link->ap;
999 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
1000 if (ap->ops->disable_pm)
1001 ap->ops->disable_pm(ap);
1003 #endif /* CONFIG_PM */
1005 void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1007 ap->pm_policy = policy;
1008 ap->link.eh_info.action |= ATA_EH_LPM;
1009 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1010 ata_port_schedule_eh(ap);
1014 static void ata_lpm_enable(struct ata_host *host)
1016 struct ata_link *link;
1017 struct ata_port *ap;
1018 struct ata_device *dev;
1021 for (i = 0; i < host->n_ports; i++) {
1022 ap = host->ports[i];
1023 ata_port_for_each_link(link, ap) {
1024 ata_link_for_each_dev(dev, link)
1025 ata_dev_disable_pm(dev);
1030 static void ata_lpm_disable(struct ata_host *host)
1034 for (i = 0; i < host->n_ports; i++) {
1035 struct ata_port *ap = host->ports[i];
1036 ata_lpm_schedule(ap, ap->pm_policy);
1039 #endif /* CONFIG_PM */
1043 * ata_devchk - PATA device presence detection
1044 * @ap: ATA channel to examine
1045 * @device: Device to examine (starting at zero)
1047 * This technique was originally described in
1048 * Hale Landis's ATADRVR (www.ata-atapi.com), and
1049 * later found its way into the ATA/ATAPI spec.
1051 * Write a pattern to the ATA shadow registers,
1052 * and if a device is present, it will respond by
1053 * correctly storing and echoing back the
1054 * ATA shadow register contents.
1060 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1062 struct ata_ioports *ioaddr = &ap->ioaddr;
1065 ap->ops->dev_select(ap, device);
1067 iowrite8(0x55, ioaddr->nsect_addr);
1068 iowrite8(0xaa, ioaddr->lbal_addr);
1070 iowrite8(0xaa, ioaddr->nsect_addr);
1071 iowrite8(0x55, ioaddr->lbal_addr);
1073 iowrite8(0x55, ioaddr->nsect_addr);
1074 iowrite8(0xaa, ioaddr->lbal_addr);
1076 nsect = ioread8(ioaddr->nsect_addr);
1077 lbal = ioread8(ioaddr->lbal_addr);
1079 if ((nsect == 0x55) && (lbal == 0xaa))
1080 return 1; /* we found a device */
1082 return 0; /* nothing found */
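/*
 * The legacy softreset path uses this to build a device mask before
 * resetting the bus, roughly (the slave check depends on the host):
 *
 *	if (ata_devchk(ap, 0))
 *		devmask |= (1 << 0);
 *	if (ata_devchk(ap, 1))
 *		devmask |= (1 << 1);
 */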
1086 * ata_dev_classify - determine device type based on ATA-spec signature
1087 * @tf: ATA taskfile register set for device to be identified
1089 * Determine from taskfile register contents whether a device is
1090 * ATA or ATAPI, as per "Signature and persistence" section
1091 * of ATA/PI spec (volume 1, sect 5.14).
1097 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1098 * %ATA_DEV_UNKNOWN in the event of failure.
1100 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1102 /* Apple's open source Darwin code hints that some devices only
1103 * put a proper signature into the LBA mid/high registers,
1104 * so we only check those. It's sufficient for uniqueness.
1106 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1107 * signatures for ATA and ATAPI devices attached on SerialATA,
1108 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1109 * spec has never mentioned using different signatures
1110 * for ATA/ATAPI devices. Then, the Serial ATA II: Port
1111 * Multiplier specification began to use 0x69/0x96 to identify
1112 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
1113 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1114 * 0x69/0x96 shortly afterwards and described them as reserved for SerialATA.
1117 * We follow the current spec and consider that 0x69/0x96
1118 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1120 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1121 DPRINTK("found ATA device by sig\n");
1125 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1126 DPRINTK("found ATAPI device by sig\n");
1127 return ATA_DEV_ATAPI;
1130 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1131 DPRINTK("found PMP device by sig\n");
1135 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
1136 printk(KERN_INFO "ata: SEMB device ignored\n");
1137 return ATA_DEV_SEMB_UNSUP; /* not yet */
1140 DPRINTK("unknown device\n");
1141 return ATA_DEV_UNKNOWN;
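/*
 * A reset path reads the signature back into a taskfile and classifies it,
 * which is essentially what ata_dev_try_classify() below boils down to:
 *
 *	struct ata_taskfile tf;
 *	unsigned int class;
 *
 *	ap->ops->tf_read(ap, &tf);
 *	class = ata_dev_classify(&tf);
 */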
1145 * ata_dev_try_classify - Parse returned ATA device signature
1146 * @dev: ATA device to classify (starting at zero)
1147 * @present: device seems present
1148 * @r_err: Value of error register on completion
1150 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
1151 * an ATA/ATAPI-defined set of values is placed in the ATA
1152 * shadow registers, indicating the results of device detection and diagnostics.
1155 * Select the ATA device, and read the values from the ATA shadow
1156 * registers. Then parse according to the Error register value,
1157 * and the spec-defined values examined by ata_dev_classify().
1163 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1165 unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
1168 struct ata_port *ap = dev->link->ap;
1169 struct ata_taskfile tf;
1173 ap->ops->dev_select(ap, dev->devno);
1175 memset(&tf, 0, sizeof(tf));
1177 ap->ops->tf_read(ap, &tf);
1182 /* see if device passed diags: continue and warn later */
1184 /* diagnostic fail: do nothing _YET_ */
1185 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
1188 else if ((dev->devno == 0) && (err == 0x81))
1191 return ATA_DEV_NONE;
1193 /* determine if device is ATA or ATAPI */
1194 class = ata_dev_classify(&tf);
1196 if (class == ATA_DEV_UNKNOWN) {
1197 /* If the device failed diagnostic, it's likely to
1198 * have reported incorrect device signature too.
1199 * Assume ATA device if the device seems present but
1200 * device signature is invalid with diagnostic
1203 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
1204 class = ATA_DEV_ATA;
1206 class = ATA_DEV_NONE;
1207 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
1208 class = ATA_DEV_NONE;
1214 * ata_id_string - Convert IDENTIFY DEVICE page into string
1215 * @id: IDENTIFY DEVICE results we will examine
1216 * @s: string into which data is output
1217 * @ofs: offset into identify device page
1218 * @len: length of string to return. Must be an even number.
1220 * The strings in the IDENTIFY DEVICE page are broken up into
1221 * 16-bit chunks. Run through the string, and output each
1222 * 8-bit chunk linearly, regardless of platform.
1228 void ata_id_string(const u16 *id, unsigned char *s,
1229 unsigned int ofs, unsigned int len)
1248 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
1249 * @id: IDENTIFY DEVICE results we will examine
1250 * @s: string into which data is output
1251 * @ofs: offset into identify device page
1252 * @len: length of string to return. Must be an odd number.
1254 * This function is identical to ata_id_string except that it
1255 * trims trailing spaces and terminates the resulting string with
1256 * null. @len must be actual maximum length (even number) + 1.
1261 void ata_id_c_string(const u16 *id, unsigned char *s,
1262 unsigned int ofs, unsigned int len)
1266 WARN_ON(!(len & 1));
1268 ata_id_string(id, s, ofs, len - 1);
1270 p = s + strnlen(s, len - 1);
1271 while (p > s && p[-1] == ' ')
1276 static u64 ata_id_n_sectors(const u16 *id)
1278 if (ata_id_has_lba(id)) {
1279 if (ata_id_has_lba48(id))
1280 return ata_id_u64(id, 100);
1282 return ata_id_u32(id, 60);
1284 if (ata_id_current_chs_valid(id))
1285 return ata_id_u32(id, 57);
1287 return id[1] * id[3] * id[6];
1291 static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1295 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1296 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1297 sectors |= (tf->hob_lbal & 0xff) << 24;
1298 sectors |= (tf->lbah & 0xff) << 16;
1299 sectors |= (tf->lbam & 0xff) << 8;
1300 sectors |= (tf->lbal & 0xff);
1305 static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1309 sectors |= (tf->device & 0x0f) << 24;
1310 sectors |= (tf->lbah & 0xff) << 16;
1311 sectors |= (tf->lbam & 0xff) << 8;
1312 sectors |= (tf->lbal & 0xff);
1318 * ata_read_native_max_address - Read native max address
1319 * @dev: target device
1320 * @max_sectors: out parameter for the result native max address
1322 * Perform an LBA48 or LBA28 native size query upon the device in
1326 * 0 on success, -EACCES if command is aborted by the drive.
1327 * -EIO on other errors.
1329 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1331 unsigned int err_mask;
1332 struct ata_taskfile tf;
1333 int lba48 = ata_id_has_lba48(dev->id);
1335 ata_tf_init(dev, &tf);
1337 /* always clear all address registers */
1338 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1341 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1342 tf.flags |= ATA_TFLAG_LBA48;
1344 tf.command = ATA_CMD_READ_NATIVE_MAX;
1346 tf.protocol |= ATA_PROT_NODATA;
1347 tf.device |= ATA_LBA;
1349 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1351 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1352 "max address (err_mask=0x%x)\n", err_mask);
1353 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1359 *max_sectors = ata_tf_to_lba48(&tf);
1361 *max_sectors = ata_tf_to_lba(&tf);
1362 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
1368 * ata_set_max_sectors - Set max sectors
1369 * @dev: target device
1370 * @new_sectors: new max sectors value to set for the device
1372 * Set max sectors of @dev to @new_sectors.
1375 * 0 on success, -EACCES if command is aborted or denied (due to
1376 * previous non-volatile SET_MAX) by the drive. -EIO on other
1379 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1381 unsigned int err_mask;
1382 struct ata_taskfile tf;
1383 int lba48 = ata_id_has_lba48(dev->id);
1387 ata_tf_init(dev, &tf);
1389 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1392 tf.command = ATA_CMD_SET_MAX_EXT;
1393 tf.flags |= ATA_TFLAG_LBA48;
1395 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1396 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1397 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1399 tf.command = ATA_CMD_SET_MAX;
1401 tf.device |= (new_sectors >> 24) & 0xf;
1404 tf.protocol |= ATA_PROT_NODATA;
1405 tf.device |= ATA_LBA;
1407 tf.lbal = (new_sectors >> 0) & 0xff;
1408 tf.lbam = (new_sectors >> 8) & 0xff;
1409 tf.lbah = (new_sectors >> 16) & 0xff;
1411 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1413 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1414 "max address (err_mask=0x%x)\n", err_mask);
1415 if (err_mask == AC_ERR_DEV &&
1416 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1425 * ata_hpa_resize - Resize a device with an HPA set
1426 * @dev: Device to resize
1428 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1429 * it if required to the full size of the media. The caller must check
1430 * the drive has the HPA feature set enabled.
1433 * 0 on success, -errno on failure.
1435 static int ata_hpa_resize(struct ata_device *dev)
1437 struct ata_eh_context *ehc = &dev->link->eh_context;
1438 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1439 u64 sectors = ata_id_n_sectors(dev->id);
1443 /* do we need to do it? */
1444 if (dev->class != ATA_DEV_ATA ||
1445 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1446 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1449 /* read native max address */
1450 rc = ata_read_native_max_address(dev, &native_sectors);
1452 /* If device aborted the command or HPA isn't going to
1453 * be unlocked, skip HPA resizing.
1455 if (rc == -EACCES || !ata_ignore_hpa) {
1456 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1457 "broken, skipping HPA handling\n");
1458 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1460 /* we can continue if device aborted the command */
1468 /* nothing to do? */
1469 if (native_sectors <= sectors || !ata_ignore_hpa) {
1470 if (!print_info || native_sectors == sectors)
1473 if (native_sectors > sectors)
1474 ata_dev_printk(dev, KERN_INFO,
1475 "HPA detected: current %llu, native %llu\n",
1476 (unsigned long long)sectors,
1477 (unsigned long long)native_sectors);
1478 else if (native_sectors < sectors)
1479 ata_dev_printk(dev, KERN_WARNING,
1480 "native sectors (%llu) is smaller than "
1482 (unsigned long long)native_sectors,
1483 (unsigned long long)sectors);
1487 /* let's unlock HPA */
1488 rc = ata_set_max_sectors(dev, native_sectors);
1489 if (rc == -EACCES) {
1490 /* if device aborted the command, skip HPA resizing */
1491 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1492 "(%llu -> %llu), skipping HPA handling\n",
1493 (unsigned long long)sectors,
1494 (unsigned long long)native_sectors);
1495 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1500 /* re-read IDENTIFY data */
1501 rc = ata_dev_reread_id(dev, 0);
1503 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1504 "data after HPA resizing\n");
1509 u64 new_sectors = ata_id_n_sectors(dev->id);
1510 ata_dev_printk(dev, KERN_INFO,
1511 "HPA unlocked: %llu -> %llu, native %llu\n",
1512 (unsigned long long)sectors,
1513 (unsigned long long)new_sectors,
1514 (unsigned long long)native_sectors);
1521 * ata_noop_dev_select - Select device 0/1 on ATA bus
1522 * @ap: ATA channel to manipulate
1523 * @device: ATA device (numbered from zero) to select
1525 * This function performs no actual function.
1527 * May be used as the dev_select() entry in ata_port_operations.
1532 void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1538 * ata_std_dev_select - Select device 0/1 on ATA bus
1539 * @ap: ATA channel to manipulate
1540 * @device: ATA device (numbered from zero) to select
1542 * Use the method defined in the ATA specification to
1543 * make either device 0, or device 1, active on the
1544 * ATA channel. Works with both PIO and MMIO.
1546 * May be used as the dev_select() entry in ata_port_operations.
1552 void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1557 tmp = ATA_DEVICE_OBS;
1559 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1561 iowrite8(tmp, ap->ioaddr.device_addr);
1562 ata_pause(ap); /* needed; also flushes, for mmio */
1566 * ata_dev_select - Select device 0/1 on ATA bus
1567 * @ap: ATA channel to manipulate
1568 * @device: ATA device (numbered from zero) to select
1569 * @wait: non-zero to wait for Status register BSY bit to clear
1570 * @can_sleep: non-zero if context allows sleeping
1572 * Use the method defined in the ATA specification to
1573 * make either device 0, or device 1, active on the
1576 * This is a high-level version of ata_std_dev_select(),
1577 * which additionally provides the services of inserting
1578 * the proper pauses and status polling, where needed.
1584 void ata_dev_select(struct ata_port *ap, unsigned int device,
1585 unsigned int wait, unsigned int can_sleep)
1587 if (ata_msg_probe(ap))
1588 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1589 "device %u, wait %u\n", device, wait);
1594 ap->ops->dev_select(ap, device);
1597 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1604 * ata_dump_id - IDENTIFY DEVICE info debugging output
1605 * @id: IDENTIFY DEVICE page to dump
1607 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1614 static inline void ata_dump_id(const u16 *id)
1616 DPRINTK("49==0x%04x "
1626 DPRINTK("80==0x%04x "
1636 DPRINTK("88==0x%04x "
1643 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1644 * @id: IDENTIFY data to compute xfer mask from
1646 * Compute the xfermask for this device. This is not as trivial
1647 * as it seems if we must consider early devices correctly.
1649 * FIXME: pre-IDE drive timing (do we care?).
1657 unsigned long ata_id_xfermask(const u16 *id)
1659 unsigned long pio_mask, mwdma_mask, udma_mask;
1661 /* Usual case. Word 53 indicates word 64 is valid */
1662 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1663 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1667 /* If word 64 isn't valid then Word 51 high byte holds
1668 * the PIO timing number for the maximum. Turn it into
1671 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1672 if (mode < 5) /* Valid PIO range */
1673 pio_mask = (2 << mode) - 1;
1677 /* But wait... there's more. Design your standards by
1678 * committee and you too can get a free iordy field to
1679 * process. However, it's the speeds, not the modes, that
1680 * are supported... Note drivers using the timing API
1681 * will get this right anyway
1685 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1687 if (ata_id_is_cfa(id)) {
1689 * Process compact flash extended modes
1691 int pio = id[163] & 0x7;
1692 int dma = (id[163] >> 3) & 7;
1695 pio_mask |= (1 << 5);
1697 pio_mask |= (1 << 6);
1699 mwdma_mask |= (1 << 3);
1701 mwdma_mask |= (1 << 4);
1705 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1706 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1708 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1712 * ata_pio_queue_task - Queue port_task
1713 * @ap: The ata_port to queue port_task for
1714 * @fn: workqueue function to be scheduled
1715 * @data: data for @fn to use
1716 * @delay: delay time for workqueue function
1718 * Schedule @fn(@data) for execution after @delay jiffies using
1719 * port_task. There is one port_task per port and it's the
1720 * user's (low level driver's) responsibility to make sure that only
1721 * one task is active at any given time.
1723 * libata core layer takes care of synchronization between
1724 * port_task and EH. ata_pio_queue_task() may be ignored for EH synchronization.
1728 * Inherited from caller.
1730 static void ata_pio_queue_task(struct ata_port *ap, void *data,
1731 unsigned long delay)
1733 ap->port_task_data = data;
1735 /* may fail if ata_port_flush_task() in progress */
1736 queue_delayed_work(ata_wq, &ap->port_task, delay);
1740 * ata_port_flush_task - Flush port_task
1741 * @ap: The ata_port to flush port_task for
1743 * After this function completes, port_task is guaranteed not to
1744 * be running or scheduled.
1747 * Kernel thread context (may sleep)
1749 void ata_port_flush_task(struct ata_port *ap)
1753 cancel_rearming_delayed_work(&ap->port_task);
1755 if (ata_msg_ctl(ap))
1756 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
1759 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1761 struct completion *waiting = qc->private_data;
1767 * ata_exec_internal_sg - execute libata internal command
1768 * @dev: Device to which the command is sent
1769 * @tf: Taskfile registers for the command and the result
1770 * @cdb: CDB for packet command
1771 * @dma_dir: Data transfer direction of the command
1772 * @sgl: sg list for the data buffer of the command
1773 * @n_elem: Number of sg entries
1774 * @timeout: Timeout in msecs (0 for default)
1776 * Executes libata internal command with timeout. @tf contains
1777 * command on entry and result on return. Timeout and error
1778 * conditions are reported via return value. No recovery action
1779 * is taken after a command times out. It's the caller's duty to
1780 * clean up after timeout.
1783 * None. Should be called with kernel context, might sleep.
1786 * Zero on success, AC_ERR_* mask on failure
1788 unsigned ata_exec_internal_sg(struct ata_device *dev,
1789 struct ata_taskfile *tf, const u8 *cdb,
1790 int dma_dir, struct scatterlist *sgl,
1791 unsigned int n_elem, unsigned long timeout)
1793 struct ata_link *link = dev->link;
1794 struct ata_port *ap = link->ap;
1795 u8 command = tf->command;
1796 struct ata_queued_cmd *qc;
1797 unsigned int tag, preempted_tag;
1798 u32 preempted_sactive, preempted_qc_active;
1799 int preempted_nr_active_links;
1800 DECLARE_COMPLETION_ONSTACK(wait);
1801 unsigned long flags;
1802 unsigned int err_mask;
1805 spin_lock_irqsave(ap->lock, flags);
1807 /* no internal command while frozen */
1808 if (ap->pflags & ATA_PFLAG_FROZEN) {
1809 spin_unlock_irqrestore(ap->lock, flags);
1810 return AC_ERR_SYSTEM;
1813 /* initialize internal qc */
1815 /* XXX: Tag 0 is used for drivers with legacy EH as some
1816 * drivers choke if any other tag is given. This breaks
1817 * ata_tag_internal() test for those drivers. Don't use new
1818 * EH stuff without converting to it.
1820 if (ap->ops->error_handler)
1821 tag = ATA_TAG_INTERNAL;
1825 if (test_and_set_bit(tag, &ap->qc_allocated))
1827 qc = __ata_qc_from_tag(ap, tag);
1835 preempted_tag = link->active_tag;
1836 preempted_sactive = link->sactive;
1837 preempted_qc_active = ap->qc_active;
1838 preempted_nr_active_links = ap->nr_active_links;
1839 link->active_tag = ATA_TAG_POISON;
1842 ap->nr_active_links = 0;
1844 /* prepare & issue qc */
1847 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1848 qc->flags |= ATA_QCFLAG_RESULT_TF;
1849 qc->dma_dir = dma_dir;
1850 if (dma_dir != DMA_NONE) {
1851 unsigned int i, buflen = 0;
1852 struct scatterlist *sg;
1854 for_each_sg(sgl, sg, n_elem, i)
1855 buflen += sg->length;
1857 ata_sg_init(qc, sgl, n_elem);
1858 qc->nbytes = buflen;
1861 qc->private_data = &wait;
1862 qc->complete_fn = ata_qc_complete_internal;
1866 spin_unlock_irqrestore(ap->lock, flags);
1869 timeout = ata_probe_timeout * 1000 / HZ;
1871 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
1873 ata_port_flush_task(ap);
1876 spin_lock_irqsave(ap->lock, flags);
1878 /* We're racing with irq here. If we lose, the
1879 * following test prevents us from completing the qc
1880 * twice. If we win, the port is frozen and will be
1881 * cleaned up by ->post_internal_cmd().
1883 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1884 qc->err_mask |= AC_ERR_TIMEOUT;
1886 if (ap->ops->error_handler)
1887 ata_port_freeze(ap);
1889 ata_qc_complete(qc);
1891 if (ata_msg_warn(ap))
1892 ata_dev_printk(dev, KERN_WARNING,
1893 "qc timeout (cmd 0x%x)\n", command);
1896 spin_unlock_irqrestore(ap->lock, flags);
1899 /* do post_internal_cmd */
1900 if (ap->ops->post_internal_cmd)
1901 ap->ops->post_internal_cmd(qc);
1903 /* perform minimal error analysis */
1904 if (qc->flags & ATA_QCFLAG_FAILED) {
1905 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1906 qc->err_mask |= AC_ERR_DEV;
1909 qc->err_mask |= AC_ERR_OTHER;
1911 if (qc->err_mask & ~AC_ERR_OTHER)
1912 qc->err_mask &= ~AC_ERR_OTHER;
1916 spin_lock_irqsave(ap->lock, flags);
1918 *tf = qc->result_tf;
1919 err_mask = qc->err_mask;
1922 link->active_tag = preempted_tag;
1923 link->sactive = preempted_sactive;
1924 ap->qc_active = preempted_qc_active;
1925 ap->nr_active_links = preempted_nr_active_links;
1927 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1928 * Until those drivers are fixed, we detect the condition
1929 * here, fail the command with AC_ERR_SYSTEM and reenable the port.
1932 * Note that this doesn't change any behavior as internal
1933 * command failure results in disabling the device in the
1934 * higher layer for LLDDs without new reset/EH callbacks.
1936 * Kill the following code as soon as those drivers are fixed.
1938 if (ap->flags & ATA_FLAG_DISABLED) {
1939 err_mask |= AC_ERR_SYSTEM;
1943 spin_unlock_irqrestore(ap->lock, flags);
1949 * ata_exec_internal - execute libata internal command
1950 * @dev: Device to which the command is sent
1951 * @tf: Taskfile registers for the command and the result
1952 * @cdb: CDB for packet command
1953 * @dma_dir: Data transfer direction of the command
1954 * @buf: Data buffer of the command
1955 * @buflen: Length of data buffer
1956 * @timeout: Timeout in msecs (0 for default)
1958 * Wrapper around ata_exec_internal_sg() which takes simple
1959 * buffer instead of sg list.
1962 * None. Should be called with kernel context, might sleep.
1965 * Zero on success, AC_ERR_* mask on failure
1967 unsigned ata_exec_internal(struct ata_device *dev,
1968 struct ata_taskfile *tf, const u8 *cdb,
1969 int dma_dir, void *buf, unsigned int buflen,
1970 unsigned long timeout)
1972 struct scatterlist *psg = NULL, sg;
1973 unsigned int n_elem = 0;
1975 if (dma_dir != DMA_NONE) {
1977 sg_init_one(&sg, buf, buflen);
1982 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1987 * ata_do_simple_cmd - execute simple internal command
1988 * @dev: Device to which the command is sent
1989 * @cmd: Opcode to execute
1991 * Execute a 'simple' command, that only consists of the opcode
1992 * 'cmd' itself, without filling any other registers
1995 * Kernel thread context (may sleep).
1998 * Zero on success, AC_ERR_* mask on failure
2000 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
2002 struct ata_taskfile tf;
2004 ata_tf_init(dev, &tf);
2007 tf.flags |= ATA_TFLAG_DEVICE;
2008 tf.protocol = ATA_PROT_NODATA;
2010 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
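/*
 * For example, flushing a drive's write cache boils down to (sketch,
 * LBA48 FLUSH EXT variant and error handling omitted):
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR, "FLUSH failed\n");
 */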
2014 * ata_pio_need_iordy - check if iordy needed
2017 * Check if the current speed of the device requires IORDY. Used
2018 * by various controllers for chip configuration.
2021 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
2023 /* Controller doesn't support IORDY. Probably a pointless check
2024 as the caller should know this */
2025 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
2027 /* PIO3 and higher it is mandatory */
2028 if (adev->pio_mode > XFER_PIO_2)
2030 /* We turn it on when possible */
2031 if (ata_id_has_iordy(adev->id))
2037 * ata_pio_mask_no_iordy - Return the non IORDY mask
2040 * Compute the highest mode possible if we are not using iordy. Return
2041 * -1 if no iordy mode is available.
2044 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2046 /* If we have no drive specific rule, then PIO 2 is non IORDY */
2047 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
2048 u16 pio = adev->id[ATA_ID_EIDE_PIO];
2049 /* Is the speed faster than the drive allows non IORDY? */
2051 /* This is cycle times not frequency - watch the logic! */
2052 if (pio > 240) /* PIO2 is 240nS per cycle */
2053 return 3 << ATA_SHIFT_PIO;
2054 return 7 << ATA_SHIFT_PIO;
2057 return 3 << ATA_SHIFT_PIO;
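/*
 * In mask terms (a clarifying note): 3 << ATA_SHIFT_PIO allows PIO0-1 only,
 * while 7 << ATA_SHIFT_PIO additionally allows PIO2 when the drive's
 * advertised non-IORDY cycle time is 240ns or faster.
 */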
2061 * ata_dev_read_id - Read ID data from the specified device
2062 * @dev: target device
2063 * @p_class: pointer to class of the target device (may be changed)
2064 * @flags: ATA_READID_* flags
2065 * @id: buffer to read IDENTIFY data into
2067 * Read ID data from the specified device. ATA_CMD_ID_ATA is
2068 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
2069 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
2070 * for pre-ATA4 drives.
2072 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2073 * now we abort if we hit that case.
2076 * Kernel thread context (may sleep)
2079 * 0 on success, -errno otherwise.
2081 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
2082 unsigned int flags, u16 *id)
2084 struct ata_port *ap = dev->link->ap;
2085 unsigned int class = *p_class;
2086 struct ata_taskfile tf;
2087 unsigned int err_mask = 0;
2089 int may_fallback = 1, tried_spinup = 0;
2092 if (ata_msg_ctl(ap))
2093 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2095 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
2097 ata_tf_init(dev, &tf);
2101 tf.command = ATA_CMD_ID_ATA;
2104 tf.command = ATA_CMD_ID_ATAPI;
2108 reason = "unsupported class";
2112 tf.protocol = ATA_PROT_PIO;
2114 /* Some devices choke if TF registers contain garbage. Make
2115 * sure those are properly initialized.
2117 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2119 /* Device presence detection is unreliable on some
2120 * controllers. Always poll IDENTIFY if available.
2122 tf.flags |= ATA_TFLAG_POLLING;
2124 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2125 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
2127 if (err_mask & AC_ERR_NODEV_HINT) {
2128 ata_dev_printk(dev, KERN_DEBUG,
2129 "NODEV after polling detection\n");
2133 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2134 /* Device or controller might have reported
2135 * the wrong device class. Give a shot at the
2136 * other IDENTIFY if the current one is
2137 * aborted by the device.
2142 if (class == ATA_DEV_ATA)
2143 class = ATA_DEV_ATAPI;
2145 class = ATA_DEV_ATA;
2149 /* Control reaches here iff the device aborted
2150 * both flavors of IDENTIFYs which happens
2151 * sometimes with phantom devices.
2153 ata_dev_printk(dev, KERN_DEBUG,
2154 "both IDENTIFYs aborted, assuming NODEV\n");
2159 reason = "I/O error";
2163 /* Falling back doesn't make sense if ID data was read
2164 * successfully at least once.
2168 swap_buf_le16(id, ATA_ID_WORDS);
2172 reason = "device reports invalid type";
2174 if (class == ATA_DEV_ATA) {
2175 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2178 if (ata_id_is_ata(id))
2182 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2185 * Drive powered-up in standby mode, and requires a specific
2186 * SET_FEATURES spin-up subcommand before it will accept
2187 * anything other than the original IDENTIFY command.
2189 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
2190 if (err_mask && id[2] != 0x738c) {
2192 reason = "SPINUP failed";
2196 * If the drive initially returned incomplete IDENTIFY info,
2197 * we now must reissue the IDENTIFY command.
2199 if (id[2] == 0x37c8)
2203 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
2205 * The exact sequence expected by certain pre-ATA4 drives is:
2207 * IDENTIFY (optional in early ATA)
2208 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
2210 * Some drives were very specific about that exact sequence.
2212 * Note that ATA4 says lba is mandatory so the second check
2213 * should never trigger.
2215 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
2216 err_mask = ata_dev_init_params(dev, id[3], id[6]);
2219 reason = "INIT_DEV_PARAMS failed";
2223 /* current CHS translation info (id[53-58]) might be
2224 * changed. Reread the identify device info.
2226 flags &= ~ATA_READID_POSTRESET;
2236 if (ata_msg_warn(ap))
2237 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
2238 "(%s, err_mask=0x%x)\n", reason, err_mask);
2242 static inline u8 ata_dev_knobble(struct ata_device *dev)
2244 struct ata_port *ap = dev->link->ap;
2245 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
2248 static void ata_dev_config_ncq(struct ata_device *dev,
2249 char *desc, size_t desc_sz)
2251 struct ata_port *ap = dev->link->ap;
2252 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2254 if (!ata_id_has_ncq(dev->id)) {
2258 if (dev->horkage & ATA_HORKAGE_NONCQ) {
2259 snprintf(desc, desc_sz, "NCQ (not used)");
2262 if (ap->flags & ATA_FLAG_NCQ) {
2263 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
2264 dev->flags |= ATA_DFLAG_NCQ;
2267 if (hdepth >= ddepth)
2268 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2270 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2274 * ata_dev_configure - Configure the specified ATA/ATAPI device
2275 * @dev: Target device to configure
2277 * Configure @dev according to @dev->id. Generic and low-level
2278 * driver specific fixups are also applied.
2281 * Kernel thread context (may sleep)
2284 * 0 on success, -errno otherwise
2286 int ata_dev_configure(struct ata_device *dev)
2288 struct ata_port *ap = dev->link->ap;
2289 struct ata_eh_context *ehc = &dev->link->eh_context;
2290 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
2291 const u16 *id = dev->id;
2292 unsigned long xfer_mask;
2293 char revbuf[7]; /* XYZ-99\0 */
2294 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2295 char modelbuf[ATA_ID_PROD_LEN+1];
2298 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
2299 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2304 if (ata_msg_probe(ap))
2305 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
2308 dev->horkage |= ata_dev_blacklisted(dev);
2309 ata_force_horkage(dev);
2311 /* let ACPI work its magic */
2312 rc = ata_acpi_on_devcfg(dev);
2316 /* massage HPA, do it early as it might change IDENTIFY data */
2317 rc = ata_hpa_resize(dev);
2321 /* print device capabilities */
2322 if (ata_msg_probe(ap))
2323 ata_dev_printk(dev, KERN_DEBUG,
2324 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2325 "85:%04x 86:%04x 87:%04x 88:%04x\n",
2327 id[49], id[82], id[83], id[84],
2328 id[85], id[86], id[87], id[88]);
2330 /* initialize to-be-configured parameters */
2331 dev->flags &= ~ATA_DFLAG_CFG_MASK;
2332 dev->max_sectors = 0;
2340 * common ATA, ATAPI feature tests
2343 /* find max transfer mode; for printk only */
2344 xfer_mask = ata_id_xfermask(id);
2346 if (ata_msg_probe(ap))
2349 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2350 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2353 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2356 /* ATA-specific feature tests */
2357 if (dev->class == ATA_DEV_ATA) {
2358 if (ata_id_is_cfa(id)) {
2359 if (id[162] & 1) /* CPRM may make this media unusable */
2360 ata_dev_printk(dev, KERN_WARNING,
2361 "supports DRM functions and may "
2362 "not be fully accessable.\n");
2363 snprintf(revbuf, 7, "CFA");
2365 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
2366 /* Warn the user if the device has TPM extensions */
2367 if (ata_id_has_tpm(id))
2368 ata_dev_printk(dev, KERN_WARNING,
2369 "supports DRM functions and may "
2370 "not be fully accessable.\n");
2373 dev->n_sectors = ata_id_n_sectors(id);
2375 if (dev->id[59] & 0x100)
2376 dev->multi_count = dev->id[59] & 0xff;
2378 if (ata_id_has_lba(id)) {
2379 const char *lba_desc;
2383 dev->flags |= ATA_DFLAG_LBA;
2384 if (ata_id_has_lba48(id)) {
2385 dev->flags |= ATA_DFLAG_LBA48;
2388 if (dev->n_sectors >= (1UL << 28) &&
2389 ata_id_has_flush_ext(id))
2390 dev->flags |= ATA_DFLAG_FLUSH_EXT;
2394 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2396 /* print device info to dmesg */
2397 if (ata_msg_drv(ap) && print_info) {
2398 ata_dev_printk(dev, KERN_INFO,
2399 "%s: %s, %s, max %s\n",
2400 revbuf, modelbuf, fwrevbuf,
2401 ata_mode_string(xfer_mask));
2402 ata_dev_printk(dev, KERN_INFO,
2403 "%Lu sectors, multi %u: %s %s\n",
2404 (unsigned long long)dev->n_sectors,
2405 dev->multi_count, lba_desc, ncq_desc);
2410 /* Default translation */
2411 dev->cylinders = id[1];
2413 dev->sectors = id[6];
2415 if (ata_id_current_chs_valid(id)) {
2416 /* Current CHS translation is valid. */
2417 dev->cylinders = id[54];
2418 dev->heads = id[55];
2419 dev->sectors = id[56];
2422 /* print device info to dmesg */
2423 if (ata_msg_drv(ap) && print_info) {
2424 ata_dev_printk(dev, KERN_INFO,
2425 "%s: %s, %s, max %s\n",
2426 revbuf, modelbuf, fwrevbuf,
2427 ata_mode_string(xfer_mask));
2428 ata_dev_printk(dev, KERN_INFO,
2429 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2430 (unsigned long long)dev->n_sectors,
2431 dev->multi_count, dev->cylinders,
2432 dev->heads, dev->sectors);
2439 /* ATAPI-specific feature tests */
2440 else if (dev->class == ATA_DEV_ATAPI) {
2441 const char *cdb_intr_string = "";
2442 const char *atapi_an_string = "";
2443 const char *dma_dir_string = "";
2446 rc = atapi_cdb_len(id);
2447 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2448 if (ata_msg_warn(ap))
2449 ata_dev_printk(dev, KERN_WARNING,
2450 "unsupported CDB len\n");
2454 dev->cdb_len = (unsigned int) rc;
2456 /* Enable ATAPI AN if both the host and device support
2457 * it. If PMP is attached, SNTF is required
2458 * to enable ATAPI AN to discern between PHY status
2459 * changed notifications and ATAPI ANs.
2461 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2462 (!ap->nr_pmp_links ||
2463 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
2464 unsigned int err_mask;
2466 /* issue SET feature command to turn this on */
2467 err_mask = ata_dev_set_feature(dev,
2468 SETFEATURES_SATA_ENABLE, SATA_AN);
2470 ata_dev_printk(dev, KERN_ERR,
2471 "failed to enable ATAPI AN "
2472 "(err_mask=0x%x)\n", err_mask);
2474 dev->flags |= ATA_DFLAG_AN;
2475 atapi_an_string = ", ATAPI AN";
2479 if (ata_id_cdb_intr(dev->id)) {
2480 dev->flags |= ATA_DFLAG_CDB_INTR;
2481 cdb_intr_string = ", CDB intr";
2484 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2485 dev->flags |= ATA_DFLAG_DMADIR;
2486 dma_dir_string = ", DMADIR";
2489 /* print device info to dmesg */
2490 if (ata_msg_drv(ap) && print_info)
2491 ata_dev_printk(dev, KERN_INFO,
2492 "ATAPI: %s, %s, max %s%s%s%s\n",
2494 ata_mode_string(xfer_mask),
2495 cdb_intr_string, atapi_an_string,
2499 /* determine max_sectors */
2500 dev->max_sectors = ATA_MAX_SECTORS;
2501 if (dev->flags & ATA_DFLAG_LBA48)
2502 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2504 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2505 if (ata_id_has_hipm(dev->id))
2506 dev->flags |= ATA_DFLAG_HIPM;
2507 if (ata_id_has_dipm(dev->id))
2508 dev->flags |= ATA_DFLAG_DIPM;
2511 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2513 if (ata_dev_knobble(dev)) {
2514 if (ata_msg_drv(ap) && print_info)
2515 ata_dev_printk(dev, KERN_INFO,
2516 "applying bridge limits\n");
2517 dev->udma_mask &= ATA_UDMA5;
2518 dev->max_sectors = ATA_MAX_SECTORS;
2521 if ((dev->class == ATA_DEV_ATAPI) &&
2522 (atapi_command_packet_set(id) == TYPE_TAPE)) {
2523 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
2524 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2527 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2528 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2531 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2532 dev->horkage |= ATA_HORKAGE_IPM;
2534 /* reset link pm_policy for this port to no pm */
2535 ap->pm_policy = MAX_PERFORMANCE;
2538 if (ap->ops->dev_config)
2539 ap->ops->dev_config(dev);
2541 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2542 /* Let the user know. We don't want to disallow opens for
2543 rescue purposes, or in case the vendor is just a blithering
2544 idiot. Do this after the dev_config call as some controllers
2545 with buggy firmware may want to avoid reporting false device
2549 ata_dev_printk(dev, KERN_WARNING,
2550 "Drive reports diagnostics failure. This may indicate a drive\n");
2551 ata_dev_printk(dev, KERN_WARNING,
2552 "fault or invalid emulation. Contact drive vendor for information.\n");
2556 if (ata_msg_probe(ap))
2557 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2558 __func__, ata_chk_status(ap));
2562 if (ata_msg_probe(ap))
2563 ata_dev_printk(dev, KERN_DEBUG,
2564 "%s: EXIT, err\n", __func__);
2569 * ata_cable_40wire - return 40 wire cable type
2572 * Helper method for drivers which want to hardwire 40 wire cable
2576 int ata_cable_40wire(struct ata_port *ap)
2578 return ATA_CBL_PATA40;
2582 * ata_cable_80wire - return 80 wire cable type
2585 * Helper method for drivers which want to hardwire 80 wire cable
2589 int ata_cable_80wire(struct ata_port *ap)
2591 return ATA_CBL_PATA80;
2595 * ata_cable_unknown - return unknown PATA cable.
2598 * Helper method for drivers which have no PATA cable detection.
2601 int ata_cable_unknown(struct ata_port *ap)
2603 return ATA_CBL_PATA_UNK;
2607 * ata_cable_ignore - return ignored PATA cable.
2610 * Helper method for drivers which don't use cable type to limit
2613 int ata_cable_ignore(struct ata_port *ap)
2615 return ATA_CBL_PATA_IGN;
2619 * ata_cable_sata - return SATA cable type
2622 * Helper method for drivers which have SATA cables
2625 int ata_cable_sata(struct ata_port *ap)
2627 return ATA_CBL_SATA;
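/*
 * Illustrative sketch (not part of the original file): a PATA LLDD with
 * no cable detection hardware can simply point its ->cable_detect hook
 * (called from ata_bus_probe() below) at one of the helpers above.
 * Everything except ->cable_detect in this snippet is assumed for the
 * example.
 */
#if 0	/* example only */
static struct ata_port_operations example_pata_ops = {
	/* ... other operations elided ... */
	.cable_detect	= ata_cable_unknown,	/* no detection available */
};
#endif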
2631 * ata_bus_probe - Reset and probe ATA bus
2634 * Master ATA bus probing function. Initiates a hardware-dependent
2635 * bus reset, then attempts to identify any devices found on
2639 * PCI/etc. bus probe sem.
2642 * Zero on success, negative errno otherwise.
2645 int ata_bus_probe(struct ata_port *ap)
2647 unsigned int classes[ATA_MAX_DEVICES];
2648 int tries[ATA_MAX_DEVICES];
2650 struct ata_device *dev;
2654 ata_link_for_each_dev(dev, &ap->link)
2655 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2658 ata_link_for_each_dev(dev, &ap->link) {
2659 /* If we issue an SRST then an ATA drive (not ATAPI)
2660 * may change configuration and be in PIO0 timing. If
2661 * we do a hard reset (or are coming from power on)
2662 * this is true for ATA or ATAPI. Until we've set a
2663 * suitable controller mode we should not touch the
2664 * bus as we may be talking too fast.
2666 dev->pio_mode = XFER_PIO_0;
2668 /* If the controller has a pio mode setup function
2669 * then use it to set the chipset to rights. Don't
2670 * touch the DMA setup as that will be dealt with when
2671 * configuring devices.
2673 if (ap->ops->set_piomode)
2674 ap->ops->set_piomode(ap, dev);
2677 /* reset and determine device classes */
2678 ap->ops->phy_reset(ap);
2680 ata_link_for_each_dev(dev, &ap->link) {
2681 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2682 dev->class != ATA_DEV_UNKNOWN)
2683 classes[dev->devno] = dev->class;
2685 classes[dev->devno] = ATA_DEV_NONE;
2687 dev->class = ATA_DEV_UNKNOWN;
2692 /* read IDENTIFY page and configure devices. We have to do the identify
2693 specific sequence bass-ackwards so that PDIAG- is released by
2696 ata_link_for_each_dev_reverse(dev, &ap->link) {
2697 if (tries[dev->devno])
2698 dev->class = classes[dev->devno];
2700 if (!ata_dev_enabled(dev))
2703 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2709 /* Now ask for the cable type as PDIAG- should have been released */
2710 if (ap->ops->cable_detect)
2711 ap->cbl = ap->ops->cable_detect(ap);
2713 /* We may have SATA bridge glue hiding here irrespective of the
2714 reported cable types and sensed types */
2715 ata_link_for_each_dev(dev, &ap->link) {
2716 if (!ata_dev_enabled(dev))
2718 /* SATA drives indicate we have a bridge. We don't know which
2719 end of the link the bridge is on, which is a problem */
2720 if (ata_id_is_sata(dev->id))
2721 ap->cbl = ATA_CBL_SATA;
2724 /* After the identify sequence we can now set up the devices. We do
2725 this in the normal order so that the user doesn't get confused */
2727 ata_link_for_each_dev(dev, &ap->link) {
2728 if (!ata_dev_enabled(dev))
2731 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2732 rc = ata_dev_configure(dev);
2733 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2738 /* configure transfer mode */
2739 rc = ata_set_mode(&ap->link, &dev);
2743 ata_link_for_each_dev(dev, &ap->link)
2744 if (ata_dev_enabled(dev))
2747 /* no device present, disable port */
2748 ata_port_disable(ap);
2752 tries[dev->devno]--;
2756 /* eeek, something went very wrong, give up */
2757 tries[dev->devno] = 0;
2761 /* give it just one more chance */
2762 tries[dev->devno] = min(tries[dev->devno], 1);
2764 if (tries[dev->devno] == 1) {
2765 /* This is the last chance, better to slow
2766 * down than lose it.
2768 sata_down_spd_limit(&ap->link);
2769 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2773 if (!tries[dev->devno])
2774 ata_dev_disable(dev);
2780 * ata_port_probe - Mark port as enabled
2781 * @ap: Port for which we indicate enablement
2783 * Modify @ap data structure such that the system
2784 * thinks that the entire port is enabled.
2786 * LOCKING: host lock, or some other form of
2790 void ata_port_probe(struct ata_port *ap)
2792 ap->flags &= ~ATA_FLAG_DISABLED;
2796 * sata_print_link_status - Print SATA link status
2797 * @link: SATA link to printk link status about
2799 * This function prints link speed and status of a SATA link.
2804 void sata_print_link_status(struct ata_link *link)
2806 u32 sstatus, scontrol, tmp;
2808 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2810 sata_scr_read(link, SCR_CONTROL, &scontrol);
2812 if (ata_link_online(link)) {
2813 tmp = (sstatus >> 4) & 0xf;
2814 ata_link_printk(link, KERN_INFO,
2815 "SATA link up %s (SStatus %X SControl %X)\n",
2816 sata_spd_string(tmp), sstatus, scontrol);
2818 ata_link_printk(link, KERN_INFO,
2819 "SATA link down (SStatus %X SControl %X)\n",
2825 * ata_dev_pair - return other device on cable
2828 * Obtain the other device on the same cable, or NULL if no
2829 * other device is present.
2832 struct ata_device *ata_dev_pair(struct ata_device *adev)
2834 struct ata_link *link = adev->link;
2835 struct ata_device *pair = &link->device[1 - adev->devno];
2836 if (!ata_dev_enabled(pair))
2842 * ata_port_disable - Disable port.
2843 * @ap: Port to be disabled.
2845 * Modify @ap data structure such that the system
2846 * thinks that the entire port is disabled, and should
2847 * never attempt to probe or communicate with devices
2850 * LOCKING: host lock, or some other form of
2854 void ata_port_disable(struct ata_port *ap)
2856 ap->link.device[0].class = ATA_DEV_NONE;
2857 ap->link.device[1].class = ATA_DEV_NONE;
2858 ap->flags |= ATA_FLAG_DISABLED;
2862 * sata_down_spd_limit - adjust SATA spd limit downward
2863 * @link: Link to adjust SATA spd limit for
2865 * Adjust SATA spd limit of @link downward. Note that this
2866 * function only adjusts the limit. The change must be applied
2867 * using sata_set_spd().
2870 * Inherited from caller.
2873 * 0 on success, negative errno on failure
2875 int sata_down_spd_limit(struct ata_link *link)
2877 u32 sstatus, spd, mask;
2880 if (!sata_scr_valid(link))
2883 /* If SCR can be read, use it to determine the current SPD.
2884 * If not, use cached value in link->sata_spd.
2886 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2888 spd = (sstatus >> 4) & 0xf;
2890 spd = link->sata_spd;
2892 mask = link->sata_spd_limit;
2896 /* unconditionally mask off the highest bit */
2897 highbit = fls(mask) - 1;
2898 mask &= ~(1 << highbit);
2900 /* Mask off all speeds higher than or equal to the current
2901 * one. Force 1.5Gbps if current SPD is not available.
2904 mask &= (1 << (spd - 1)) - 1;
2908 /* were we already at the bottom? */
2912 link->sata_spd_limit = mask;
2914 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2915 sata_spd_string(fls(mask)));
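/*
 * Worked example (illustrative): suppose the current limit allows
 * 1.5 and 3.0 Gbps (mask == 0x3) and the link is running at 3.0 Gbps
 * (spd == 2). Then:
 *
 *	highbit = fls(0x3) - 1       -> 1
 *	mask &= ~(1 << 1)            -> 0x1
 *	mask &= (1 << (2 - 1)) - 1   -> 0x1
 *
 * so the new limit is 0x1 and the warning above reports the new limit
 * (1.5 Gbps) via sata_spd_string(fls(mask)).
 */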
2920 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2922 struct ata_link *host_link = &link->ap->link;
2923 u32 limit, target, spd;
2925 limit = link->sata_spd_limit;
2927 /* Don't configure downstream link faster than upstream link.
2928 * It doesn't speed up anything and some PMPs choke on such
2931 if (!ata_is_host_link(link) && host_link->sata_spd)
2932 limit &= (1 << host_link->sata_spd) - 1;
2934 if (limit == UINT_MAX)
2937 target = fls(limit);
2939 spd = (*scontrol >> 4) & 0xf;
2940 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
2942 return spd != target;
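/*
 * Worked example (illustrative): if link->sata_spd_limit allows only
 * 1.5 Gbps (limit == 0x1), fls(limit) yields target == 1 and the SPD
 * field of SControl (bits 7:4) is rewritten to 1. The function returns
 * non-zero whenever that field actually changes, signalling that a
 * hardreset is needed to make the PHY renegotiate at the new limit.
 */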
2946 * sata_set_spd_needed - is SATA spd configuration needed
2947 * @link: Link in question
2949 * Test whether the spd limit in SControl matches
2950 * @link->sata_spd_limit. This function is used to determine
2951 * whether hardreset is necessary to apply SATA spd
2955 * Inherited from caller.
2958 * 1 if SATA spd configuration is needed, 0 otherwise.
2960 int sata_set_spd_needed(struct ata_link *link)
2964 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2967 return __sata_set_spd_needed(link, &scontrol);
2971 * sata_set_spd - set SATA spd according to spd limit
2972 * @link: Link to set SATA spd for
2974 * Set SATA spd of @link according to sata_spd_limit.
2977 * Inherited from caller.
2980 * 0 if spd doesn't need to be changed, 1 if spd has been
2981 * changed. Negative errno if SCR registers are inaccessible.
2983 int sata_set_spd(struct ata_link *link)
2988 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2991 if (!__sata_set_spd_needed(link, &scontrol))
2994 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3001 * This mode timing computation functionality is ported over from
3002 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
3005 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
3006 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
3007 * for UDMA6, which is currently supported only by Maxtor drives.
3009 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
3012 static const struct ata_timing ata_timing[] = {
3013 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
3014 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
3015 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
3016 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
3017 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
3018 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
3019 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
3020 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
3022 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
3023 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
3024 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
3026 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
3027 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
3028 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
3029 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
3030 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
3032 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
3033 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
3034 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
3035 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
3036 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
3037 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
3038 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
3039 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
3044 #define ENOUGH(v, unit) (((v)-1)/(unit)+1)
3045 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
3047 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
3049 q->setup = EZ(t->setup * 1000, T);
3050 q->act8b = EZ(t->act8b * 1000, T);
3051 q->rec8b = EZ(t->rec8b * 1000, T);
3052 q->cyc8b = EZ(t->cyc8b * 1000, T);
3053 q->active = EZ(t->active * 1000, T);
3054 q->recover = EZ(t->recover * 1000, T);
3055 q->cycle = EZ(t->cycle * 1000, T);
3056 q->udma = EZ(t->udma * 1000, UT);
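/*
 * Worked example (illustrative): the ata_timing[] entries are in
 * nanoseconds and are scaled by 1000 here, so T and UT are expected in
 * picoseconds per clock tick (callers typically derive them from a
 * clock rate in kHz). For the XFER_PIO_4 cycle of 120 ns on a 33 MHz
 * bus clock (T ~= 30000 ps):
 *
 *	q->cycle = EZ(120 * 1000, 30000)
 *	         = (120000 - 1) / 30000 + 1
 *	         = 4 clocks
 *
 * i.e. the cycle is rounded up to a whole number of bus clocks.
 */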
3059 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3060 struct ata_timing *m, unsigned int what)
3062 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
3063 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
3064 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
3065 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3066 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3067 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3068 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3069 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3072 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
3074 const struct ata_timing *t = ata_timing;
3076 while (xfer_mode > t->mode)
3079 if (xfer_mode == t->mode)
3084 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3085 struct ata_timing *t, int T, int UT)
3087 const struct ata_timing *s;
3088 struct ata_timing p;
3094 if (!(s = ata_timing_find_mode(speed)))
3097 memcpy(t, s, sizeof(*s));
3100 * If the drive is an EIDE drive, it can tell us it needs extended
3101 * PIO/MW_DMA cycle timing.
3104 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3105 memset(&p, 0, sizeof(p));
3106 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
3107 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
3108 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
3109 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
3110 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
3112 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3116 * Convert the timing to bus clock counts.
3119 ata_timing_quantize(t, t, T, UT);
3122 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3123 * S.M.A.R.T. and some other commands. We have to ensure that the
3124 * DMA cycle timing is slower than or equal to the fastest PIO timing.
3127 if (speed > XFER_PIO_6) {
3128 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3129 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3133 * Lengthen active & recovery time so that cycle time is correct.
3136 if (t->act8b + t->rec8b < t->cyc8b) {
3137 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3138 t->rec8b = t->cyc8b - t->act8b;
3141 if (t->active + t->recover < t->cycle) {
3142 t->active += (t->cycle - (t->active + t->recover)) / 2;
3143 t->recover = t->cycle - t->active;
3146 /* In a few cases quantisation may produce enough errors to
3147 leave t->cycle too low for the sum of active and recovery;
3148 if so we must correct this */
3149 if (t->active + t->recover > t->cycle)
3150 t->cycle = t->active + t->recover;
3156 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3157 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3158 * @cycle: cycle duration in ns
3160 * Return matching xfer mode for @cycle. The returned mode is of
3161 * the transfer type specified by @xfer_shift. If @cycle is too
3162 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3163 * than the fastest known mode, the fastest mode is returned.
3169 * Matching xfer_mode, 0xff if no match found.
3171 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3173 u8 base_mode = 0xff, last_mode = 0xff;
3174 const struct ata_xfer_ent *ent;
3175 const struct ata_timing *t;
3177 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3178 if (ent->shift == xfer_shift)
3179 base_mode = ent->base;
3181 for (t = ata_timing_find_mode(base_mode);
3182 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3183 unsigned short this_cycle;
3185 switch (xfer_shift) {
3187 case ATA_SHIFT_MWDMA:
3188 this_cycle = t->cycle;
3190 case ATA_SHIFT_UDMA:
3191 this_cycle = t->udma;
3197 if (cycle > this_cycle)
3200 last_mode = t->mode;
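/*
 * Worked example (illustrative): with xfer_shift == ATA_SHIFT_UDMA and
 * cycle == 30 ns, the loop walks the UDMA entries of ata_timing[] from
 * slowest to fastest. UDMA0..UDMA4 have udma cycles of 120..30 ns, none
 * of which is shorter than the requested 30 ns, so last_mode advances to
 * XFER_UDMA_4; UDMA5 (20 ns) is faster than requested and stops the
 * loop, so XFER_UDMA_4 is returned.
 */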
3207 * ata_down_xfermask_limit - adjust dev xfer masks downward
3208 * @dev: Device to adjust xfer masks
3209 * @sel: ATA_DNXFER_* selector
3211 * Adjust xfer masks of @dev downward. Note that this function
3212 * does not apply the change. Invoking ata_set_mode() afterwards
3213 * will apply the limit.
3216 * Inherited from caller.
3219 * 0 on success, negative errno on failure
3221 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
3224 unsigned long orig_mask, xfer_mask;
3225 unsigned long pio_mask, mwdma_mask, udma_mask;
3228 quiet = !!(sel & ATA_DNXFER_QUIET);
3229 sel &= ~ATA_DNXFER_QUIET;
3231 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3234 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
3237 case ATA_DNXFER_PIO:
3238 highbit = fls(pio_mask) - 1;
3239 pio_mask &= ~(1 << highbit);
3242 case ATA_DNXFER_DMA:
3244 highbit = fls(udma_mask) - 1;
3245 udma_mask &= ~(1 << highbit);
3248 } else if (mwdma_mask) {
3249 highbit = fls(mwdma_mask) - 1;
3250 mwdma_mask &= ~(1 << highbit);
3256 case ATA_DNXFER_40C:
3257 udma_mask &= ATA_UDMA_MASK_40C;
3260 case ATA_DNXFER_FORCE_PIO0:
3262 case ATA_DNXFER_FORCE_PIO:
3271 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3273 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3277 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3278 snprintf(buf, sizeof(buf), "%s:%s",
3279 ata_mode_string(xfer_mask),
3280 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3282 snprintf(buf, sizeof(buf), "%s",
3283 ata_mode_string(xfer_mask));
3285 ata_dev_printk(dev, KERN_WARNING,
3286 "limiting speed to %s\n", buf);
3289 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
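/*
 * Worked example (illustrative): for sel == ATA_DNXFER_PIO and a device
 * currently advertising PIO0-4 (pio_mask == 0x1f), fls(0x1f) - 1 == 4,
 * so bit 4 is cleared and the device is limited to PIO3. This is what
 * ata_bus_probe() above does on a device's last retry, together with
 * sata_down_spd_limit(), before giving up on it.
 */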
3295 static int ata_dev_set_mode(struct ata_device *dev)
3297 struct ata_eh_context *ehc = &dev->link->eh_context;
3298 const char *dev_err_whine = "";
3299 int ign_dev_err = 0;
3300 unsigned int err_mask;
3303 dev->flags &= ~ATA_DFLAG_PIO;
3304 if (dev->xfer_shift == ATA_SHIFT_PIO)
3305 dev->flags |= ATA_DFLAG_PIO;
3307 err_mask = ata_dev_set_xfermode(dev);
3309 if (err_mask & ~AC_ERR_DEV)
3313 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3314 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3315 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3319 /* Old CFA may refuse this command, which is just fine */
3320 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
3323 /* Some very old devices and some bad newer ones fail any kind of
3324 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3325 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3326 dev->pio_mode <= XFER_PIO_2)
3329 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3330 Don't fail an MWDMA0 set if the device indicates it is in MWDMA0 */
3331 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3332 dev->dma_mode == XFER_MW_DMA_0 &&
3333 (dev->id[63] >> 8) & 1)
3336 /* if the device is actually configured correctly, ignore dev err */
3337 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3340 if (err_mask & AC_ERR_DEV) {
3344 dev_err_whine = " (device error ignored)";
3347 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3348 dev->xfer_shift, (int)dev->xfer_mode);
3350 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3351 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3357 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3358 "(err_mask=0x%x)\n", err_mask);
3363 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
3364 * @link: link on which timings will be programmed
3365 * @r_failed_dev: out parameter for failed device
3367 * Standard implementation of the function used to tune and set
3368 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3369 * ata_dev_set_mode() fails, pointer to the failing device is
3370 * returned in @r_failed_dev.
3373 * PCI/etc. bus probe sem.
3376 * 0 on success, negative errno otherwise
3379 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3381 struct ata_port *ap = link->ap;
3382 struct ata_device *dev;
3383 int rc = 0, used_dma = 0, found = 0;
3385 /* step 1: calculate xfer_mask */
3386 ata_link_for_each_dev(dev, link) {
3387 unsigned long pio_mask, dma_mask;
3388 unsigned int mode_mask;
3390 if (!ata_dev_enabled(dev))
3393 mode_mask = ATA_DMA_MASK_ATA;
3394 if (dev->class == ATA_DEV_ATAPI)
3395 mode_mask = ATA_DMA_MASK_ATAPI;
3396 else if (ata_id_is_cfa(dev->id))
3397 mode_mask = ATA_DMA_MASK_CFA;
3399 ata_dev_xfermask(dev);
3400 ata_force_xfermask(dev);
3402 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3403 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3405 if (libata_dma_mask & mode_mask)
3406 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3410 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3411 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
3414 if (dev->dma_mode != 0xff)
3420 /* step 2: always set host PIO timings */
3421 ata_link_for_each_dev(dev, link) {
3422 if (!ata_dev_enabled(dev))
3425 if (dev->pio_mode == 0xff) {
3426 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
3431 dev->xfer_mode = dev->pio_mode;
3432 dev->xfer_shift = ATA_SHIFT_PIO;
3433 if (ap->ops->set_piomode)
3434 ap->ops->set_piomode(ap, dev);
3437 /* step 3: set host DMA timings */
3438 ata_link_for_each_dev(dev, link) {
3439 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
3442 dev->xfer_mode = dev->dma_mode;
3443 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3444 if (ap->ops->set_dmamode)
3445 ap->ops->set_dmamode(ap, dev);
3448 /* step 4: update devices' xfer mode */
3449 ata_link_for_each_dev(dev, link) {
3450 /* don't update suspended devices' xfer mode */
3451 if (!ata_dev_enabled(dev))
3454 rc = ata_dev_set_mode(dev);
3459 /* Record simplex status. If we selected DMA then the other
3460 * host channels are not permitted to do so.
3462 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
3463 ap->host->simplex_claimed = ap;
3467 *r_failed_dev = dev;
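/*
 * Illustrative sketch (not part of the original file): drivers that need
 * extra work around mode programming usually wrap this standard
 * implementation instead of reimplementing it; the ->set_mode hook name
 * and the wrapper below are assumptions for the example.
 */
#if 0	/* example only */
static int example_set_mode(struct ata_link *link,
			    struct ata_device **r_failed_dev)
{
	/* hypothetical controller-specific fixups would go here */
	return ata_do_set_mode(link, r_failed_dev);
}
#endif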
3472 * ata_tf_to_host - issue ATA taskfile to host controller
3473 * @ap: port to which command is being issued
3474 * @tf: ATA taskfile register set
3476 * Issues ATA taskfile register set to ATA host controller,
3477 * with proper synchronization with interrupt handler and
3481 * spin_lock_irqsave(host lock)
3484 static inline void ata_tf_to_host(struct ata_port *ap,
3485 const struct ata_taskfile *tf)
3487 ap->ops->tf_load(ap, tf);
3488 ap->ops->exec_command(ap, tf);
3492 * ata_busy_sleep - sleep until BSY clears, or timeout
3493 * @ap: port containing status register to be polled
3494 * @tmout_pat: impatience timeout
3495 * @tmout: overall timeout
3497 * Sleep until ATA Status register bit BSY clears,
3498 * or a timeout occurs.
3501 * Kernel thread context (may sleep).
3504 * 0 on success, -errno otherwise.
3506 int ata_busy_sleep(struct ata_port *ap,
3507 unsigned long tmout_pat, unsigned long tmout)
3509 unsigned long timer_start, timeout;
3512 status = ata_busy_wait(ap, ATA_BUSY, 300);
3513 timer_start = jiffies;
3514 timeout = timer_start + tmout_pat;
3515 while (status != 0xff && (status & ATA_BUSY) &&
3516 time_before(jiffies, timeout)) {
3518 status = ata_busy_wait(ap, ATA_BUSY, 3);
3521 if (status != 0xff && (status & ATA_BUSY))
3522 ata_port_printk(ap, KERN_WARNING,
3523 "port is slow to respond, please be patient "
3524 "(Status 0x%x)\n", status);
3526 timeout = timer_start + tmout;
3527 while (status != 0xff && (status & ATA_BUSY) &&
3528 time_before(jiffies, timeout)) {
3530 status = ata_chk_status(ap);
3536 if (status & ATA_BUSY) {
3537 ata_port_printk(ap, KERN_ERR, "port failed to respond "
3538 "(%lu secs, Status 0x%x)\n",
3539 tmout / HZ, status);
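/*
 * Illustrative usage (not part of the original file): callers typically
 * give the device a short "impatience" window before the warning above
 * and a longer overall deadline. The timeout constants are assumed from
 * the libata headers of this era.
 */
#if 0	/* example only */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT))
		return -ENODEV;		/* BSY never cleared, give up */
#endif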
3547 * ata_wait_after_reset - wait before checking status after reset
3548 * @ap: port containing status register to be polled
3549 * @deadline: deadline jiffies for the operation
3551 * After reset, we need to pause a while before reading status.
3552 * Also, certain combination of controller and device report 0xff
3553 * for some duration (e.g. until SATA PHY is up and running)
3554 * which is interpreted as empty port in ATA world. This
3555 * function also waits for such devices to get out of 0xff
3559 * Kernel thread context (may sleep).
3561 void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
3563 unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
3565 if (time_before(until, deadline))
3568 /* Spec mandates ">= 2ms" before checking status. We wait
3569 * 150ms, because that was the magic delay used for ATAPI
3570 * devices in Hale Landis's ATADRVR, for the period of time
3571 * between when the ATA command register is written, and then
3572 * status is checked. Because waiting for "a while" before
3573 * checking status is fine, post SRST, we perform this magic
3574 * delay here as well.
3576 * Old drivers/ide uses the 2 ms rule and then waits for ready.
3580 /* Wait for 0xff to clear. Some SATA devices take a long time
3581 * to clear 0xff after reset. For example, HHD424020F7SV00
3582 * iVDR needs >= 800ms while Quantum GoVault needs even more
3585 * Note that some PATA controllers (pata_ali) explode if
3586 * status register is read more than once when there's no
3589 if (ap->flags & ATA_FLAG_SATA) {
3591 u8 status = ata_chk_status(ap);
3593 if (status != 0xff || time_after(jiffies, deadline))
3602 * ata_wait_ready - sleep until BSY clears, or timeout
3603 * @ap: port containing status register to be polled
3604 * @deadline: deadline jiffies for the operation
3606 * Sleep until ATA Status register bit BSY clears, or timeout
3610 * Kernel thread context (may sleep).
3613 * 0 on success, -errno otherwise.
3615 int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3617 unsigned long start = jiffies;
3621 u8 status = ata_chk_status(ap);
3622 unsigned long now = jiffies;
3624 if (!(status & ATA_BUSY))
3626 if (!ata_link_online(&ap->link) && status == 0xff)
3628 if (time_after(now, deadline))
3631 if (!warned && time_after(now, start + 5 * HZ) &&
3632 (deadline - now > 3 * HZ)) {
3633 ata_port_printk(ap, KERN_WARNING,
3634 "port is slow to respond, please be patient "
3635 "(Status 0x%x)\n", status);
3643 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3644 unsigned long deadline)
3646 struct ata_ioports *ioaddr = &ap->ioaddr;
3647 unsigned int dev0 = devmask & (1 << 0);
3648 unsigned int dev1 = devmask & (1 << 1);
3651 /* if device 0 was found in ata_devchk, wait for its
3655 rc = ata_wait_ready(ap, deadline);
3663 /* if device 1 was found in ata_devchk, wait for register
3664 * access briefly, then wait for BSY to clear.
3669 ap->ops->dev_select(ap, 1);
3671 /* Wait for register access. Some ATAPI devices fail
3672 * to set nsect/lbal after reset, so don't waste too
3673 * much time on it. We're gonna wait for !BSY anyway.
3675 for (i = 0; i < 2; i++) {
3678 nsect = ioread8(ioaddr->nsect_addr);
3679 lbal = ioread8(ioaddr->lbal_addr);
3680 if ((nsect == 1) && (lbal == 1))
3682 msleep(50); /* give drive a breather */
3685 rc = ata_wait_ready(ap, deadline);
3693 /* is all this really necessary? */
3694 ap->ops->dev_select(ap, 0);
3696 ap->ops->dev_select(ap, 1);
3698 ap->ops->dev_select(ap, 0);
3703 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3704 unsigned long deadline)
3706 struct ata_ioports *ioaddr = &ap->ioaddr;
3708 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3710 /* software reset. causes dev0 to be selected */
3711 iowrite8(ap->ctl, ioaddr->ctl_addr);
3712 udelay(20); /* FIXME: flush */
3713 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3714 udelay(20); /* FIXME: flush */
3715 iowrite8(ap->ctl, ioaddr->ctl_addr);
3717 /* wait a while before checking status */
3718 ata_wait_after_reset(ap, deadline);
3720 /* Before we perform post reset processing we want to see if
3721 * the bus shows 0xFF because the odd clown forgets the D7
3722 * pulldown resistor.
3724 if (ata_chk_status(ap) == 0xFF)
3727 return ata_bus_post_reset(ap, devmask, deadline);
3731 * ata_bus_reset - reset host port and associated ATA channel
3732 * @ap: port to reset
3734 * This is typically the first time we actually start issuing
3735 * commands to the ATA channel. We wait for BSY to clear, then
3736 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3737 * result. Determine what devices, if any, are on the channel
3738 * by looking at the device 0/1 error register. Look at the signature
3739 * stored in each device's taskfile registers, to determine if
3740 * the device is ATA or ATAPI.
3743 * PCI/etc. bus probe sem.
3744 * Obtains host lock.
3747 * Sets ATA_FLAG_DISABLED if bus reset fails.
3750 void ata_bus_reset(struct ata_port *ap)
3752 struct ata_device *device = ap->link.device;
3753 struct ata_ioports *ioaddr = &ap->ioaddr;
3754 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3756 unsigned int dev0, dev1 = 0, devmask = 0;
3759 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3761 /* determine if device 0/1 are present */
3762 if (ap->flags & ATA_FLAG_SATA_RESET)
3765 dev0 = ata_devchk(ap, 0);
3767 dev1 = ata_devchk(ap, 1);
3771 devmask |= (1 << 0);
3773 devmask |= (1 << 1);
3775 /* select device 0 again */
3776 ap->ops->dev_select(ap, 0);
3778 /* issue bus reset */
3779 if (ap->flags & ATA_FLAG_SRST) {
3780 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3781 if (rc && rc != -ENODEV)
3786 * determine by signature whether we have ATA or ATAPI devices
3788 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3789 if ((slave_possible) && (err != 0x81))
3790 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3792 /* is double-select really necessary? */
3793 if (device[1].class != ATA_DEV_NONE)
3794 ap->ops->dev_select(ap, 1);
3795 if (device[0].class != ATA_DEV_NONE)
3796 ap->ops->dev_select(ap, 0);
3798 /* if no devices were detected, disable this port */
3799 if ((device[0].class == ATA_DEV_NONE) &&
3800 (device[1].class == ATA_DEV_NONE))
3803 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3804 /* set up device control for ATA_FLAG_SATA_RESET */
3805 iowrite8(ap->ctl, ioaddr->ctl_addr);
3812 ata_port_printk(ap, KERN_ERR, "disabling port\n");
3813 ata_port_disable(ap);
3819 * sata_link_debounce - debounce SATA phy status
3820 * @link: ATA link to debounce SATA phy status for
3821 * @params: timing parameters { interval, duration, timeout } in msec
3822 * @deadline: deadline jiffies for the operation
3824 * Make sure SStatus of @link reaches stable state, determined by
3825 * holding the same value where DET is not 1 for @duration polled
3826 * every @interval, before @timeout. Timeout constrains the
3827 * beginning of the stable state. Because DET gets stuck at 1 on
3828 * some controllers after hot unplugging, this function waits
3829 * until timeout and then returns 0 if DET is stable at 1.
3831 * @timeout is further limited by @deadline. The sooner of the
3835 * Kernel thread context (may sleep)
3838 * 0 on success, -errno on failure.
3840 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3841 unsigned long deadline)
3843 unsigned long interval_msec = params[0];
3844 unsigned long duration = msecs_to_jiffies(params[1]);
3845 unsigned long last_jiffies, t;
3849 t = jiffies + msecs_to_jiffies(params[2]);
3850 if (time_before(t, deadline))
3853 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3858 last_jiffies = jiffies;
3861 msleep(interval_msec);
3862 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3868 if (cur == 1 && time_before(jiffies, deadline))
3870 if (time_after(jiffies, last_jiffies + duration))
3875 /* unstable, start over */
3877 last_jiffies = jiffies;
3879 /* Check deadline. If debouncing failed, return
3880 * -EPIPE to tell upper layer to lower link speed.
3882 if (time_after(jiffies, deadline))
3888 * sata_link_resume - resume SATA link
3889 * @link: ATA link to resume SATA
3890 * @params: timing parameters { interval, duration, timeout } in msec
3891 * @deadline: deadline jiffies for the operation
3893 * Resume SATA phy @link and debounce it.
3896 * Kernel thread context (may sleep)
3899 * 0 on success, -errno on failure.
3901 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3902 unsigned long deadline)
3907 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3910 scontrol = (scontrol & 0x0f0) | 0x300;
3912 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3915 /* Some PHYs react badly if SStatus is pounded immediately
3916 * after resuming. Delay 200ms before debouncing.
3920 return sata_link_debounce(link, params, deadline);
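/*
 * Illustrative note: the (scontrol & 0x0f0) | 0x300 above preserves the
 * SPD limit field (bits 7:4), clears DET (bits 3:0) so the PHY leaves
 * any reset/offline state, and sets IPM (bits 11:8) to 3 so the partial
 * and slumber power-management states stay disabled while the link is
 * debounced.
 */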
3924 * ata_std_prereset - prepare for reset
3925 * @link: ATA link to be reset
3926 * @deadline: deadline jiffies for the operation
3928 * @link is about to be reset. Initialize it. Failure from
3929 * prereset makes libata abort whole reset sequence and give up
3930 * that port, so prereset should be best-effort. It does its
3931 * best to prepare for reset sequence but if things go wrong, it
3932 * should just whine, not fail.
3935 * Kernel thread context (may sleep)
3938 * 0 on success, -errno otherwise.
3940 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3942 struct ata_port *ap = link->ap;
3943 struct ata_eh_context *ehc = &link->eh_context;
3944 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3947 /* handle link resume */
3948 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3949 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3950 ehc->i.action |= ATA_EH_HARDRESET;
3952 /* Some PMPs don't work with only SRST, force hardreset if PMP
3955 if (ap->flags & ATA_FLAG_PMP)
3956 ehc->i.action |= ATA_EH_HARDRESET;
3958 /* if we're about to do hardreset, nothing more to do */
3959 if (ehc->i.action & ATA_EH_HARDRESET)
3962 /* if SATA, resume link */
3963 if (ap->flags & ATA_FLAG_SATA) {
3964 rc = sata_link_resume(link, timing, deadline);
3965 /* whine about phy resume failure but proceed */
3966 if (rc && rc != -EOPNOTSUPP)
3967 ata_link_printk(link, KERN_WARNING, "failed to resume "
3968 "link for reset (errno=%d)\n", rc);
3971 /* Wait for !BSY if the controller can wait for the first D2H
3972 * Reg FIS and we don't know that no device is attached.
3974 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3975 rc = ata_wait_ready(ap, deadline);
3976 if (rc && rc != -ENODEV) {
3977 ata_link_printk(link, KERN_WARNING, "device not ready "
3978 "(errno=%d), forcing hardreset\n", rc);
3979 ehc->i.action |= ATA_EH_HARDRESET;
3987 * ata_std_softreset - reset host port via ATA SRST
3988 * @link: ATA link to reset
3989 * @classes: resulting classes of attached devices
3990 * @deadline: deadline jiffies for the operation
3992 * Reset host port using ATA SRST.
3995 * Kernel thread context (may sleep)
3998 * 0 on success, -errno otherwise.
4000 int ata_std_softreset(struct ata_link *link, unsigned int *classes,
4001 unsigned long deadline)
4003 struct ata_port *ap = link->ap;
4004 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
4005 unsigned int devmask = 0;
4011 if (ata_link_offline(link)) {
4012 classes[0] = ATA_DEV_NONE;
4016 /* determine if device 0/1 are present */
4017 if (ata_devchk(ap, 0))
4018 devmask |= (1 << 0);
4019 if (slave_possible && ata_devchk(ap, 1))
4020 devmask |= (1 << 1);
4022 /* select device 0 again */
4023 ap->ops->dev_select(ap, 0);
4025 /* issue bus reset */
4026 DPRINTK("about to softreset, devmask=%x\n", devmask);
4027 rc = ata_bus_softreset(ap, devmask, deadline);
4028 /* if link is occupied, -ENODEV too is an error */
4029 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
4030 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
4034 /* determine by signature whether we have ATA or ATAPI devices */
4035 classes[0] = ata_dev_try_classify(&link->device[0],
4036 devmask & (1 << 0), &err);
4037 if (slave_possible && err != 0x81)
4038 classes[1] = ata_dev_try_classify(&link->device[1],
4039 devmask & (1 << 1), &err);
4042 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
4047 * sata_link_hardreset - reset link via SATA phy reset
4048 * @link: link to reset
4049 * @timing: timing parameters { interval, duration, timeout } in msec
4050 * @deadline: deadline jiffies for the operation
4052 * SATA phy-reset @link using DET bits of SControl register.
4055 * Kernel thread context (may sleep)
4058 * 0 on success, -errno otherwise.
4060 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
4061 unsigned long deadline)
4068 if (sata_set_spd_needed(link)) {
4069 /* SATA spec says nothing about how to reconfigure
4070 * spd. To be on the safe side, turn off phy during
4071 * reconfiguration. This works for at least ICH7 AHCI
4074 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4077 scontrol = (scontrol & 0x0f0) | 0x304;
4079 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
4085 /* issue phy wake/reset */
4086 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
4089 scontrol = (scontrol & 0x0f0) | 0x301;
4091 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
4094 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
4095 * 10.4.2 says at least 1 ms.
4099 /* bring link back */
4100 rc = sata_link_resume(link, timing, deadline);
4102 DPRINTK("EXIT, rc=%d\n", rc);
4107 * sata_std_hardreset - reset host port via SATA phy reset
4108 * @link: link to reset
4109 * @class: resulting class of attached device
4110 * @deadline: deadline jiffies for the operation
4112 * SATA phy-reset host port using DET bits of SControl register,
4113 * wait for !BSY and classify the attached device.
4116 * Kernel thread context (may sleep)
4119 * 0 on success, -errno otherwise.
4121 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
4122 unsigned long deadline)
4124 struct ata_port *ap = link->ap;
4125 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
4131 rc = sata_link_hardreset(link, timing, deadline);
4133 ata_link_printk(link, KERN_ERR,
4134 "COMRESET failed (errno=%d)\n", rc);
4138 /* TODO: phy layer with polling, timeouts, etc. */
4139 if (ata_link_offline(link)) {
4140 *class = ATA_DEV_NONE;
4141 DPRINTK("EXIT, link offline\n");
4145 /* wait a while before checking status */
4146 ata_wait_after_reset(ap, deadline);
4148 /* If PMP is supported, we have to do follow-up SRST. Note
4149 * that some PMPs don't send D2H Reg FIS after hardreset at
4150 * all if the first port is empty. Wait for it just for a
4151 * second and request follow-up SRST.
4153 if (ap->flags & ATA_FLAG_PMP) {
4154 ata_wait_ready(ap, jiffies + HZ);
4158 rc = ata_wait_ready(ap, deadline);
4159 /* link occupied, -ENODEV too is an error */
4161 ata_link_printk(link, KERN_ERR,
4162 "COMRESET failed (errno=%d)\n", rc);
4166 ap->ops->dev_select(ap, 0); /* probably unnecessary */
4168 *class = ata_dev_try_classify(link->device, 1, NULL);
4170 DPRINTK("EXIT, class=%u\n", *class);
4175 * ata_std_postreset - standard postreset callback
4176 * @link: the target ata_link
4177 * @classes: classes of attached devices
4179 * This function is invoked after a successful reset. Note that
4180 * the device might have been reset more than once using
4181 * different reset methods before postreset is invoked.
4184 * Kernel thread context (may sleep)
4186 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
4188 struct ata_port *ap = link->ap;
4193 /* print link status */
4194 sata_print_link_status(link);
4197 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
4198 sata_scr_write(link, SCR_ERROR, serror);
4199 link->eh_info.serror = 0;
4201 /* is double-select really necessary? */
4202 if (classes[0] != ATA_DEV_NONE)
4203 ap->ops->dev_select(ap, 1);
4204 if (classes[1] != ATA_DEV_NONE)
4205 ap->ops->dev_select(ap, 0);
4207 /* bail out if no device is present */
4208 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
4209 DPRINTK("EXIT, no device\n");
4213 /* set up device control */
4214 if (ap->ioaddr.ctl_addr)
4215 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
4221 * ata_dev_same_device - Determine whether new ID matches configured device
4222 * @dev: device to compare against
4223 * @new_class: class of the new device
4224 * @new_id: IDENTIFY page of the new device
4226 * Compare @new_class and @new_id against @dev and determine
4227 * whether @dev is the device indicated by @new_class and
4234 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4236 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4239 const u16 *old_id = dev->id;
4240 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4241 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
4243 if (dev->class != new_class) {
4244 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4245 dev->class, new_class);
4249 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4250 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4251 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4252 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
4254 if (strcmp(model[0], model[1])) {
4255 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4256 "'%s' != '%s'\n", model[0], model[1]);
4260 if (strcmp(serial[0], serial[1])) {
4261 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4262 "'%s' != '%s'\n", serial[0], serial[1]);
4270 * ata_dev_reread_id - Re-read IDENTIFY data
4271 * @dev: target ATA device
4272 * @readid_flags: read ID flags
4274 * Re-read IDENTIFY page and make sure @dev is still attached to
4278 * Kernel thread context (may sleep)
4281 * 0 on success, negative errno otherwise
4283 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
4285 unsigned int class = dev->class;
4286 u16 *id = (void *)dev->link->ap->sector_buf;
4290 rc = ata_dev_read_id(dev, &class, readid_flags, id);
4294 /* is the device still there? */
4295 if (!ata_dev_same_device(dev, class, id))
4298 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
4303 * ata_dev_revalidate - Revalidate ATA device
4304 * @dev: device to revalidate
4305 * @new_class: new class code
4306 * @readid_flags: read ID flags
4308 * Re-read IDENTIFY page, make sure @dev is still attached to the
4309 * port and reconfigure it according to the new IDENTIFY page.
4312 * Kernel thread context (may sleep)
4315 * 0 on success, negative errno otherwise
4317 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4318 unsigned int readid_flags)
4320 u64 n_sectors = dev->n_sectors;
4323 if (!ata_dev_enabled(dev))
4326 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4327 if (ata_class_enabled(new_class) &&
4328 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4329 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4330 dev->class, new_class);
4336 rc = ata_dev_reread_id(dev, readid_flags);
4340 /* configure device according to the new ID */
4341 rc = ata_dev_configure(dev);
4345 /* verify n_sectors hasn't changed */
4346 if (dev->class == ATA_DEV_ATA && n_sectors &&
4347 dev->n_sectors != n_sectors) {
4348 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4350 (unsigned long long)n_sectors,
4351 (unsigned long long)dev->n_sectors);
4353 /* restore original n_sectors */
4354 dev->n_sectors = n_sectors;
4363 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
4367 struct ata_blacklist_entry {
4368 const char *model_num;
4369 const char *model_rev;
4370 unsigned long horkage;
4373 static const struct ata_blacklist_entry ata_device_blacklist [] = {
4374 /* Devices with DMA related problems under Linux */
4375 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4376 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4377 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4378 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4379 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4380 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4381 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4382 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4383 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4384 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4385 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4386 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4387 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4388 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4389 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4390 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4391 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4392 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4393 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4394 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4395 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4396 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4397 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4398 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4399 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4400 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
4401 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4402 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
4403 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
4404 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
4405 /* Odd clown on sil3726/4726 PMPs */
4406 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
4407 ATA_HORKAGE_SKIP_PM },
4409 /* Weird ATAPI devices */
4410 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
4412 /* Devices we expect to fail diagnostics */
4414 /* Devices where NCQ should be avoided */
4416 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
4417 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
4418 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4419 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
4421 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
4422 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
4423 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
4424 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
4426 /* Blacklist entries taken from Silicon Image 3124/3132
4427 Windows driver .inf file - also several Linux problem reports */
4428 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4429 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4430 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
4432 /* devices which puke on READ_NATIVE_MAX */
4433 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4434 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4435 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4436 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
4438 /* Devices which report 1 sector over size HPA */
4439 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4440 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4441 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
4443 /* Devices which get the IVB wrong */
4444 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4445 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
4446 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4447 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4448 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
4454 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
4460 * check for trailing wildcard: *\0
4462 p = strchr(patt, wildchar);
4463 if (p && ((*(p + 1)) == 0))
4474 return strncmp(patt, name, len);
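/*
 * Worked example (illustrative): the blacklist pattern "Maxtor *" above
 * ends in the wildcard, so the comparison length is clamped to the
 * "Maxtor " prefix and strncmp() matches any model string starting with
 * it; combined with the "BANC*" firmware pattern, the entry tags those
 * drives with ATA_HORKAGE_NONCQ.
 */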
4477 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
4479 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4480 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
4481 const struct ata_blacklist_entry *ad = ata_device_blacklist;
4483 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4484 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
4486 while (ad->model_num) {
4487 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
4488 if (ad->model_rev == NULL)
4490 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
4498 static int ata_dma_blacklisted(const struct ata_device *dev)
4500 /* We don't support polling DMA.
4501 * Blacklist DMA for those ATAPI devices with CDB-intr (and use PIO)
4502 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
4504 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
4505 (dev->flags & ATA_DFLAG_CDB_INTR))
4507 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
4511 * ata_is_40wire - check drive side detection
4514 * Perform drive side detection decoding, allowing for device vendors
4515 * who can't follow the documentation.
4518 static int ata_is_40wire(struct ata_device *dev)
4520 if (dev->horkage & ATA_HORKAGE_IVB)
4521 return ata_drive_40wire_relaxed(dev->id);
4522 return ata_drive_40wire(dev->id);
4526 * ata_dev_xfermask - Compute supported xfermask of the given device
4527 * @dev: Device to compute xfermask for
4529 * Compute supported xfermask of @dev and store it in
4530 * dev->*_mask. This function is responsible for applying all
4531 * known limits including host controller limits, device
4537 static void ata_dev_xfermask(struct ata_device *dev)
4539 struct ata_link *link = dev->link;
4540 struct ata_port *ap = link->ap;
4541 struct ata_host *host = ap->host;
4542 unsigned long xfer_mask;
4544 /* controller modes available */
4545 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4546 ap->mwdma_mask, ap->udma_mask);
4548 /* drive modes available */
4549 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4550 dev->mwdma_mask, dev->udma_mask);
4551 xfer_mask &= ata_id_xfermask(dev->id);
4554 * CFA Advanced TrueIDE timings are not allowed on a shared
4557 if (ata_dev_pair(dev)) {
4558 /* No PIO5 or PIO6 */
4559 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4560 /* No MWDMA3 or MWDMA 4 */
4561 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4564 if (ata_dma_blacklisted(dev)) {
4565 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4566 ata_dev_printk(dev, KERN_WARNING,
4567 "device is on DMA blacklist, disabling DMA\n");
4570 if ((host->flags & ATA_HOST_SIMPLEX) &&
4571 host->simplex_claimed && host->simplex_claimed != ap) {
4572 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4573 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4574 "other device, disabling DMA\n");
4577 if (ap->flags & ATA_FLAG_NO_IORDY)
4578 xfer_mask &= ata_pio_mask_no_iordy(dev);
4580 if (ap->ops->mode_filter)
4581 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
4583 /* Apply cable rule here. Don't apply it early because when
4584 * we handle hot plug the cable type can itself change.
4585 * Check this last so that we know if the transfer rate was
4586 * solely limited by the cable.
4587 * Unknown or 80 wire cables reported host side are checked
4588 * drive side as well. Cases where we know a 40wire cable
4589 * is used safely for 80 are not checked here.
4591 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4592 /* UDMA/44 or higher would be available */
4593 if ((ap->cbl == ATA_CBL_PATA40) ||
4594 (ata_is_40wire(dev) &&
4595 (ap->cbl == ATA_CBL_PATA_UNK ||
4596 ap->cbl == ATA_CBL_PATA80))) {
4597 ata_dev_printk(dev, KERN_WARNING,
4598 "limited to UDMA/33 due to 40-wire cable\n");
4599 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4602 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4603 &dev->mwdma_mask, &dev->udma_mask);
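/*
 * Worked example (illustrative): 0xF8 << ATA_SHIFT_UDMA covers UDMA3-7,
 * i.e. UDMA/44 and faster, which need an 80-wire cable. If such a mode
 * survives the other filters but the cable checks above indicate 40
 * wires, those bits are cleared and the device ends up capped at
 * UDMA/33, matching the warning printed there.
 */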
4607 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4608 * @dev: Device to which command will be sent
4610 * Issue SET FEATURES - XFER MODE command to device @dev
4614 * PCI/etc. bus probe sem.
4617 * 0 on success, AC_ERR_* mask otherwise.
4620 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4622 struct ata_taskfile tf;
4623 unsigned int err_mask;
4625 /* set up set-features taskfile */
4626 DPRINTK("set features - xfer mode\n");
4628 /* Some controllers and ATAPI devices show flaky interrupt
4629 * behavior after setting xfer mode. Use polling instead.
4631 ata_tf_init(dev, &tf);
4632 tf.command = ATA_CMD_SET_FEATURES;
4633 tf.feature = SETFEATURES_XFER;
4634 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4635 tf.protocol = ATA_PROT_NODATA;
4636 /* If we are using IORDY we must send the mode setting command */
4637 if (ata_pio_need_iordy(dev))
4638 tf.nsect = dev->xfer_mode;
4639 /* If the device has IORDY and the controller does not - turn it off */
4640 else if (ata_id_has_iordy(dev->id))
4642 else /* In the ancient relic department - skip all of this */
4645 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4647 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4651 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4652 * @dev: Device to which command will be sent
4653 * @enable: Whether to enable or disable the feature
4654 * @feature: The sector count value that selects the feature to set
4656 * Issue SET FEATURES - SATA FEATURES command to device @dev,
4657 * passing @feature in the sector count register
4660 * PCI/etc. bus probe sem.
4663 * 0 on success, AC_ERR_* mask otherwise.
4665 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4668 struct ata_taskfile tf;
4669 unsigned int err_mask;
4671 /* set up set-features taskfile */
4672 DPRINTK("set features - SATA features\n");
4674 ata_tf_init(dev, &tf);
4675 tf.command = ATA_CMD_SET_FEATURES;
4676 tf.feature = enable;
4677 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4678 tf.protocol = ATA_PROT_NODATA;
4681 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4683 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4688 * ata_dev_init_params - Issue INIT DEV PARAMS command
4689 * @dev: Device to which command will be sent
4690 * @heads: Number of heads (taskfile parameter)
4691 * @sectors: Number of sectors (taskfile parameter)
4694 * Kernel thread context (may sleep)
4697 * 0 on success, AC_ERR_* mask otherwise.
4699 static unsigned int ata_dev_init_params(struct ata_device *dev,
4700 u16 heads, u16 sectors)
4702 struct ata_taskfile tf;
4703 unsigned int err_mask;
4705 /* Number of sectors per track 1-255. Number of heads 1-16 */
4706 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4707 return AC_ERR_INVALID;
4709 /* set up init dev params taskfile */
4710 DPRINTK("init dev params\n");
4712 ata_tf_init(dev, &tf);
4713 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4714 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4715 tf.protocol = ATA_PROT_NODATA;
4717 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4719 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4720 /* A clean abort indicates an original or just out-of-spec drive,
4721 and we should continue as we issue the setup based on the
4722 drive-reported working geometry */
4723 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4726 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4731 * ata_sg_clean - Unmap DMA memory associated with command
4732 * @qc: Command containing DMA memory to be released
4734 * Unmap all mapped DMA memory associated with this command.
4737 * spin_lock_irqsave(host lock)
4739 void ata_sg_clean(struct ata_queued_cmd *qc)
4741 struct ata_port *ap = qc->ap;
4742 struct scatterlist *sg = qc->sg;
4743 int dir = qc->dma_dir;
4745 WARN_ON(sg == NULL);
4747 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4750 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4752 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4757 * ata_fill_sg - Fill PCI IDE PRD table
4758 * @qc: Metadata associated with taskfile to be transferred
4760 * Fill PCI IDE PRD (scatter-gather) table with segments
4761 * associated with the current disk command.
4764 * spin_lock_irqsave(host lock)
4767 static void ata_fill_sg(struct ata_queued_cmd *qc)
4769 struct ata_port *ap = qc->ap;
4770 struct scatterlist *sg;
4771 unsigned int si, pi;
4774 for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
4779 * Note h/w doesn't support 64-bit, so we unconditionally
4780 * truncate dma_addr_t to u32.
4782 addr = (u32) sg_dma_address(sg);
4783 sg_len = sg_dma_len(sg);
4786 offset = addr & 0xffff;
4788 if ((offset + sg_len) > 0x10000)
4789 len = 0x10000 - offset;
4791 ap->prd[pi].addr = cpu_to_le32(addr);
4792 ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
4793 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
4801 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4805 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4806 * @qc: Metadata associated with taskfile to be transferred
4808 * Fill PCI IDE PRD (scatter-gather) table with segments
4809 * associated with the current disk command. Perform the fill
 * so that we avoid writing any 64K-length records for
4811 * controllers that don't follow the spec.
4814 * spin_lock_irqsave(host lock)
4817 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4819 struct ata_port *ap = qc->ap;
4820 struct scatterlist *sg;
4821 unsigned int si, pi;
4824 for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;
4828 /* determine if physical DMA addr spans 64K boundary.
4829 * Note h/w doesn't support 64-bit, so we unconditionally
4830 * truncate dma_addr_t to u32.
4832 addr = (u32) sg_dma_address(sg);
4833 sg_len = sg_dma_len(sg);
4836 offset = addr & 0xffff;
4838 if ((offset + sg_len) > 0x10000)
4839 len = 0x10000 - offset;
4841 blen = len & 0xffff;
4842 ap->prd[pi].addr = cpu_to_le32(addr);
4844 /* Some PATA chipsets like the CS5530 can't
4845 cope with 0x0000 meaning 64K as the spec says */
4846 ap->prd[pi].flags_len = cpu_to_le32(0x8000);
4848 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
4850 ap->prd[pi].flags_len = cpu_to_le32(blen);
4851 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
4859 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
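
/*
 * Worked example of the 64K-boundary rule enforced above (illustrative
 * sketch only, the helper is not part of libata): a PRD entry may not
 * cross a 64 KiB boundary, so a segment whose in-segment offset plus
 * length exceeds 0x10000 has to be split into multiple entries.
 */
static unsigned int example_prd_entries_needed(u32 addr, u32 sg_len)
{
	unsigned int n = 0;

	while (sg_len) {
		u32 offset = addr & 0xffff;
		u32 len = sg_len;

		if (offset + sg_len > 0x10000)
			len = 0x10000 - offset;	/* stop at the boundary */

		n++;
		addr += len;
		sg_len -= len;
	}

	return n;
}
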
4863 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4864 * @qc: Metadata associated with taskfile to check
4866 * Allow low-level driver to filter ATA PACKET commands, returning
4867 * a status indicating whether or not it is OK to use DMA for the
4868 * supplied PACKET command.
4871 * spin_lock_irqsave(host lock)
4873 * RETURNS: 0 when ATAPI DMA can be used
4876 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4878 struct ata_port *ap = qc->ap;
	/* Don't allow DMA if it isn't a multiple of 16 bytes.  Quite a
4881 * few ATAPI devices choke on such DMA requests.
4883 if (unlikely(qc->nbytes & 15))
4886 if (ap->ops->check_atapi_dma)
4887 return ap->ops->check_atapi_dma(qc);
4893 * ata_std_qc_defer - Check whether a qc needs to be deferred
4894 * @qc: ATA command in question
4896 * Non-NCQ commands cannot run with any other command, NCQ or
 * not.  As the upper layer only knows the queue depth, we are
4898 * responsible for maintaining exclusion. This function checks
4899 * whether a new command @qc can be issued.
4902 * spin_lock_irqsave(host lock)
4905 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4907 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4909 struct ata_link *link = qc->dev->link;
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}
	return ATA_DEFER_LINK;
4923 * ata_qc_prep - Prepare taskfile for submission
4924 * @qc: Metadata associated with taskfile to be prepared
4926 * Prepare ATA taskfile for submission.
4929 * spin_lock_irqsave(host lock)
4931 void ata_qc_prep(struct ata_queued_cmd *qc)
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
4940 * ata_dumb_qc_prep - Prepare taskfile for submission
4941 * @qc: Metadata associated with taskfile to be prepared
4943 * Prepare ATA taskfile for submission.
4946 * spin_lock_irqsave(host lock)
4948 void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg_dumb(qc);
4956 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
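
/*
 * A low-level driver selects one of the qc_prep helpers above in its
 * ata_port_operations: ata_qc_prep for spec-compliant BMDMA engines,
 * ata_dumb_qc_prep for chips that mishandle 64K PRD entries, and
 * ata_noop_qc_prep when no PRD table is used at all.  Minimal sketch of
 * such an ops table (most hooks omitted, the name is hypothetical):
 */
static const struct ata_port_operations example_dumb_port_ops = {
	.qc_prep	= ata_dumb_qc_prep,
	.qc_issue	= ata_qc_issue_prot,
	/* a real driver fills in the remaining mandatory hooks */
};
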
4959 * ata_sg_init - Associate command with scatter-gather table.
4960 * @qc: Command to be associated
4961 * @sg: Scatter-gather table.
4962 * @n_elem: Number of elements in s/g table.
4964 * Initialize the data-related elements of queued_cmd @qc
 * to point to a scatter-gather table @sg, containing @n_elem
 * elements.
4969 * spin_lock_irqsave(host lock)
4971 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4972 unsigned int n_elem)
4975 qc->n_elem = n_elem;
4980 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4981 * @qc: Command with scatter-gather table to be mapped.
4983 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4986 * spin_lock_irqsave(host lock)
4989 * Zero on success, negative on error.
4992 static int ata_sg_setup(struct ata_queued_cmd *qc)
4994 struct ata_port *ap = qc->ap;
4995 unsigned int n_elem;
4997 VPRINTK("ENTER, ata%u\n", ap->print_id);
4999 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
5003 DPRINTK("%d sg elements mapped\n", n_elem);
5005 qc->n_elem = n_elem;
5006 qc->flags |= ATA_QCFLAG_DMAMAP;
5012 * swap_buf_le16 - swap halves of 16-bit words in place
5013 * @buf: Buffer to swap
5014 * @buf_words: Number of 16-bit words in buffer.
5016 * Swap halves of 16-bit words if needed to convert from
 * little-endian byte order to native cpu byte order, or
 * vice-versa.
5021 * Inherited from caller.
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
5030 #endif /* __BIG_ENDIAN */
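
/*
 * Typical use of swap_buf_le16(): IDENTIFY (PACKET) DEVICE data is an
 * array of little-endian 16-bit words, so it is converted to CPU byte
 * order before being parsed.  Minimal sketch; the helper name is
 * hypothetical and ATA_ID_WORDS comes from <linux/ata.h>.
 */
static void example_fixup_id(u16 *id)
{
	/* no-op on little-endian CPUs, swaps each word on big-endian ones */
	swap_buf_le16(id, ATA_ID_WORDS);
}
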
5034 * ata_data_xfer - Transfer data by PIO
5035 * @dev: device to target
5037 * @buflen: buffer length
5040 * Transfer data from/to the device data register by PIO.
5043 * Inherited from caller.
5048 unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
5049 unsigned int buflen, int rw)
5051 struct ata_port *ap = dev->link->ap;
5052 void __iomem *data_addr = ap->ioaddr.data_addr;
5053 unsigned int words = buflen >> 1;
	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);
5061 /* Transfer trailing 1 byte, if any. */
5062 if (unlikely(buflen & 0x01)) {
5063 __le16 align_buf[1] = { 0 };
5064 unsigned char *trailing_buf = buf + buflen - 1;
		if (rw == READ) {
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(trailing_buf, align_buf, 1);
		} else {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
5080 * ata_data_xfer_noirq - Transfer data by PIO
5081 * @dev: device to target
5083 * @buflen: buffer length
5086 * Transfer data from/to the device data register by PIO. Do the
5087 * transfer with interrupts disabled.
5090 * Inherited from caller.
5095 unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
5096 unsigned int buflen, int rw)
5098 unsigned long flags;
5099 unsigned int consumed;
5101 local_irq_save(flags);
5102 consumed = ata_data_xfer(dev, buf, buflen, rw);
5103 local_irq_restore(flags);
5110 * ata_pio_sector - Transfer a sector of data.
5111 * @qc: Command on going
5113 * Transfer qc->sect_size bytes of data from/to the ATA device.
5116 * Inherited from caller.
5119 static void ata_pio_sector(struct ata_queued_cmd *qc)
5121 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
5122 struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;
5127 if (qc->curbytes == qc->nbytes - qc->sect_size)
5128 ap->hsm_task_state = HSM_ST_LAST;
5130 page = sg_page(qc->cursg);
5131 offset = qc->cursg->offset + qc->cursg_ofs;
5133 /* get the current page and offset */
5134 page = nth_page(page, (offset >> PAGE_SHIFT));
5135 offset %= PAGE_SIZE;
5137 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5139 if (PageHighMem(page)) {
5140 unsigned long flags;
5142 /* FIXME: use a bounce buffer */
5143 local_irq_save(flags);
5144 buf = kmap_atomic(page, KM_IRQ0);
5146 /* do the actual data transfer */
5147 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5149 kunmap_atomic(buf, KM_IRQ0);
5150 local_irq_restore(flags);
5152 buf = page_address(page);
5153 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
5156 qc->curbytes += qc->sect_size;
5157 qc->cursg_ofs += qc->sect_size;
5159 if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
5166 * ata_pio_sectors - Transfer one or many sectors.
5167 * @qc: Command on going
5169 * Transfer one or many sectors of data from/to the
5170 * ATA device for the DRQ request.
5173 * Inherited from caller.
5176 static void ata_pio_sectors(struct ata_queued_cmd *qc)
5178 if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_altstatus(qc->ap);	/* flush */
5195 * atapi_send_cdb - Write CDB bytes to hardware
5196 * @ap: Port to which ATAPI device is attached.
5197 * @qc: Taskfile currently active
5199 * When device has indicated its readiness to accept
5200 * a CDB, this function is called. Send the CDB.
5206 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5209 DPRINTK("send cdb\n");
5210 WARN_ON(qc->dev->cdb_len < 12);
5212 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
5213 ata_altstatus(ap); /* flush */
5215 switch (qc->tf.protocol) {
5216 case ATAPI_PROT_PIO:
5217 ap->hsm_task_state = HSM_ST;
5219 case ATAPI_PROT_NODATA:
5220 ap->hsm_task_state = HSM_ST_LAST;
5222 case ATAPI_PROT_DMA:
5223 ap->hsm_task_state = HSM_ST_LAST;
5224 /* initiate bmdma */
5225 ap->ops->bmdma_start(qc);
5231 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
5232 * @qc: Command on going
5233 * @bytes: number of bytes
 * Transfer data from/to the ATAPI device.
5238 * Inherited from caller.
5241 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5243 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
5244 struct ata_port *ap = qc->ap;
5245 struct ata_device *dev = qc->dev;
5246 struct ata_eh_info *ehi = &dev->link->eh_info;
5247 struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;
	sg = qc->cursg;
	if (unlikely(!sg)) {
5255 ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
5256 "buf=%u cur=%u bytes=%u",
5257 qc->nbytes, qc->curbytes, bytes);
	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;
5264 /* get the current page and offset */
5265 page = nth_page(page, (offset >> PAGE_SHIFT));
5266 offset %= PAGE_SIZE;
5268 /* don't overrun current sg */
5269 count = min(sg->length - qc->cursg_ofs, bytes);
5271 /* don't cross page boundaries */
5272 count = min(count, (unsigned int)PAGE_SIZE - offset);
5274 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5276 if (PageHighMem(page)) {
5277 unsigned long flags;
5279 /* FIXME: use bounce buffer */
5280 local_irq_save(flags);
5281 buf = kmap_atomic(page, KM_IRQ0);
5283 /* do the actual data transfer */
5284 consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
5286 kunmap_atomic(buf, KM_IRQ0);
5287 local_irq_restore(flags);
5289 buf = page_address(page);
5290 consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
5293 bytes -= min(bytes, consumed);
5294 qc->curbytes += count;
5295 qc->cursg_ofs += count;
5297 if (qc->cursg_ofs == sg->length) {
5298 qc->cursg = sg_next(qc->cursg);
5302 /* consumed can be larger than count only for the last transfer */
5303 WARN_ON(qc->cursg && count != consumed);
5311 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5312 * @qc: Command on going
 * Transfer data from/to the ATAPI device.
5317 * Inherited from caller.
5320 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5322 struct ata_port *ap = qc->ap;
5323 struct ata_device *dev = qc->dev;
5324 struct ata_eh_info *ehi = &dev->link->eh_info;
5325 unsigned int ireason, bc_lo, bc_hi, bytes;
5326 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5328 /* Abuse qc->result_tf for temp storage of intermediate TF
5329 * here to save some kernel stack usage.
5330 * For normal completion, qc->result_tf is not relevant. For
5331 * error, qc->result_tf is later overwritten by ata_qc_complete().
5332 * So, the correctness of qc->result_tf is not affected.
5334 ap->ops->tf_read(ap, &qc->result_tf);
5335 ireason = qc->result_tf.nsect;
5336 bc_lo = qc->result_tf.lbam;
5337 bc_hi = qc->result_tf.lbah;
5338 bytes = (bc_hi << 8) | bc_lo;
5340 /* shall be cleared to zero, indicating xfer of data */
5341 if (unlikely(ireason & (1 << 0)))
5344 /* make sure transfer direction matches expected */
5345 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5346 if (unlikely(do_write != i_write))
5349 if (unlikely(!bytes))
5352 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
5354 if (unlikely(__atapi_pio_bytes(qc, bytes)))
5356 ata_altstatus(ap); /* flush */
5361 ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
5364 qc->err_mask |= AC_ERR_HSM;
5365 ap->hsm_task_state = HSM_ST_ERR;
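
/*
 * Sketch of the register decode performed above (illustrative only, the
 * helpers are not part of libata): the ATAPI interrupt reason lives in
 * the sector count register and the byte count in the LBA mid/high pair.
 */
static unsigned int example_atapi_byte_count(const struct ata_taskfile *tf)
{
	return (tf->lbah << 8) | tf->lbam;	/* bc_hi:bc_lo */
}

static int example_atapi_is_write(const struct ata_taskfile *tf)
{
	/* I/O bit (bit 1) of ireason: 0 means host-to-device (write) */
	return (tf->nsect & (1 << 1)) == 0;
}
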
5369 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5370 * @ap: the target ata_port
5374 * 1 if ok in workqueue, 0 otherwise.
5377 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
5379 if (qc->tf.flags & ATA_TFLAG_POLLING)
5382 if (ap->hsm_task_state == HSM_ST_FIRST) {
5383 if (qc->tf.protocol == ATA_PROT_PIO &&
5384 (qc->tf.flags & ATA_TFLAG_WRITE))
5387 if (ata_is_atapi(qc->tf.protocol) &&
5388 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5396 * ata_hsm_qc_complete - finish a qc running on standard HSM
5397 * @qc: Command to complete
5398 * @in_wq: 1 if called from workqueue, 0 otherwise
5400 * Finish @qc which is running on standard HSM.
5403 * If @in_wq is zero, spin_lock_irqsave(host lock).
5404 * Otherwise, none on entry and grabs host lock.
5406 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5408 struct ata_port *ap = qc->ap;
5409 unsigned long flags;
5411 if (ap->ops->error_handler) {
5413 spin_lock_irqsave(ap->lock, flags);
5415 /* EH might have kicked in while host lock is
5418 qc = ata_qc_from_tag(ap, qc->tag);
5420 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
5421 ap->ops->irq_on(ap);
5422 ata_qc_complete(qc);
5424 ata_port_freeze(ap);
5427 spin_unlock_irqrestore(ap->lock, flags);
5429 if (likely(!(qc->err_mask & AC_ERR_HSM)))
5430 ata_qc_complete(qc);
5432 ata_port_freeze(ap);
5436 spin_lock_irqsave(ap->lock, flags);
5437 ap->ops->irq_on(ap);
5438 ata_qc_complete(qc);
5439 spin_unlock_irqrestore(ap->lock, flags);
5441 ata_qc_complete(qc);
5446 * ata_hsm_move - move the HSM to the next state.
5447 * @ap: the target ata_port
5449 * @status: current device status
5450 * @in_wq: 1 if called from workqueue, 0 otherwise
5453 * 1 when poll next status needed, 0 otherwise.
5455 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5456 u8 status, int in_wq)
5458 unsigned long flags = 0;
5461 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5463 /* Make sure ata_qc_issue_prot() does not throw things
5464 * like DMA polling into the workqueue. Notice that
5465 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5467 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5470 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
5471 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5473 switch (ap->hsm_task_state) {
5475 /* Send first data block or PACKET CDB */
5477 /* If polling, we will stay in the work queue after
5478 * sending the data. Otherwise, interrupt handler
5479 * takes over after sending the data.
5481 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5483 /* check device status */
5484 if (unlikely((status & ATA_DRQ) == 0)) {
5485 /* handle BSY=0, DRQ=0 as error */
5486 if (likely(status & (ATA_ERR | ATA_DF)))
5487 /* device stops HSM for abort/error */
5488 qc->err_mask |= AC_ERR_DEV;
5490 /* HSM violation. Let EH handle this */
5491 qc->err_mask |= AC_ERR_HSM;
5493 ap->hsm_task_state = HSM_ST_ERR;
5497 /* Device should not ask for data transfer (DRQ=1)
5498 * when it finds something wrong.
5499 * We ignore DRQ here and stop the HSM by
5500 * changing hsm_task_state to HSM_ST_ERR and
5501 * let the EH abort the command or reset the device.
5503 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5504 /* Some ATAPI tape drives forget to clear the ERR bit
5505 * when doing the next command (mostly request sense).
			 * We ignore ERR here to work around it and proceed
			 * sending the CDB.
			 */
5509 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
5510 ata_port_printk(ap, KERN_WARNING,
5511 "DRQ=1 with device error, "
5512 "dev_stat 0x%X\n", status);
5513 qc->err_mask |= AC_ERR_HSM;
5514 ap->hsm_task_state = HSM_ST_ERR;
5519 /* Send the CDB (atapi) or the first data block (ata pio out).
5520 * During the state transition, interrupt handler shouldn't
5521 * be invoked before the data transfer is complete and
5522 * hsm_task_state is changed. Hence, the following locking.
5525 spin_lock_irqsave(ap->lock, flags);
5527 if (qc->tf.protocol == ATA_PROT_PIO) {
5528 /* PIO data out protocol.
5529 * send first data block.
5532 /* ata_pio_sectors() might change the state
5533 * to HSM_ST_LAST. so, the state is changed here
5534 * before ata_pio_sectors().
5536 ap->hsm_task_state = HSM_ST;
5537 ata_pio_sectors(qc);
5540 atapi_send_cdb(ap, qc);
5543 spin_unlock_irqrestore(ap->lock, flags);
5545 /* if polling, ata_pio_task() handles the rest.
5546 * otherwise, interrupt handler takes over from here.
5551 /* complete command or read/write the data register */
5552 if (qc->tf.protocol == ATAPI_PROT_PIO) {
5553 /* ATAPI PIO protocol */
5554 if ((status & ATA_DRQ) == 0) {
5555 /* No more data to transfer or device error.
5556 * Device error will be tagged in HSM_ST_LAST.
5558 ap->hsm_task_state = HSM_ST_LAST;
5562 /* Device should not ask for data transfer (DRQ=1)
5563 * when it finds something wrong.
5564 * We ignore DRQ here and stop the HSM by
5565 * changing hsm_task_state to HSM_ST_ERR and
5566 * let the EH abort the command or reset the device.
5568 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5569 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5570 "device error, dev_stat 0x%X\n",
5572 qc->err_mask |= AC_ERR_HSM;
5573 ap->hsm_task_state = HSM_ST_ERR;
5577 atapi_pio_bytes(qc);
5579 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5580 /* bad ireason reported by device */
5584 /* ATA PIO protocol */
5585 if (unlikely((status & ATA_DRQ) == 0)) {
5586 /* handle BSY=0, DRQ=0 as error */
5587 if (likely(status & (ATA_ERR | ATA_DF)))
5588 /* device stops HSM for abort/error */
5589 qc->err_mask |= AC_ERR_DEV;
5591 /* HSM violation. Let EH handle this.
5592 * Phantom devices also trigger this
5593 * condition. Mark hint.
5595 qc->err_mask |= AC_ERR_HSM |
5598 ap->hsm_task_state = HSM_ST_ERR;
5602 /* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
5604 * We respect DRQ here and transfer one
5605 * block of junk data before changing the
5606 * hsm_task_state to HSM_ST_ERR.
5608 * For PIO writes, ERR=1 DRQ=1 doesn't make
5609 * sense since the data block has been
5610 * transferred to the device.
5612 if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
5614 qc->err_mask |= AC_ERR_DEV;
5616 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5617 ata_pio_sectors(qc);
5618 status = ata_wait_idle(ap);
5621 if (status & (ATA_BUSY | ATA_DRQ))
5622 qc->err_mask |= AC_ERR_HSM;
5624 /* ata_pio_sectors() might change the
5625 * state to HSM_ST_LAST. so, the state
5626 * is changed after ata_pio_sectors().
5628 ap->hsm_task_state = HSM_ST_ERR;
5632 ata_pio_sectors(qc);
5634 if (ap->hsm_task_state == HSM_ST_LAST &&
5635 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5637 status = ata_wait_idle(ap);
5646 if (unlikely(!ata_ok(status))) {
5647 qc->err_mask |= __ac_err_mask(status);
5648 ap->hsm_task_state = HSM_ST_ERR;
5652 /* no more data to transfer */
5653 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5654 ap->print_id, qc->dev->devno, status);
5656 WARN_ON(qc->err_mask);
5658 ap->hsm_task_state = HSM_ST_IDLE;
5660 /* complete taskfile transaction */
5661 ata_hsm_qc_complete(qc, in_wq);
5667 /* make sure qc->err_mask is available to
5668 * know what's wrong and recover
5670 WARN_ON(qc->err_mask == 0);
5672 ap->hsm_task_state = HSM_ST_IDLE;
5674 /* complete taskfile transaction */
5675 ata_hsm_qc_complete(qc, in_wq);
5687 static void ata_pio_task(struct work_struct *work)
5689 struct ata_port *ap =
5690 container_of(work, struct ata_port, port_task.work);
5691 struct ata_queued_cmd *qc = ap->port_task_data;
5696 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5699 * This is purely heuristic. This is a fast path.
5700 * Sometimes when we enter, BSY will be cleared in
5701 * a chk-status or two. If not, the drive is probably seeking
5702 * or something. Snooze for a couple msecs, then
5703 * chk-status again. If still busy, queue delayed work.
5705 status = ata_busy_wait(ap, ATA_BUSY, 5);
5706 if (status & ATA_BUSY) {
5708 status = ata_busy_wait(ap, ATA_BUSY, 10);
5709 if (status & ATA_BUSY) {
5710 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
5716 poll_next = ata_hsm_move(ap, qc, status, 1);
5718 /* another command or interrupt handler
5719 * may be running at this point.
5726 * ata_qc_new - Request an available ATA command, for queueing
5727 * @ap: Port associated with device @dev
5728 * @dev: Device from whom we request an available command structure
5734 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5736 struct ata_queued_cmd *qc = NULL;
5739 /* no command while frozen */
5740 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5743 /* the last tag is reserved for internal command. */
5744 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5745 if (!test_and_set_bit(i, &ap->qc_allocated)) {
5746 qc = __ata_qc_from_tag(ap, i);
5757 * ata_qc_new_init - Request an available ATA command, and initialize it
5758 * @dev: Device from whom we request an available command structure
5764 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5766 struct ata_port *ap = dev->link->ap;
5767 struct ata_queued_cmd *qc;
5769 qc = ata_qc_new(ap);
5782 * ata_qc_free - free unused ata_queued_cmd
5783 * @qc: Command to complete
5785 * Designed to free unused ata_queued_cmd object
5786 * in case something prevents using it.
5789 * spin_lock_irqsave(host lock)
5791 void ata_qc_free(struct ata_queued_cmd *qc)
5793 struct ata_port *ap = qc->ap;
5796 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5800 if (likely(ata_tag_valid(tag))) {
5801 qc->tag = ATA_TAG_POISON;
5802 clear_bit(tag, &ap->qc_allocated);
5806 void __ata_qc_complete(struct ata_queued_cmd *qc)
5808 struct ata_port *ap = qc->ap;
5809 struct ata_link *link = qc->dev->link;
5811 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5812 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5814 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5817 /* command should be marked inactive atomically with qc completion */
5818 if (qc->tf.protocol == ATA_PROT_NCQ) {
5819 link->sactive &= ~(1 << qc->tag);
5821 ap->nr_active_links--;
5823 link->active_tag = ATA_TAG_POISON;
5824 ap->nr_active_links--;
5827 /* clear exclusive status */
5828 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5829 ap->excl_link == link))
5830 ap->excl_link = NULL;
5832 /* atapi: mark qc as inactive to prevent the interrupt handler
5833 * from completing the command twice later, before the error handler
5834 * is called. (when rc != 0 and atapi request sense is needed)
5836 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5837 ap->qc_active &= ~(1 << qc->tag);
5839 /* call completion callback */
5840 qc->complete_fn(qc);
5843 static void fill_result_tf(struct ata_queued_cmd *qc)
5845 struct ata_port *ap = qc->ap;
5847 qc->result_tf.flags = qc->tf.flags;
5848 ap->ops->tf_read(ap, &qc->result_tf);
5851 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5853 struct ata_device *dev = qc->dev;
5855 if (ata_tag_internal(qc->tag))
5858 if (ata_is_nodata(qc->tf.protocol))
5861 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5864 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5868 * ata_qc_complete - Complete an active ATA command
 * @qc: Command to complete
5872 * Indicate to the mid and upper layers that an ATA
5873 * command has completed, with either an ok or not-ok status.
5876 * spin_lock_irqsave(host lock)
5878 void ata_qc_complete(struct ata_queued_cmd *qc)
5880 struct ata_port *ap = qc->ap;
5882 /* XXX: New EH and old EH use different mechanisms to
5883 * synchronize EH with regular execution path.
5885 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5886 * Normal execution path is responsible for not accessing a
5887 * failed qc. libata core enforces the rule by returning NULL
5888 * from ata_qc_from_tag() for failed qcs.
5890 * Old EH depends on ata_qc_complete() nullifying completion
5891 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
5895 if (ap->ops->error_handler) {
5896 struct ata_device *dev = qc->dev;
5897 struct ata_eh_info *ehi = &dev->link->eh_info;
5899 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5901 if (unlikely(qc->err_mask))
5902 qc->flags |= ATA_QCFLAG_FAILED;
5904 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5905 if (!ata_tag_internal(qc->tag)) {
5906 /* always fill result TF for failed qc */
5908 ata_qc_schedule_eh(qc);
5913 /* read result TF if requested */
5914 if (qc->flags & ATA_QCFLAG_RESULT_TF)
		/* Some commands need post-processing after successful
		 * completion.
		 */
5920 switch (qc->tf.command) {
5921 case ATA_CMD_SET_FEATURES:
5922 if (qc->tf.feature != SETFEATURES_WC_ON &&
5923 qc->tf.feature != SETFEATURES_WC_OFF)
5926 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5927 case ATA_CMD_SET_MULTI: /* multi_count changed */
5928 /* revalidate device */
5929 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5930 ata_port_schedule_eh(ap);
5934 dev->flags |= ATA_DFLAG_SLEEPING;
5938 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5939 ata_verify_xfer(qc);
5941 __ata_qc_complete(qc);
5943 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5946 /* read result TF if failed or requested */
5947 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5950 __ata_qc_complete(qc);
5955 * ata_qc_complete_multiple - Complete multiple qcs successfully
5956 * @ap: port in question
5957 * @qc_active: new qc_active mask
5958 * @finish_qc: LLDD callback invoked before completing a qc
 * Complete in-flight commands.  This function is meant to be
 * called from low-level driver's interrupt routine to complete
 * requests normally.  ap->qc_active and @qc_active are compared
5963 * and commands are completed accordingly.
5966 * spin_lock_irqsave(host lock)
5969 * Number of completed commands on success, -errno otherwise.
5971 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5972 void (*finish_qc)(struct ata_queued_cmd *))
5978 done_mask = ap->qc_active ^ qc_active;
5980 if (unlikely(done_mask & qc_active)) {
5981 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5982 "(%08x->%08x)\n", ap->qc_active, qc_active);
5986 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5987 struct ata_queued_cmd *qc;
5989 if (!(done_mask & (1 << i)))
5992 if ((qc = ata_qc_from_tag(ap, i))) {
5995 ata_qc_complete(qc);
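
/*
 * Worked example of the done_mask computation above (values are made up,
 * the helper is not part of libata): XOR-ing the previous and the new
 * qc_active bitmaps yields exactly the tags that completed, and any bit
 * set in both done_mask and the new mask would be an illegal 0->1
 * transition.
 */
static void example_done_mask(void)
{
	u32 old_active = 0x0b;				/* tags 0, 1 and 3 in flight */
	u32 new_active = 0x08;				/* only tag 3 still in flight */
	u32 done_mask = old_active ^ new_active;	/* 0x03: tags 0 and 1 done */

	WARN_ON(done_mask & new_active);	/* would be an illegal transition */
}
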
6004 * ata_qc_issue - issue taskfile to device
6005 * @qc: command to issue to device
 * Prepare an ATA command for submission to the device.
6008 * This includes mapping the data into a DMA-able
6009 * area, filling in the S/G table, and finally
6010 * writing the taskfile to hardware, starting the command.
6013 * spin_lock_irqsave(host lock)
6015 void ata_qc_issue(struct ata_queued_cmd *qc)
6017 struct ata_port *ap = qc->ap;
6018 struct ata_link *link = qc->dev->link;
6019 u8 prot = qc->tf.protocol;
6021 /* Make sure only one non-NCQ command is outstanding. The
6022 * check is skipped for old EH because it reuses active qc to
6023 * request ATAPI sense.
6025 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
6027 if (ata_is_ncq(prot)) {
6028 WARN_ON(link->sactive & (1 << qc->tag));
6031 ap->nr_active_links++;
6032 link->sactive |= 1 << qc->tag;
6034 WARN_ON(link->sactive);
6036 ap->nr_active_links++;
6037 link->active_tag = qc->tag;
6040 qc->flags |= ATA_QCFLAG_ACTIVE;
6041 ap->qc_active |= 1 << qc->tag;
6043 /* We guarantee to LLDs that they will have at least one
6044 * non-zero sg if the command is a data command.
6046 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
6048 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
6049 (ap->flags & ATA_FLAG_PIO_DMA)))
6050 if (ata_sg_setup(qc))
6053 /* if device is sleeping, schedule softreset and abort the link */
6054 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
6055 link->eh_info.action |= ATA_EH_SOFTRESET;
6056 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
6057 ata_link_abort(link);
6061 ap->ops->qc_prep(qc);
6063 qc->err_mask |= ap->ops->qc_issue(qc);
6064 if (unlikely(qc->err_mask))
6069 qc->err_mask |= AC_ERR_SYSTEM;
6071 ata_qc_complete(qc);
6075 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6076 * @qc: command to issue to device
6078 * Using various libata functions and hooks, this function
6079 * starts an ATA command. ATA commands are grouped into
6080 * classes called "protocols", and issuing each type of protocol
6081 * is slightly different.
6083 * May be used as the qc_issue() entry in ata_port_operations.
6086 * spin_lock_irqsave(host lock)
6089 * Zero on success, AC_ERR_* mask on failure
6092 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
6094 struct ata_port *ap = qc->ap;
6096 /* Use polling pio if the LLD doesn't handle
6097 * interrupt driven pio and atapi CDB interrupt.
6099 if (ap->flags & ATA_FLAG_PIO_POLLING) {
6100 switch (qc->tf.protocol) {
6102 case ATA_PROT_NODATA:
6103 case ATAPI_PROT_PIO:
6104 case ATAPI_PROT_NODATA:
6105 qc->tf.flags |= ATA_TFLAG_POLLING;
6107 case ATAPI_PROT_DMA:
6108 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
6109 /* see ata_dma_blacklisted() */
6117 /* select the device */
6118 ata_dev_select(ap, qc->dev->devno, 1, 0);
6120 /* start the command */
6121 switch (qc->tf.protocol) {
6122 case ATA_PROT_NODATA:
6123 if (qc->tf.flags & ATA_TFLAG_POLLING)
6124 ata_qc_set_polling(qc);
6126 ata_tf_to_host(ap, &qc->tf);
6127 ap->hsm_task_state = HSM_ST_LAST;
6129 if (qc->tf.flags & ATA_TFLAG_POLLING)
6130 ata_pio_queue_task(ap, qc, 0);
6135 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6137 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6138 ap->ops->bmdma_setup(qc); /* set up bmdma */
6139 ap->ops->bmdma_start(qc); /* initiate bmdma */
6140 ap->hsm_task_state = HSM_ST_LAST;
6144 if (qc->tf.flags & ATA_TFLAG_POLLING)
6145 ata_qc_set_polling(qc);
6147 ata_tf_to_host(ap, &qc->tf);
6149 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6150 /* PIO data out protocol */
6151 ap->hsm_task_state = HSM_ST_FIRST;
6152 ata_pio_queue_task(ap, qc, 0);
6154 /* always send first data block using
6155 * the ata_pio_task() codepath.
6158 /* PIO data in protocol */
6159 ap->hsm_task_state = HSM_ST;
6161 if (qc->tf.flags & ATA_TFLAG_POLLING)
6162 ata_pio_queue_task(ap, qc, 0);
6164 /* if polling, ata_pio_task() handles the rest.
6165 * otherwise, interrupt handler takes over from here.
6171 case ATAPI_PROT_PIO:
6172 case ATAPI_PROT_NODATA:
6173 if (qc->tf.flags & ATA_TFLAG_POLLING)
6174 ata_qc_set_polling(qc);
6176 ata_tf_to_host(ap, &qc->tf);
6178 ap->hsm_task_state = HSM_ST_FIRST;
6180 /* send cdb by polling if no cdb interrupt */
6181 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6182 (qc->tf.flags & ATA_TFLAG_POLLING))
6183 ata_pio_queue_task(ap, qc, 0);
6186 case ATAPI_PROT_DMA:
6187 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
6189 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6190 ap->ops->bmdma_setup(qc); /* set up bmdma */
6191 ap->hsm_task_state = HSM_ST_FIRST;
6193 /* send cdb by polling if no cdb interrupt */
6194 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6195 ata_pio_queue_task(ap, qc, 0);
6200 return AC_ERR_SYSTEM;
6207 * ata_host_intr - Handle host interrupt for given (port, task)
6208 * @ap: Port on which interrupt arrived (possibly...)
6209 * @qc: Taskfile currently active in engine
6211 * Handle host interrupt for given queued command. Currently,
6212 * only DMA interrupts are handled. All other commands are
6213 * handled via polling with interrupts disabled (nIEN bit).
6216 * spin_lock_irqsave(host lock)
6219 * One if interrupt was handled, zero if not (shared irq).
6222 inline unsigned int ata_host_intr(struct ata_port *ap,
6223 struct ata_queued_cmd *qc)
6225 struct ata_eh_info *ehi = &ap->link.eh_info;
6226 u8 status, host_stat = 0;
6228 VPRINTK("ata%u: protocol %d task_state %d\n",
6229 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
6231 /* Check whether we are expecting interrupt in this state */
6232 switch (ap->hsm_task_state) {
6234 /* Some pre-ATAPI-4 devices assert INTRQ
6235 * at this state when ready to receive CDB.
		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
6239 * The flag was turned on only for atapi devices. No
6240 * need to check ata_is_atapi(qc->tf.protocol) again.
6242 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
6246 if (qc->tf.protocol == ATA_PROT_DMA ||
6247 qc->tf.protocol == ATAPI_PROT_DMA) {
6248 /* check status of DMA engine */
6249 host_stat = ap->ops->bmdma_status(ap);
6250 VPRINTK("ata%u: host_stat 0x%X\n",
6251 ap->print_id, host_stat);
6253 /* if it's not our irq... */
6254 if (!(host_stat & ATA_DMA_INTR))
6257 /* before we do anything else, clear DMA-Start bit */
6258 ap->ops->bmdma_stop(qc);
6260 if (unlikely(host_stat & ATA_DMA_ERR)) {
			/* error when transferring data to/from memory */
6262 qc->err_mask |= AC_ERR_HOST_BUS;
6263 ap->hsm_task_state = HSM_ST_ERR;
6273 /* check altstatus */
6274 status = ata_altstatus(ap);
6275 if (status & ATA_BUSY)
6278 /* check main status, clearing INTRQ */
6279 status = ata_chk_status(ap);
6280 if (unlikely(status & ATA_BUSY))
6283 /* ack bmdma irq events */
6284 ap->ops->irq_clear(ap);
6286 ata_hsm_move(ap, qc, status, 0);
6288 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
6289 qc->tf.protocol == ATAPI_PROT_DMA))
6290 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6292 return 1; /* irq handled */
6295 ap->stats.idle_irq++;
6298 if ((ap->stats.idle_irq % 1000) == 0) {
6300 ap->ops->irq_clear(ap);
6301 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
6305 return 0; /* irq not handled */
6309 * ata_interrupt - Default ATA host interrupt handler
6310 * @irq: irq line (unused)
6311 * @dev_instance: pointer to our ata_host information structure
6313 * Default interrupt handler for PCI IDE devices. Calls
6314 * ata_host_intr() for each port that is not disabled.
6317 * Obtains host lock during operation.
6320 * IRQ_NONE or IRQ_HANDLED.
6323 irqreturn_t ata_interrupt(int irq, void *dev_instance)
6325 struct ata_host *host = dev_instance;
6327 unsigned int handled = 0;
6328 unsigned long flags;
6330 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
6331 spin_lock_irqsave(&host->lock, flags);
6333 for (i = 0; i < host->n_ports; i++) {
6334 struct ata_port *ap;
6336 ap = host->ports[i];
6338 !(ap->flags & ATA_FLAG_DISABLED)) {
6339 struct ata_queued_cmd *qc;
6341 qc = ata_qc_from_tag(ap, ap->link.active_tag);
6342 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
6343 (qc->flags & ATA_QCFLAG_ACTIVE))
6344 handled |= ata_host_intr(ap, qc);
6348 spin_unlock_irqrestore(&host->lock, flags);
6350 return IRQ_RETVAL(handled);
6354 * sata_scr_valid - test whether SCRs are accessible
6355 * @link: ATA link to test SCR accessibility for
6357 * Test whether SCRs are accessible for @link.
6363 * 1 if SCRs are accessible, 0 otherwise.
6365 int sata_scr_valid(struct ata_link *link)
6367 struct ata_port *ap = link->ap;
6369 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
6373 * sata_scr_read - read SCR register of the specified port
6374 * @link: ATA link to read SCR for
6376 * @val: Place to store read value
6378 * Read SCR register @reg of @link into *@val. This function is
6379 * guaranteed to succeed if @link is ap->link, the cable type of
6380 * the port is SATA and the port implements ->scr_read.
6383 * None if @link is ap->link. Kernel thread context otherwise.
6386 * 0 on success, negative errno on failure.
6388 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
6390 if (ata_is_host_link(link)) {
6391 struct ata_port *ap = link->ap;
6393 if (sata_scr_valid(link))
6394 return ap->ops->scr_read(ap, reg, val);
6398 return sata_pmp_scr_read(link, reg, val);
6402 * sata_scr_write - write SCR register of the specified port
6403 * @link: ATA link to write SCR for
6404 * @reg: SCR to write
6405 * @val: value to write
6407 * Write @val to SCR register @reg of @link. This function is
6408 * guaranteed to succeed if @link is ap->link, the cable type of
 * the port is SATA and the port implements ->scr_write.
6412 * None if @link is ap->link. Kernel thread context otherwise.
6415 * 0 on success, negative errno on failure.
6417 int sata_scr_write(struct ata_link *link, int reg, u32 val)
6419 if (ata_is_host_link(link)) {
6420 struct ata_port *ap = link->ap;
6422 if (sata_scr_valid(link))
6423 return ap->ops->scr_write(ap, reg, val);
6427 return sata_pmp_scr_write(link, reg, val);
6431 * sata_scr_write_flush - write SCR register of the specified port and flush
6432 * @link: ATA link to write SCR for
6433 * @reg: SCR to write
6434 * @val: value to write
6436 * This function is identical to sata_scr_write() except that this
6437 * function performs flush after writing to the register.
6440 * None if @link is ap->link. Kernel thread context otherwise.
6443 * 0 on success, negative errno on failure.
6445 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
6447 if (ata_is_host_link(link)) {
6448 struct ata_port *ap = link->ap;
6451 if (sata_scr_valid(link)) {
6452 rc = ap->ops->scr_write(ap, reg, val);
6454 rc = ap->ops->scr_read(ap, reg, &val);
6460 return sata_pmp_scr_write(link, reg, val);
6464 * ata_link_online - test whether the given link is online
6465 * @link: ATA link to test
6467 * Test whether @link is online. Note that this function returns
6468 * 0 if online status of @link cannot be obtained, so
6469 * ata_link_online(link) != !ata_link_offline(link).
6475 * 1 if the port online status is available and online.
6477 int ata_link_online(struct ata_link *link)
6481 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6482 (sstatus & 0xf) == 0x3)
6488 * ata_link_offline - test whether the given link is offline
6489 * @link: ATA link to test
6491 * Test whether @link is offline. Note that this function
6492 * returns 0 if offline status of @link cannot be obtained, so
6493 * ata_link_online(link) != !ata_link_offline(link).
6499 * 1 if the port offline status is available and offline.
6501 int ata_link_offline(struct ata_link *link)
6505 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6506 (sstatus & 0xf) != 0x3)
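
/*
 * Both helpers above key off the DET field (bits 3:0) of SStatus: 0x3
 * means "device present and PHY communication established".  Minimal
 * sketch of reading and decoding it; the helper name is hypothetical.
 */
static int example_link_det(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return -1;		/* SCRs not accessible */

	return sstatus & 0xf;		/* 0x0: no device, 0x3: phy online */
}
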
6511 int ata_flush_cache(struct ata_device *dev)
6513 unsigned int err_mask;
6516 if (!ata_try_flush_cache(dev))
6519 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6520 cmd = ATA_CMD_FLUSH_EXT;
6522 cmd = ATA_CMD_FLUSH;
6524 /* This is wrong. On a failed flush we get back the LBA of the lost
6525 sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
6528 err_mask = ata_do_simple_cmd(dev, cmd);
6530 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6538 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6539 unsigned int action, unsigned int ehi_flags,
6542 unsigned long flags;
6545 for (i = 0; i < host->n_ports; i++) {
6546 struct ata_port *ap = host->ports[i];
6547 struct ata_link *link;
6549 /* Previous resume operation might still be in
6550 * progress. Wait for PM_PENDING to clear.
6552 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6553 ata_port_wait_eh(ap);
6554 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6557 /* request PM ops to EH */
6558 spin_lock_irqsave(ap->lock, flags);
6563 ap->pm_result = &rc;
6566 ap->pflags |= ATA_PFLAG_PM_PENDING;
6567 __ata_port_for_each_link(link, ap) {
6568 link->eh_info.action |= action;
6569 link->eh_info.flags |= ehi_flags;
6572 ata_port_schedule_eh(ap);
6574 spin_unlock_irqrestore(ap->lock, flags);
6576 /* wait and check result */
6578 ata_port_wait_eh(ap);
6579 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6589 * ata_host_suspend - suspend host
6590 * @host: host to suspend
6593 * Suspend @host. Actual operation is performed by EH. This
 * function requests EH to perform PM operations and waits for EH
 * to finish.
6598 * Kernel thread context (may sleep).
6601 * 0 on success, -errno on failure.
6603 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
	 * disable link pm on all ports before requesting
	 * any pm activity
	 */
6611 ata_lpm_enable(host);
6613 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
6615 host->dev->power.power_state = mesg;
6620 * ata_host_resume - resume host
6621 * @host: host to resume
6623 * Resume @host. Actual operation is performed by EH. This
6624 * function requests EH to perform PM operations and returns.
 * Note that all resume operations are performed in parallel.
6628 * Kernel thread context (may sleep).
6630 void ata_host_resume(struct ata_host *host)
6632 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6633 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6634 host->dev->power.power_state = PMSG_ON;
6636 /* reenable link pm */
6637 ata_lpm_disable(host);
6642 * ata_port_start - Set port up for dma.
6643 * @ap: Port to initialize
6645 * Called just after data structures for each port are
6646 * initialized. Allocates space for PRD table.
6648 * May be used as the port_start() entry in ata_port_operations.
6651 * Inherited from caller.
6653 int ata_port_start(struct ata_port *ap)
6655 struct device *dev = ap->dev;
6657 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6666 * ata_dev_init - Initialize an ata_device structure
6667 * @dev: Device structure to initialize
6669 * Initialize @dev in preparation for probing.
6672 * Inherited from caller.
6674 void ata_dev_init(struct ata_device *dev)
6676 struct ata_link *link = dev->link;
6677 struct ata_port *ap = link->ap;
6678 unsigned long flags;
6680 /* SATA spd limit is bound to the first device */
6681 link->sata_spd_limit = link->hw_sata_spd_limit;
6684 /* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
6688 spin_lock_irqsave(ap->lock, flags);
6689 dev->flags &= ~ATA_DFLAG_INIT_MASK;
6691 spin_unlock_irqrestore(ap->lock, flags);
6693 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6694 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6695 dev->pio_mask = UINT_MAX;
6696 dev->mwdma_mask = UINT_MAX;
6697 dev->udma_mask = UINT_MAX;
6701 * ata_link_init - Initialize an ata_link structure
6702 * @ap: ATA port link is attached to
6703 * @link: Link structure to initialize
6704 * @pmp: Port multiplier port number
6709 * Kernel thread context (may sleep)
6711 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6715 /* clear everything except for devices */
6716 memset(link, 0, offsetof(struct ata_link, device[0]));
6720 link->active_tag = ATA_TAG_POISON;
6721 link->hw_sata_spd_limit = UINT_MAX;
6723 /* can't use iterator, ap isn't initialized yet */
6724 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6725 struct ata_device *dev = &link->device[i];
6728 dev->devno = dev - link->device;
6734 * sata_link_init_spd - Initialize link->sata_spd_limit
6735 * @link: Link to configure sata_spd_limit for
 * Initialize @link->[hw_]sata_spd_limit to the currently
 * configured value.
6741 * Kernel thread context (may sleep).
6744 * 0 on success, -errno on failure.
6746 int sata_link_init_spd(struct ata_link *link)
6752 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6756 spd = (scontrol >> 4) & 0xf;
6758 link->hw_sata_spd_limit &= (1 << spd) - 1;
6760 ata_force_spd_limit(link);
6762 link->sata_spd_limit = link->hw_sata_spd_limit;
6768 * ata_port_alloc - allocate and initialize basic ATA port resources
6769 * @host: ATA host this allocated port belongs to
6771 * Allocate and initialize basic ATA port resources.
6774 * Allocate ATA port on success, NULL on failure.
6777 * Inherited from calling layer (may sleep).
6779 struct ata_port *ata_port_alloc(struct ata_host *host)
6781 struct ata_port *ap;
6785 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6789 ap->pflags |= ATA_PFLAG_INITIALIZING;
6790 ap->lock = &host->lock;
6791 ap->flags = ATA_FLAG_DISABLED;
6793 ap->ctl = ATA_DEVCTL_OBS;
6795 ap->dev = host->dev;
6796 ap->last_ctl = 0xFF;
6798 #if defined(ATA_VERBOSE_DEBUG)
6799 /* turn on all debugging levels */
6800 ap->msg_enable = 0x00FF;
6801 #elif defined(ATA_DEBUG)
6802 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6804 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6807 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
6808 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6809 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6810 INIT_LIST_HEAD(&ap->eh_done_q);
6811 init_waitqueue_head(&ap->eh_wait_q);
6812 init_timer_deferrable(&ap->fastdrain_timer);
6813 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6814 ap->fastdrain_timer.data = (unsigned long)ap;
6816 ap->cbl = ATA_CBL_NONE;
6818 ata_link_init(ap, &ap->link, 0);
6821 ap->stats.unhandled_irq = 1;
6822 ap->stats.idle_irq = 1;
6827 static void ata_host_release(struct device *gendev, void *res)
6829 struct ata_host *host = dev_get_drvdata(gendev);
6832 for (i = 0; i < host->n_ports; i++) {
6833 struct ata_port *ap = host->ports[i];
6839 scsi_host_put(ap->scsi_host);
6841 kfree(ap->pmp_link);
6843 host->ports[i] = NULL;
6846 dev_set_drvdata(gendev, NULL);
6850 * ata_host_alloc - allocate and init basic ATA host resources
6851 * @dev: generic device this host is associated with
6852 * @max_ports: maximum number of ATA ports associated with this host
 * Allocate and initialize basic ATA host resources.  An LLD calls
 * this function to allocate a host, initializes it fully, and then
 * attaches it using ata_host_register().
6858 * @max_ports ports are allocated and host->n_ports is
6859 * initialized to @max_ports. The caller is allowed to decrease
6860 * host->n_ports before calling ata_host_register(). The unused
6861 * ports will be automatically freed on registration.
6864 * Allocate ATA host on success, NULL on failure.
6867 * Inherited from calling layer (may sleep).
6869 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6871 struct ata_host *host;
6877 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6880 /* alloc a container for our list of ATA ports (buses) */
6881 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6883 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6887 devres_add(dev, host);
6888 dev_set_drvdata(dev, host);
6890 spin_lock_init(&host->lock);
6892 host->n_ports = max_ports;
6894 /* allocate ports bound to this host */
6895 for (i = 0; i < max_ports; i++) {
6896 struct ata_port *ap;
6898 ap = ata_port_alloc(host);
6903 host->ports[i] = ap;
6906 devres_remove_group(dev, NULL);
6910 devres_release_group(dev, NULL);
6915 * ata_host_alloc_pinfo - alloc host and init with port_info array
6916 * @dev: generic device this host is associated with
6917 * @ppi: array of ATA port_info to initialize host with
6918 * @n_ports: number of ATA ports attached to this host
6920 * Allocate ATA host and initialize with info from @ppi. If NULL
6921 * terminated, @ppi may contain fewer entries than @n_ports. The
6922 * last entry will be used for the remaining ports.
6925 * Allocate ATA host on success, NULL on failure.
6928 * Inherited from calling layer (may sleep).
6930 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6931 const struct ata_port_info * const * ppi,
6934 const struct ata_port_info *pi;
6935 struct ata_host *host;
6938 host = ata_host_alloc(dev, n_ports);
6942 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6943 struct ata_port *ap = host->ports[i];
6948 ap->pio_mask = pi->pio_mask;
6949 ap->mwdma_mask = pi->mwdma_mask;
6950 ap->udma_mask = pi->udma_mask;
6951 ap->flags |= pi->flags;
6952 ap->link.flags |= pi->link_flags;
6953 ap->ops = pi->port_ops;
6955 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6956 host->ops = pi->port_ops;
6957 if (!host->private_data && pi->private_data)
6958 host->private_data = pi->private_data;
6964 static void ata_host_stop(struct device *gendev, void *res)
6966 struct ata_host *host = dev_get_drvdata(gendev);
6969 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6971 for (i = 0; i < host->n_ports; i++) {
6972 struct ata_port *ap = host->ports[i];
6974 if (ap->ops->port_stop)
6975 ap->ops->port_stop(ap);
6978 if (host->ops->host_stop)
6979 host->ops->host_stop(host);
6983 * ata_host_start - start and freeze ports of an ATA host
6984 * @host: ATA host to start ports for
6986 * Start and then freeze ports of @host. Started status is
6987 * recorded in host->flags, so this function can be called
6988 * multiple times. Ports are guaranteed to get started only
 * once.  If host->ops isn't initialized yet, it's set to the
6990 * first non-dummy port ops.
6993 * Inherited from calling layer (may sleep).
6996 * 0 if all ports are started successfully, -errno otherwise.
6998 int ata_host_start(struct ata_host *host)
7001 void *start_dr = NULL;
7004 if (host->flags & ATA_HOST_STARTED)
7007 for (i = 0; i < host->n_ports; i++) {
7008 struct ata_port *ap = host->ports[i];
7010 if (!host->ops && !ata_port_is_dummy(ap))
7011 host->ops = ap->ops;
7013 if (ap->ops->port_stop)
7017 if (host->ops->host_stop)
7021 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
7026 for (i = 0; i < host->n_ports; i++) {
7027 struct ata_port *ap = host->ports[i];
7029 if (ap->ops->port_start) {
7030 rc = ap->ops->port_start(ap);
7033 dev_printk(KERN_ERR, host->dev,
7034 "failed to start port %d "
7035 "(errno=%d)\n", i, rc);
7039 ata_eh_freeze_port(ap);
7043 devres_add(host->dev, start_dr);
7044 host->flags |= ATA_HOST_STARTED;
7049 struct ata_port *ap = host->ports[i];
7051 if (ap->ops->port_stop)
7052 ap->ops->port_stop(ap);
7054 devres_free(start_dr);
 * ata_host_init - Initialize a host struct
7060 * @host: host to initialize
7061 * @dev: device host is attached to
7062 * @flags: host flags
7066 * PCI/etc. bus probe sem.
7069 /* KILLME - the only user left is ipr */
7070 void ata_host_init(struct ata_host *host, struct device *dev,
7071 unsigned long flags, const struct ata_port_operations *ops)
7073 spin_lock_init(&host->lock);
7075 host->flags = flags;
7080 * ata_host_register - register initialized ATA host
7081 * @host: ATA host to register
7082 * @sht: template for SCSI host
7084 * Register initialized ATA host. @host is allocated using
7085 * ata_host_alloc() and fully initialized by LLD. This function
7086 * starts ports, registers @host with ATA and SCSI layers and
 * probes registered devices.
7090 * Inherited from calling layer (may sleep).
7093 * 0 on success, -errno otherwise.
7095 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7099 /* host must have been started */
7100 if (!(host->flags & ATA_HOST_STARTED)) {
7101 dev_printk(KERN_ERR, host->dev,
7102 "BUG: trying to register unstarted host\n");
7107 /* Blow away unused ports. This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
7111 for (i = host->n_ports; host->ports[i]; i++)
7112 kfree(host->ports[i]);
7114 /* give ports names and add SCSI hosts */
7115 for (i = 0; i < host->n_ports; i++)
7116 host->ports[i]->print_id = ata_print_id++;
7118 rc = ata_scsi_add_hosts(host, sht);
7122 /* associate with ACPI nodes */
7123 ata_acpi_associate(host);
7125 /* set cable, sata_spd_limit and report */
7126 for (i = 0; i < host->n_ports; i++) {
7127 struct ata_port *ap = host->ports[i];
7128 unsigned long xfer_mask;
7130 /* set SATA cable type if still unset */
7131 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7132 ap->cbl = ATA_CBL_SATA;
7134 /* init sata_spd_limit to the current value */
7135 sata_link_init_spd(&ap->link);
7137 /* print per-port info to dmesg */
7138 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7141 if (!ata_port_is_dummy(ap)) {
7142 ata_port_printk(ap, KERN_INFO,
7143 "%cATA max %s %s\n",
7144 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
7145 ata_mode_string(xfer_mask),
7146 ap->link.eh_info.desc);
7147 ata_ehi_clear_desc(&ap->link.eh_info);
7149 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7152 /* perform each probe synchronously */
7153 DPRINTK("probe begin\n");
7154 for (i = 0; i < host->n_ports; i++) {
7155 struct ata_port *ap = host->ports[i];
7158 if (ap->ops->error_handler) {
7159 struct ata_eh_info *ehi = &ap->link.eh_info;
7160 unsigned long flags;
7164 /* kick EH for boot probing */
7165 spin_lock_irqsave(ap->lock, flags);
7168 (1 << ata_link_max_devices(&ap->link)) - 1;
7169 ehi->action |= ATA_EH_SOFTRESET;
7170 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7172 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
7173 ap->pflags |= ATA_PFLAG_LOADING;
7174 ata_port_schedule_eh(ap);
7176 spin_unlock_irqrestore(ap->lock, flags);
7178 /* wait for EH to finish */
7179 ata_port_wait_eh(ap);
7181 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7182 rc = ata_bus_probe(ap);
7183 DPRINTK("ata%u: bus probe end\n", ap->print_id);
7186 /* FIXME: do something useful here?
7187 * Current libata behavior will
7188 * tear down everything when
7189 * the module is removed
7190 * or the h/w is unplugged.
7196 /* probes are done, now scan each port's disk(s) */
7197 DPRINTK("host probe begin\n");
7198 for (i = 0; i < host->n_ports; i++) {
7199 struct ata_port *ap = host->ports[i];
7201 ata_scsi_scan_host(ap, 1);
7202 ata_lpm_schedule(ap, ap->pm_policy);
7209 * ata_host_activate - start host, request IRQ and register it
7210 * @host: target ATA host
7211 * @irq: IRQ to request
7212 * @irq_handler: irq_handler used when requesting IRQ
7213 * @irq_flags: irq_flags used when requesting IRQ
7214 * @sht: scsi_host_template to use when registering the host
7216 * After allocating an ATA host and initializing it, most libata
7217 * LLDs perform three steps to activate the host - start host,
 * request IRQ and register it.  This helper takes the necessary
7219 * arguments and performs the three steps in one go.
7221 * An invalid IRQ skips the IRQ registration and expects the host to
 * have set polling mode on the port.  In this case, @irq_handler
 * should be NULL.
7226 * Inherited from calling layer (may sleep).
7229 * 0 on success, -errno otherwise.
7231 int ata_host_activate(struct ata_host *host, int irq,
7232 irq_handler_t irq_handler, unsigned long irq_flags,
7233 struct scsi_host_template *sht)
7237 rc = ata_host_start(host);
7241 /* Special case for polling mode */
7243 WARN_ON(irq_handler);
7244 return ata_host_register(host, sht);
7247 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7248 dev_driver_string(host->dev), host);
7252 for (i = 0; i < host->n_ports; i++)
7253 ata_port_desc(host->ports[i], "irq %d", irq);
7255 rc = ata_host_register(host, sht);
7256 /* if failed, just free the IRQ and leave ports alone */
7258 devm_free_irq(host->dev, irq, host);
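
/*
 * Putting the pieces together: a typical PCI LLD probe path allocates
 * the host from a port_info array and then calls ata_host_activate() to
 * start it, wire up the IRQ and register it.  Illustrative sketch only;
 * the function name is hypothetical and BAR iomapping, port_info and sht
 * contents of a real driver are omitted.
 */
static int example_init_one(struct pci_dev *pdev,
			    const struct ata_port_info *pi,
			    struct scsi_host_template *sht)
{
	const struct ata_port_info *ppi[] = { pi, NULL };
	struct ata_host *host;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
	if (!host)
		return -ENOMEM;

	/* a real driver would iomap its BARs and fill each port's ioaddr here */

	return ata_host_activate(host, pdev->irq, ata_interrupt,
				 IRQF_SHARED, sht);
}
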
 * ata_port_detach - Detach ATA port in preparation of device removal
7265 * @ap: ATA port to be detached
7267 * Detach all ATA devices and the associated SCSI devices of @ap;
7268 * then, remove the associated SCSI host. @ap is guaranteed to
7269 * be quiescent on return from this function.
7272 * Kernel thread context (may sleep).
7274 static void ata_port_detach(struct ata_port *ap)
7276 unsigned long flags;
7277 struct ata_link *link;
7278 struct ata_device *dev;
7280 if (!ap->ops->error_handler)
7283 /* tell EH we're leaving & flush EH */
7284 spin_lock_irqsave(ap->lock, flags);
7285 ap->pflags |= ATA_PFLAG_UNLOADING;
7286 spin_unlock_irqrestore(ap->lock, flags);
7288 ata_port_wait_eh(ap);
7290 /* EH is now guaranteed to see UNLOADING - EH context belongs
7291 * to us. Disable all existing devices.
7293 ata_port_for_each_link(link, ap) {
7294 ata_link_for_each_dev(dev, link)
7295 ata_dev_disable(dev);
7298 /* Final freeze & EH. All in-flight commands are aborted. EH
7299 * will be skipped and retries will be terminated with bad target.
7302 spin_lock_irqsave(ap->lock, flags);
7303 ata_port_freeze(ap); /* won't be thawed */
7304 spin_unlock_irqrestore(ap->lock, flags);
7306 ata_port_wait_eh(ap);
7307 cancel_rearming_delayed_work(&ap->hotplug_task);
7310 /* remove the associated SCSI host */
7311 scsi_remove_host(ap->scsi_host);
7315 * ata_host_detach - Detach all ports of an ATA host
7316 * @host: Host to detach
7318 * Detach all ports of @host.
7321 * Kernel thread context (may sleep).
7323 void ata_host_detach(struct ata_host *host)
7327 for (i = 0; i < host->n_ports; i++)
7328 ata_port_detach(host->ports[i]);
7330 /* the host is dead now, dissociate ACPI */
7331 ata_acpi_dissociate(host);
7335 * ata_std_ports - initialize ioaddr with standard port offsets.
7336 * @ioaddr: IO address structure to be initialized
7338 * Utility function which initializes data_addr, error_addr,
7339 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7340 * device_addr, status_addr, and command_addr to standard offsets
7341 * relative to cmd_addr.
7343 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
7346 void ata_std_ports(struct ata_ioports *ioaddr)
7348 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7349 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7350 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7351 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7352 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7353 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7354 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7355 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7356 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7357 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
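/*
 * Illustrative sketch (not part of the original source): an LLD normally
 * points cmd_addr (and ctl_addr/altstatus_addr) at its ioremapped register
 * block and lets ata_std_ports() derive the remaining taskfile register
 * addresses.  cmd_base and ctl_base are hypothetical __iomem cookies.
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = cmd_base;
 *	ioaddr->altstatus_addr = ctl_base;
 *	ioaddr->ctl_addr = ctl_base;
 *	ata_std_ports(ioaddr);
 */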
7364 * ata_pci_remove_one - PCI layer callback for device removal
7365 * @pdev: PCI device that was removed
7367 * The PCI layer indicates to libata via this hook that a hot-unplug or
7368 * module unload event has occurred. Detach all ports. Resource
7369 * release is handled via devres.
7372 * Inherited from PCI layer (may sleep).
7374 void ata_pci_remove_one(struct pci_dev *pdev)
7376 struct device *dev = &pdev->dev;
7377 struct ata_host *host = dev_get_drvdata(dev);
7379 ata_host_detach(host);
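/*
 * Illustrative sketch (not part of the original source): PCI LLDs normally
 * wire this callback straight into their pci_driver so that hot-unplug and
 * module unload detach every port; foo_pci_tbl and foo_init_one are
 * hypothetical.
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_tbl,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	};
 */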
7382 /* move to PCI subsystem */
7383 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
7385 unsigned long tmp = 0;
7387 switch (bits->width) {
7390 pci_read_config_byte(pdev, bits->reg, &tmp8);
7396 pci_read_config_word(pdev, bits->reg, &tmp16);
7402 pci_read_config_dword(pdev, bits->reg, &tmp32);
7413 return (tmp == bits->val) ? 1 : 0;
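/*
 * Illustrative sketch (not part of the original source): callers describe
 * the config-space field to test as { reg, width, mask, val } and use the
 * result to decide e.g. whether a channel is enabled.  The offset and mask
 * below are hypothetical.
 *
 *	static const struct pci_bits foo_enable_bits = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits))
 *		return -ENOENT;
 */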
7417 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
7419 pci_save_state(pdev);
7420 pci_disable_device(pdev);
7422 if (mesg.event & PM_EVENT_SLEEP)
7423 pci_set_power_state(pdev, PCI_D3hot);
7426 int ata_pci_device_do_resume(struct pci_dev *pdev)
7430 pci_set_power_state(pdev, PCI_D0);
7431 pci_restore_state(pdev);
7433 rc = pcim_enable_device(pdev);
7435 dev_printk(KERN_ERR, &pdev->dev,
7436 "failed to enable device after resume (%d)\n", rc);
7440 pci_set_master(pdev);
7444 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
7446 struct ata_host *host = dev_get_drvdata(&pdev->dev);
7449 rc = ata_host_suspend(host, mesg);
7453 ata_pci_device_do_suspend(pdev, mesg);
7458 int ata_pci_device_resume(struct pci_dev *pdev)
7460 struct ata_host *host = dev_get_drvdata(&pdev->dev);
7463 rc = ata_pci_device_do_resume(pdev);
7465 ata_host_resume(host);
7468 #endif /* CONFIG_PM */
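/*
 * Illustrative sketch (not part of the original source): a driver with no
 * controller-specific power management work can plug the two helpers above
 * directly into its (hypothetical) pci_driver:
 *
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 */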
7470 #endif /* CONFIG_PCI */
7472 static int __init ata_parse_force_one(char **cur,
7473 struct ata_force_ent *force_ent,
7474 const char **reason)
7476 /* FIXME: Currently, there's no way to tag init const data and
7477 * using __initdata causes build failure on some versions of
7478 * gcc. Once __initdataconst is implemented, add const to the
7479 * following structure.
7481 static struct ata_force_param force_tbl[] __initdata = {
7482 { "40c", .cbl = ATA_CBL_PATA40 },
7483 { "80c", .cbl = ATA_CBL_PATA80 },
7484 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
7485 { "unk", .cbl = ATA_CBL_PATA_UNK },
7486 { "ign", .cbl = ATA_CBL_PATA_IGN },
7487 { "sata", .cbl = ATA_CBL_SATA },
7488 { "1.5Gbps", .spd_limit = 1 },
7489 { "3.0Gbps", .spd_limit = 2 },
7490 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
7491 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
7492 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
7493 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
7494 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
7495 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
7496 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
7497 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
7498 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
7499 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
7500 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
7501 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
7502 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
7503 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
7504 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7505 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7506 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7507 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7508 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7509 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7510 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7511 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7512 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7513 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7514 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7515 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7516 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7517 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7518 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7519 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7520 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7521 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7522 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7523 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7524 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7525 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
7527 char *start = *cur, *p = *cur;
7528 char *id, *val, *endp;
7529 const struct ata_force_param *match_fp = NULL;
7530 int nr_matches = 0, i;
7532 /* find where this param ends and update *cur */
7533 while (*p != '\0' && *p != ',')
7544 p = strchr(start, ':');
7546 val = strstrip(start);
7551 id = strstrip(start);
7552 val = strstrip(p + 1);
7555 p = strchr(id, '.');
7558 force_ent->device = simple_strtoul(p, &endp, 10);
7559 if (p == endp || *endp != '\0') {
7560 *reason = "invalid device";
7565 force_ent->port = simple_strtoul(id, &endp, 10);
7566 if (id == endp || *endp != '\0') {
7567 *reason = "invalid port/link";
7572 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
7573 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
7574 const struct ata_force_param *fp = &force_tbl[i];
7576 if (strncasecmp(val, fp->name, strlen(val)))
7582 if (strcasecmp(val, fp->name) == 0) {
7589 *reason = "unknown value";
7592 if (nr_matches > 1) {
7593 *reason = "ambiguous value";
7597 force_ent->param = *match_fp;
7602 static void __init ata_parse_force_param(void)
7604 int idx = 0, size = 1;
7605 int last_port = -1, last_device = -1;
7606 char *p, *cur, *next;
7608 /* calculate maximum number of params and allocate force_tbl */
7609 for (p = ata_force_param_buf; *p; p++)
7613 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
7614 if (!ata_force_tbl) {
7615 printk(KERN_WARNING "ata: failed to allocate force table, "
7616 "libata.force ignored\n");
7620 /* parse and populate the table */
7621 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7622 const char *reason = "";
7623 struct ata_force_ent te = { .port = -1, .device = -1 };
7626 if (ata_parse_force_one(&next, &te, &reason)) {
7627 printk(KERN_WARNING "ata: failed to parse force "
7628 "parameter \"%s\" (%s)\n",
7633 if (te.port == -1) {
7634 te.port = last_port;
7635 te.device = last_device;
7638 ata_force_tbl[idx++] = te;
7640 last_port = te.port;
7641 last_device = te.device;
7644 ata_force_tbl_size = idx;
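/*
 * Illustrative example (not part of the original source): the libata.force
 * parameter parsed above is a comma-separated list of "[ID:]VAL" entries,
 * where ID is a port number optionally followed by ".device".  For example
 *
 *	libata.force=3:noncq,4:udma/33,1.5Gbps
 *
 * disables NCQ on port 3, limits port 4 to UDMA/33 and, because the last
 * entry carries no ID, applies the 1.5Gbps limit to the same port as the
 * preceding entry.
 */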
7647 static int __init ata_init(void)
7649 ata_probe_timeout *= HZ;
7651 ata_parse_force_param();
7653 ata_wq = create_workqueue("ata");
7657 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7659 destroy_workqueue(ata_wq);
7663 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7667 static void __exit ata_exit(void)
7669 kfree(ata_force_tbl);
7670 destroy_workqueue(ata_wq);
7671 destroy_workqueue(ata_aux_wq);
7674 subsys_initcall(ata_init);
7675 module_exit(ata_exit);
7677 static unsigned long ratelimit_time;
7678 static DEFINE_SPINLOCK(ata_ratelimit_lock);
7680 int ata_ratelimit(void)
7683 unsigned long flags;
7685 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7687 if (time_after(jiffies, ratelimit_time)) {
7689 ratelimit_time = jiffies + (HZ/5);
7693 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
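/*
 * Illustrative sketch (not part of the original source): ata_ratelimit()
 * throttles messages emitted from hot paths such as interrupt handlers,
 * e.g.
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
 */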
7699 * ata_wait_register - wait until register value changes
7700 * @reg: IO-mapped register
7701 * @mask: Mask to apply to read register value
7702 * @val: Wait condition
7703 * @interval_msec: polling interval in milliseconds
7704 * @timeout_msec: timeout in milliseconds
7706 * Waiting for some bits of register to change is a common
7707 * operation for ATA controllers. This function reads the 32-bit
7708 * little-endian IO-mapped register @reg and tests for the following condition.
7710 * (*@reg & mask) != val
7712 * If the condition is met, it returns; otherwise, the process is
7713 * repeated after @interval_msec until timeout.
7716 * Kernel thread context (may sleep)
7719 * The final register value.
7721 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7722 unsigned long interval_msec,
7723 unsigned long timeout_msec)
7725 unsigned long timeout;
7728 tmp = ioread32(reg);
7730 /* Calculate timeout _after_ the first read to make sure
7731 * preceding writes reach the controller before starting to
7732 * eat away the timeout.
7734 timeout = jiffies + (timeout_msec * HZ) / 1000;
7736 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7737 msleep(interval_msec);
7738 tmp = ioread32(reg);
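/*
 * Illustrative sketch (not part of the original source): a driver might
 * poll a busy bit in one of its MMIO registers until it clears; FOO_STAT
 * and FOO_BUSY are hypothetical.
 *
 *	u32 stat;
 *
 *	stat = ata_wait_register(mmio + FOO_STAT, FOO_BUSY, FOO_BUSY, 1, 500);
 *	if (stat & FOO_BUSY)
 *		return -EBUSY;	(timed out, bit never cleared)
 */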
7747 static void ata_dummy_noret(struct ata_port *ap) { }
7748 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7749 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7751 static u8 ata_dummy_check_status(struct ata_port *ap)
7756 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7758 return AC_ERR_SYSTEM;
7761 const struct ata_port_operations ata_dummy_port_ops = {
7762 .check_status = ata_dummy_check_status,
7763 .check_altstatus = ata_dummy_check_status,
7764 .dev_select = ata_noop_dev_select,
7765 .qc_prep = ata_noop_qc_prep,
7766 .qc_issue = ata_dummy_qc_issue,
7767 .freeze = ata_dummy_noret,
7768 .thaw = ata_dummy_noret,
7769 .error_handler = ata_dummy_noret,
7770 .post_internal_cmd = ata_dummy_qc_noret,
7771 .irq_clear = ata_dummy_noret,
7772 .port_start = ata_dummy_ret0,
7773 .port_stop = ata_dummy_noret,
7776 const struct ata_port_info ata_dummy_port_info = {
7777 .port_ops = &ata_dummy_port_ops,
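/*
 * Illustrative sketch (not part of the original source): a host driver can
 * mark a channel that is absent or disabled by pointing its port_info
 * entry at ata_dummy_port_info; the port is then registered but reported
 * as "DUMMY" and never issues commands.  foo_port_info is hypothetical.
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_port_info, &ata_dummy_port_info };
 */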
7781 * libata is essentially a library of internal helper functions for
7782 * low-level ATA host controller drivers. As such, the API/ABI is
7783 * likely to change as new drivers are added and updated.
7784 * Do not depend on ABI/API stability.
7786 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7787 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7788 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7789 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7790 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7791 EXPORT_SYMBOL_GPL(ata_std_bios_param);
7792 EXPORT_SYMBOL_GPL(ata_std_ports);
7793 EXPORT_SYMBOL_GPL(ata_host_init);
7794 EXPORT_SYMBOL_GPL(ata_host_alloc);
7795 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7796 EXPORT_SYMBOL_GPL(ata_host_start);
7797 EXPORT_SYMBOL_GPL(ata_host_register);
7798 EXPORT_SYMBOL_GPL(ata_host_activate);
7799 EXPORT_SYMBOL_GPL(ata_host_detach);
7800 EXPORT_SYMBOL_GPL(ata_sg_init);
7801 EXPORT_SYMBOL_GPL(ata_hsm_move);
7802 EXPORT_SYMBOL_GPL(ata_qc_complete);
7803 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7804 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7805 EXPORT_SYMBOL_GPL(ata_tf_load);
7806 EXPORT_SYMBOL_GPL(ata_tf_read);
7807 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7808 EXPORT_SYMBOL_GPL(ata_std_dev_select);
7809 EXPORT_SYMBOL_GPL(sata_print_link_status);
7810 EXPORT_SYMBOL_GPL(atapi_cmd_type);
7811 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7812 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7813 EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7814 EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7815 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7816 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7817 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7818 EXPORT_SYMBOL_GPL(ata_mode_string);
7819 EXPORT_SYMBOL_GPL(ata_id_xfermask);
7820 EXPORT_SYMBOL_GPL(ata_check_status);
7821 EXPORT_SYMBOL_GPL(ata_altstatus);
7822 EXPORT_SYMBOL_GPL(ata_exec_command);
7823 EXPORT_SYMBOL_GPL(ata_port_start);
7824 EXPORT_SYMBOL_GPL(ata_sff_port_start);
7825 EXPORT_SYMBOL_GPL(ata_interrupt);
7826 EXPORT_SYMBOL_GPL(ata_do_set_mode);
7827 EXPORT_SYMBOL_GPL(ata_data_xfer);
7828 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
7829 EXPORT_SYMBOL_GPL(ata_std_qc_defer);
7830 EXPORT_SYMBOL_GPL(ata_qc_prep);
7831 EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7832 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7833 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7834 EXPORT_SYMBOL_GPL(ata_bmdma_start);
7835 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7836 EXPORT_SYMBOL_GPL(ata_bmdma_status);
7837 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7838 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7839 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7840 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7841 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7842 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7843 EXPORT_SYMBOL_GPL(ata_port_probe);
7844 EXPORT_SYMBOL_GPL(ata_dev_disable);
7845 EXPORT_SYMBOL_GPL(sata_set_spd);
7846 EXPORT_SYMBOL_GPL(sata_link_debounce);
7847 EXPORT_SYMBOL_GPL(sata_link_resume);
7848 EXPORT_SYMBOL_GPL(ata_bus_reset);
7849 EXPORT_SYMBOL_GPL(ata_std_prereset);
7850 EXPORT_SYMBOL_GPL(ata_std_softreset);
7851 EXPORT_SYMBOL_GPL(sata_link_hardreset);
7852 EXPORT_SYMBOL_GPL(sata_std_hardreset);
7853 EXPORT_SYMBOL_GPL(ata_std_postreset);
7854 EXPORT_SYMBOL_GPL(ata_dev_classify);
7855 EXPORT_SYMBOL_GPL(ata_dev_pair);
7856 EXPORT_SYMBOL_GPL(ata_port_disable);
7857 EXPORT_SYMBOL_GPL(ata_ratelimit);
7858 EXPORT_SYMBOL_GPL(ata_wait_register);
7859 EXPORT_SYMBOL_GPL(ata_busy_sleep);
7860 EXPORT_SYMBOL_GPL(ata_wait_after_reset);
7861 EXPORT_SYMBOL_GPL(ata_wait_ready);
7862 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7863 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7864 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7865 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7866 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7867 EXPORT_SYMBOL_GPL(ata_host_intr);
7868 EXPORT_SYMBOL_GPL(sata_scr_valid);
7869 EXPORT_SYMBOL_GPL(sata_scr_read);
7870 EXPORT_SYMBOL_GPL(sata_scr_write);
7871 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7872 EXPORT_SYMBOL_GPL(ata_link_online);
7873 EXPORT_SYMBOL_GPL(ata_link_offline);
7875 EXPORT_SYMBOL_GPL(ata_host_suspend);
7876 EXPORT_SYMBOL_GPL(ata_host_resume);
7877 #endif /* CONFIG_PM */
7878 EXPORT_SYMBOL_GPL(ata_id_string);
7879 EXPORT_SYMBOL_GPL(ata_id_c_string);
7880 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7882 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7883 EXPORT_SYMBOL_GPL(ata_timing_find_mode);
7884 EXPORT_SYMBOL_GPL(ata_timing_compute);
7885 EXPORT_SYMBOL_GPL(ata_timing_merge);
7886 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
7889 EXPORT_SYMBOL_GPL(pci_test_config_bits);
7890 EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
7891 EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7892 EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7893 EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
7894 EXPORT_SYMBOL_GPL(ata_pci_init_one);
7895 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7897 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7898 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7899 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7900 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7901 #endif /* CONFIG_PM */
7902 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7903 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7904 #endif /* CONFIG_PCI */
7906 EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
7907 EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7908 EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7909 EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7910 EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7912 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7913 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7914 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7915 EXPORT_SYMBOL_GPL(ata_port_desc);
7917 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7918 #endif /* CONFIG_PCI */
7919 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7920 EXPORT_SYMBOL_GPL(ata_link_abort);
7921 EXPORT_SYMBOL_GPL(ata_port_abort);
7922 EXPORT_SYMBOL_GPL(ata_port_freeze);
7923 EXPORT_SYMBOL_GPL(sata_async_notification);
7924 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7925 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7926 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7927 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7928 EXPORT_SYMBOL_GPL(ata_do_eh);
7929 EXPORT_SYMBOL_GPL(ata_irq_on);
7930 EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7932 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7933 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7934 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7935 EXPORT_SYMBOL_GPL(ata_cable_ignore);
7936 EXPORT_SYMBOL_GPL(ata_cable_sata);