/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>

#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"
#define DRV_VERSION	"2.20"	/* must be exactly four chars */

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };
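/*
 * Usage sketch (illustrative addition, not part of the original file):
 * a reset path hands one of the tables above to sata_phy_debounce(),
 * which samples SStatus every <interval> ms until the reading has been
 * stable for <duration> ms, giving up after <timeout> ms.
 */
static int example_debounce_hotplug_link(struct ata_port *ap)
{
	/* hotplug events bounce for longer, so use the hotplug table */
	return sata_phy_debounce(ap, sata_deb_timing_hotplug);
}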
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;
	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;
	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;
	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;
	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
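/*
 * Usage sketch (illustrative addition): serializing a taskfile into
 * the 20-byte Register - Host to Device FIS that a SATA controller
 * consumes.  The IDENTIFY setup here is an assumption for the example.
 */
static void example_build_h2d_fis(struct ata_device *dev, u8 *fis_buf)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_ID_ATA;	/* IDENTIFY DEVICE */
	tf.protocol = ATA_PROT_PIO;

	ata_tf_to_fis(&tf, fis_buf, 0);	/* port multiplier port 0 */
}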
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */
	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];
	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];
	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
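/*
 * Worked example (illustrative addition): an LBA48 FUA write on a
 * DMA-capable device yields fua = 4, lba48 = 2, write = 1 and
 * index = 16, so ata_rw_cmds[16 + 4 + 2 + 1] selects
 * ATA_CMD_WRITE_FUA_EXT.  A multi-sector PIO read on a device with
 * multi_count set yields index 0 and selects ATA_CMD_READ_MULTI.
 */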
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
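/*
 * Worked example (illustrative addition): with a 16-head, 63-sector
 * geometry, register values cyl = 2, head = 3, sect = 4 decode to
 * block = (2 * 16 + 3) * 63 + 4 = 35 * 63 + 4 = 2209.
 */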
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
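/*
 * Worked example (illustrative addition): block = 4413 on a 16-head,
 * 63-sector device converts to track = 4413 / 63 = 70, cyl = 70 / 16
 * = 4, head = 70 % 16 = 6, sect = 4413 % 63 + 1 = 4, which passes the
 * cylinder/head/sector range check above.
 */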
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
	       ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
	       ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
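/*
 * Usage sketch (illustrative addition): round-tripping per-type masks
 * through a packed xfer_mask.  The mask values are assumptions for
 * the example only.
 */
static unsigned int example_xfermask_roundtrip(void)
{
	unsigned int pio = 0x1f, mwdma = 0x07, udma = 0x3f;
	unsigned int xfer_mask = ata_pack_xfermask(pio, mwdma, udma);

	/* recover the individual masks; a NULL slot would be skipped */
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, NULL);
	return xfer_mask;
}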
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}
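/*
 * Worked example (illustrative addition): if the highest set bit of
 * @xfer_mask is bit (ATA_SHIFT_UDMA + 5), ata_xfer_mask2mode() walks
 * the table to the UDMA entry and returns XFER_UDMA_0 + 5, i.e.
 * XFER_UDMA_5.  ata_xfer_mode2mask(XFER_UDMA_5) maps it back to the
 * single bit 1 << (ATA_SHIFT_UDMA + 5).
 */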
/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					     ATA_DNXFER_QUIET);
	}

	dev->class++;
}
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */
	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
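/*
 * Usage sketch (illustrative addition): classifying a device from a
 * post-reset signature.  An ATAPI device leaves 0x14/0xeb in the LBA
 * mid/high shadow registers.
 */
static unsigned int example_classify_atapi(void)
{
	struct ata_taskfile tf;

	memset(&tf, 0, sizeof(tf));
	tf.lbam = 0x14;
	tf.lbah = 0xeb;
	return ata_dev_classify(&tf);	/* yields ATA_DEV_ATAPI */
}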
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;	/* high byte first */
		*s++ = c;
		c = id[ofs] & 0xff;
		*s++ = c;
		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
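/*
 * Usage sketch (illustrative addition): extracting the model string
 * from an IDENTIFY page as a space-trimmed, NUL-terminated C string.
 * The buffer length must be the even payload length plus one.
 */
static void example_read_model(const u16 *id, unsigned char *model,
			       unsigned int model_sz)
{
	ata_id_c_string(id, model, ATA_ID_PROD, model_sz);
}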
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
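/*
 * Worked example (illustrative addition): a CHS-only drive reporting
 * id[1] = 1050 cylinders, id[3] = 16 heads and id[6] = 63 sectors
 * yields 1050 * 16 * 63 = 1058400 sectors, about 540 MB at 512 bytes
 * per sector.
 */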
/**
 *	ata_id_to_dma_mode - Identify DMA mode from id block
 *	@dev: device to identify
 *	@unknown: mode to assume if we cannot tell
 *
 *	Set up the timing values for the device based upon the identify
 *	reported values for the DMA mode.  This function is used by drivers
 *	which rely upon firmware configured modes, but wish to report the
 *	mode correctly when possible.
 *
 *	In addition we emit similarly formatted messages to the default
 *	ata_dev_set_mode handler, in order to provide consistency of
 *	presentation.
 */
void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no action.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	None.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}

/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  53==0x%04x  63==0x%04x  64==0x%04x  75==0x%04x\n",
		id[49], id[53], id[63], id[64], id[75]);
	DPRINTK("80==0x%04x  81==0x%04x  82==0x%04x  83==0x%04x  84==0x%04x\n",
		id[80], id[81], id[82], id[83], id[84]);
	DPRINTK("88==0x%04x  93==0x%04x\n",
		id[88], id[93]);
}
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	and SRST.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n",
					       command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}
/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}
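/*
 * Usage sketch (illustrative addition): issuing a register-only
 * command, here FLUSH CACHE, through the helper above.
 */
static unsigned int example_flush_cache(struct ata_device *dev)
{
	return ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
}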
/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set _SDD */
	rc = ata_acpi_push_id(ap, dev->devno);
	if (rc) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			rc);
	}

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
				sizeof(modelbuf));

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	/* limit ATAPI DMA to R/W commands only */
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
		dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;

	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */
int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}

/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */
int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}

/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */
int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 */
int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	for(i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc)
		goto fail;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ap);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(ap, SCR_CONTROL, &scontrol);

	if (ata_port_online(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_port_printk(ap, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_port_printk(ap, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
/**
 *	ata_dev_pair		-	return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present NULL is returned.
 */
struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_port *ap = adev->ap;
	struct ata_device *pair = &ap->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */
void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@ap: Port to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @ap downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	spd = (sstatus >> 4) & 0xf;
	if (spd <= 1)
		return -EINVAL;
	spd--;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
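/*
 * Worked example (illustrative addition): with sata_spd_limit = 0x3
 * (Gen1 and Gen2 allowed) and the link currently at Gen2 (spd = 2),
 * the highest bit is cleared (mask = 0x1) and the mask is then cut
 * below the current speed, leaving 0x1: the link is limited to
 * 1.5 Gbps on the next hardreset.
 */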
static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
{
	u32 spd, limit;

	if (ap->sata_spd_limit == UINT_MAX)
		limit = 0;
	else
		limit = fls(ap->sata_spd_limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

	return spd != limit;
}

/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@ap: Port in question
 *
 *	Test whether the spd limit in SControl matches
 *	@ap->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_port *ap)
{
	u32 scontrol;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(ap, &scontrol);
}
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@ap: Port to set SATA spd for
 *
 *	Set SATA spd of @ap according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
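/*
 * Worked example (illustrative addition): quantizing the 240 ns PIO2
 * cycle with T = 30000 (a 30 ns clock; callers pass periods scaled by
 * 1000) gives EZ(240 * 1000, 30000) = (240000 - 1) / 30000 + 1 = 8
 * clocks, i.e. timings are always rounded up to whole clock periods.
 */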
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
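/*
 * Worked example (illustrative addition): using the PIO0 numbers,
 * cycle = 600, active = 165, recover = 150: active + recover = 315 <
 * 600, so active grows by (600 - 315) / 2 = 142 to 307 and recover
 * becomes 600 - 307 = 293, making the pair span the full cycle.
 */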
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));

	return 0;
}
2541 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2542 * @ap: port on which timings will be programmed
2543 * @r_failed_dev: out parameter for failed device
2545 * Standard implementation of the function used to tune and set
2546 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2547 * ata_dev_set_mode() fails, pointer to the failing device is
2548 * returned in @r_failed_dev.
2551 * PCI/etc. bus probe sem.
2554 * 0 on success, negative errno otherwise
2557 int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2559 struct ata_device *dev;
2560 int i, rc = 0, used_dma = 0, found = 0;
2563 /* step 1: calculate xfer_mask */
2564 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2565 unsigned int pio_mask, dma_mask;
2567 dev = &ap->device[i];
2569 if (!ata_dev_enabled(dev))
2572 ata_dev_xfermask(dev);
2574 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2575 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2576 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2577 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2586 /* step 2: always set host PIO timings */
2587 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2588 dev = &ap->device[i];
2589 if (!ata_dev_enabled(dev))
2592 if (!dev->pio_mode) {
2593 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2598 dev->xfer_mode = dev->pio_mode;
2599 dev->xfer_shift = ATA_SHIFT_PIO;
2600 if (ap->ops->set_piomode)
2601 ap->ops->set_piomode(ap, dev);
2604 /* step 3: set host DMA timings */
2605 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2606 dev = &ap->device[i];
2608 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2611 dev->xfer_mode = dev->dma_mode;
2612 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2613 if (ap->ops->set_dmamode)
2614 ap->ops->set_dmamode(ap, dev);
2617 /* step 4: update devices' xfer mode */
2618 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2619 dev = &ap->device[i];
2621 /* don't update suspended devices' xfer mode */
2622 if (!ata_dev_ready(dev))
2625 rc = ata_dev_set_mode(dev);
2630 /* Record simplex status. If we selected DMA then the other
2631 * host channels are not permitted to do so.
2633 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2634 ap->host->simplex_claimed = ap;
2636 /* step 5: chip-specific finalisation */
2637 if (ap->ops->post_set_mode)
2638 ap->ops->post_set_mode(ap);
2641 *r_failed_dev = dev;
2646 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2647 * @ap: port on which timings will be programmed
2648 * @r_failed_dev: out parameter for failed device
2650 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2651 * ata_set_mode() fails, pointer to the failing device is
2652 * returned in @r_failed_dev.
2655 * PCI/etc. bus probe sem.
2658 * 0 on success, negative errno otherwise
2660 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2662 /* has private set_mode? */
2663 if (ap->ops->set_mode)
2664 return ap->ops->set_mode(ap, r_failed_dev);
2665 return ata_do_set_mode(ap, r_failed_dev);
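/*
 * Example (hypothetical LLD code): a controller whose timings are
 * managed by firmware can hook ->set_mode and still fall back to the
 * standard sequence.  example_set_mode() below is illustrative only.
 */
#if 0
static int example_set_mode(struct ata_port *ap, struct ata_device **r_failed)
{
	/* controller-private timing setup would go here */
	return ata_do_set_mode(ap, r_failed);
}
#endif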
2669 * ata_tf_to_host - issue ATA taskfile to host controller
2670 * @ap: port to which command is being issued
2671 * @tf: ATA taskfile register set
2673 * Issues ATA taskfile register set to ATA host controller,
2674 * with proper synchronization with interrupt handler and
2678 * spin_lock_irqsave(host lock)
2681 static inline void ata_tf_to_host(struct ata_port *ap,
2682 const struct ata_taskfile *tf)
2684 ap->ops->tf_load(ap, tf);
2685 ap->ops->exec_command(ap, tf);
2689 * ata_busy_sleep - sleep until BSY clears, or timeout
2690 * @ap: port containing status register to be polled
2691 * @tmout_pat: impatience timeout
2692 * @tmout: overall timeout
2694 * Sleep until ATA Status register bit BSY clears,
2695 * or a timeout occurs.
2698 * Kernel thread context (may sleep).
2701 * 0 on success, -errno otherwise.
2703 int ata_busy_sleep(struct ata_port *ap,
2704 unsigned long tmout_pat, unsigned long tmout)
2706 unsigned long timer_start, timeout;
2709 status = ata_busy_wait(ap, ATA_BUSY, 300);
2710 timer_start = jiffies;
2711 timeout = timer_start + tmout_pat;
2712 while (status != 0xff && (status & ATA_BUSY) &&
2713 time_before(jiffies, timeout)) {
2715 status = ata_busy_wait(ap, ATA_BUSY, 3);
2718 if (status != 0xff && (status & ATA_BUSY))
2719 ata_port_printk(ap, KERN_WARNING,
2720 "port is slow to respond, please be patient "
2721 "(Status 0x%x)\n", status);
2723 timeout = timer_start + tmout;
2724 while (status != 0xff && (status & ATA_BUSY) &&
2725 time_before(jiffies, timeout)) {
2727 status = ata_chk_status(ap);
2733 if (status & ATA_BUSY) {
2734 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2735 "(%lu secs, Status 0x%x)\n",
2736 tmout / HZ, status);
2743 static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2745 struct ata_ioports *ioaddr = &ap->ioaddr;
2746 unsigned int dev0 = devmask & (1 << 0);
2747 unsigned int dev1 = devmask & (1 << 1);
2748 unsigned long timeout;
2750 /* if device 0 was found in ata_devchk, wait for its
2754 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2756 /* if device 1 was found in ata_devchk, wait for
2757 * register access, then wait for BSY to clear
2759 timeout = jiffies + ATA_TMOUT_BOOT;
2763 ap->ops->dev_select(ap, 1);
2764 nsect = ioread8(ioaddr->nsect_addr);
2765 lbal = ioread8(ioaddr->lbal_addr);
2766 if ((nsect == 1) && (lbal == 1))
2768 if (time_after(jiffies, timeout)) {
2772 msleep(50); /* give drive a breather */
2775 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2777 /* is all this really necessary? */
2778 ap->ops->dev_select(ap, 0);
2780 ap->ops->dev_select(ap, 1);
2782 ap->ops->dev_select(ap, 0);
2785 static unsigned int ata_bus_softreset(struct ata_port *ap,
2786 unsigned int devmask)
2788 struct ata_ioports *ioaddr = &ap->ioaddr;
2790 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
2792 /* software reset. causes dev0 to be selected */
2793 iowrite8(ap->ctl, ioaddr->ctl_addr);
2794 udelay(20); /* FIXME: flush */
2795 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2796 udelay(20); /* FIXME: flush */
2797 iowrite8(ap->ctl, ioaddr->ctl_addr);
2799 /* spec mandates ">= 2ms" before checking status.
2800 * We wait 150ms, because that was the magic delay used for
2801 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2802 * between when the ATA command register is written and when
2803 * status is checked. Because waiting for "a while" before
2804 * checking status is fine, post SRST, we perform this magic
2805 * delay here as well.
2807 * The old drivers/ide code uses the 2 ms rule and then waits for ready.
2811 /* Before we perform post-reset processing we want to see if
2812 * the bus shows 0xFF, because the odd clown forgets the D7
2813 * pulldown resistor.
2815 if (ata_check_status(ap) == 0xFF)
2818 ata_bus_post_reset(ap, devmask);
2824 * ata_bus_reset - reset host port and associated ATA channel
2825 * @ap: port to reset
2827 * This is typically the first time we actually start issuing
2828 * commands to the ATA channel. We wait for BSY to clear, then
2829 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2830 * result. Determine what devices, if any, are on the channel
2831 * by looking at the device 0/1 error register. Look at the signature
2832 * stored in each device's taskfile registers, to determine if
2833 * the device is ATA or ATAPI.
2836 * PCI/etc. bus probe sem.
2837 * Obtains host lock.
2840 * Sets ATA_FLAG_DISABLED if bus reset fails.
2843 void ata_bus_reset(struct ata_port *ap)
2845 struct ata_ioports *ioaddr = &ap->ioaddr;
2846 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2848 unsigned int dev0, dev1 = 0, devmask = 0;
2850 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
2852 /* determine if device 0/1 are present */
2853 if (ap->flags & ATA_FLAG_SATA_RESET)
2856 dev0 = ata_devchk(ap, 0);
2858 dev1 = ata_devchk(ap, 1);
2862 devmask |= (1 << 0);
2864 devmask |= (1 << 1);
2866 /* select device 0 again */
2867 ap->ops->dev_select(ap, 0);
2869 /* issue bus reset */
2870 if (ap->flags & ATA_FLAG_SRST)
2871 if (ata_bus_softreset(ap, devmask))
2875 * determine by signature whether we have ATA or ATAPI devices
2877 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
2878 if ((slave_possible) && (err != 0x81))
2879 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
2881 /* re-enable interrupts */
2882 ap->ops->irq_on(ap);
2884 /* is double-select really necessary? */
2885 if (ap->device[1].class != ATA_DEV_NONE)
2886 ap->ops->dev_select(ap, 1);
2887 if (ap->device[0].class != ATA_DEV_NONE)
2888 ap->ops->dev_select(ap, 0);
2890 /* if no devices were detected, disable this port */
2891 if ((ap->device[0].class == ATA_DEV_NONE) &&
2892 (ap->device[1].class == ATA_DEV_NONE))
2895 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2896 /* set up device control for ATA_FLAG_SATA_RESET */
2897 iowrite8(ap->ctl, ioaddr->ctl_addr);
2904 ata_port_printk(ap, KERN_ERR, "disabling port\n");
2905 ap->ops->port_disable(ap);
2911 * sata_phy_debounce - debounce SATA phy status
2912 * @ap: ATA port to debounce SATA phy status for
2913 * @params: timing parameters { interval, duration, timeout } in msec
2915 * Make sure SStatus of @ap reaches stable state, determined by
2916 * holding the same value where DET is not 1 for @duration polled
2917 * every @interval, before @timeout. @timeout constrains the
2918 * beginning of the stable state. Because DET gets stuck at 1 on
2919 * some controllers after hot unplugging, this function waits
2920 * until timeout and then returns 0 if DET is stable at 1.
2923 * Kernel thread context (may sleep)
2926 * 0 on success, -errno on failure.
2928 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
2930 unsigned long interval_msec = params[0];
2931 unsigned long duration = params[1] * HZ / 1000;
2932 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2933 unsigned long last_jiffies;
2937 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2942 last_jiffies = jiffies;
2945 msleep(interval_msec);
2946 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2952 if (cur == 1 && time_before(jiffies, timeout))
2954 if (time_after(jiffies, last_jiffies + duration))
2959 /* unstable, start over */
2961 last_jiffies = jiffies;
2964 if (time_after(jiffies, timeout))
2970 * sata_phy_resume - resume SATA phy
2971 * @ap: ATA port to resume SATA phy for
2972 * @params: timing parameters { interval, duration, timeout } in msec
2974 * Resume SATA phy of @ap and debounce it.
2977 * Kernel thread context (may sleep)
2980 * 0 on success, -errno on failure.
2982 int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2987 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2990 scontrol = (scontrol & 0x0f0) | 0x300;
2992 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2995 /* Some PHYs react badly if SStatus is pounded immediately
2996 * after resuming. Delay 200ms before debouncing.
3000 return sata_phy_debounce(ap, params);
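/*
 * Example (illustrative sketch): reset paths pick one of the
 * sata_deb_timing_* tables depending on context, e.g. the more
 * tolerant hotplug timings after a hotplug event.
 */
#if 0
static int example_resume_link(struct ata_port *ap, int hotplug)
{
	const unsigned long *timing = hotplug ? sata_deb_timing_hotplug
					      : sata_deb_timing_normal;

	/* clear DET, then wait for SStatus to stabilize */
	return sata_phy_resume(ap, timing);
}
#endif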
3003 static void ata_wait_spinup(struct ata_port *ap)
3005 struct ata_eh_context *ehc = &ap->eh_context;
3006 unsigned long end, secs;
3009 /* first, debounce phy if SATA */
3010 if (ap->cbl == ATA_CBL_SATA) {
3011 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
3013 /* if debounced successfully and offline, no need to wait */
3014 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
3018 /* okay, let's give the drive time to spin up */
3019 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
3020 secs = ((end - jiffies) + HZ - 1) / HZ;
3022 if (time_after(jiffies, end))
3026 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
3027 "(%lu secs)\n", secs);
3029 schedule_timeout_uninterruptible(end - jiffies);
3033 * ata_std_prereset - prepare for reset
3034 * @ap: ATA port to be reset
3036 * @ap is about to be reset. Initialize it.
3039 * Kernel thread context (may sleep)
3042 * 0 on success, -errno otherwise.
3044 int ata_std_prereset(struct ata_port *ap)
3046 struct ata_eh_context *ehc = &ap->eh_context;
3047 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3050 /* handle link resume & hotplug spinup */
3051 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3052 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3053 ehc->i.action |= ATA_EH_HARDRESET;
3055 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
3056 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
3057 ata_wait_spinup(ap);
3059 /* if we're about to do hardreset, nothing more to do */
3060 if (ehc->i.action & ATA_EH_HARDRESET)
3063 /* if SATA, resume phy */
3064 if (ap->cbl == ATA_CBL_SATA) {
3065 rc = sata_phy_resume(ap, timing);
3066 if (rc && rc != -EOPNOTSUPP) {
3067 /* phy resume failed */
3068 ata_port_printk(ap, KERN_WARNING, "failed to resume "
3069 "link for reset (errno=%d)\n", rc);
3074 /* Wait for !BSY if the controller can wait for the first D2H
3075 * Reg FIS and we don't know that no device is attached.
3077 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
3078 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
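/*
 * Example (hypothetical LLD code): drivers needing extra work before
 * a reset typically wrap this helper rather than replace it.
 */
#if 0
static int example_prereset(struct ata_port *ap)
{
	/* quiesce controller-private state here, then do the rest */
	return ata_std_prereset(ap);
}
#endif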
3084 * ata_std_softreset - reset host port via ATA SRST
3085 * @ap: port to reset
3086 * @classes: resulting classes of attached devices
3088 * Reset host port using ATA SRST.
3091 * Kernel thread context (may sleep)
3094 * 0 on success, -errno otherwise.
3096 int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
3098 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3099 unsigned int devmask = 0, err_mask;
3104 if (ata_port_offline(ap)) {
3105 classes[0] = ATA_DEV_NONE;
3109 /* determine if device 0/1 are present */
3110 if (ata_devchk(ap, 0))
3111 devmask |= (1 << 0);
3112 if (slave_possible && ata_devchk(ap, 1))
3113 devmask |= (1 << 1);
3115 /* select device 0 again */
3116 ap->ops->dev_select(ap, 0);
3118 /* issue bus reset */
3119 DPRINTK("about to softreset, devmask=%x\n", devmask);
3120 err_mask = ata_bus_softreset(ap, devmask);
3122 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
3127 /* determine by signature whether we have ATA or ATAPI devices */
3128 classes[0] = ata_dev_try_classify(ap, 0, &err);
3129 if (slave_possible && err != 0x81)
3130 classes[1] = ata_dev_try_classify(ap, 1, &err);
3133 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3138 * sata_port_hardreset - reset port via SATA phy reset
3139 * @ap: port to reset
3140 * @timing: timing parameters { interval, duration, timeout } in msec
3142 * SATA phy-reset host port using DET bits of SControl register.
3145 * Kernel thread context (may sleep)
3148 * 0 on success, -errno otherwise.
3150 int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
3157 if (sata_set_spd_needed(ap)) {
3158 /* SATA spec says nothing about how to reconfigure
3159 * spd. To be on the safe side, turn off phy during
3160 * reconfiguration. This works for at least ICH7 AHCI
3163 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3166 scontrol = (scontrol & 0x0f0) | 0x304;
3168 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3174 /* issue phy wake/reset */
3175 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3178 scontrol = (scontrol & 0x0f0) | 0x301;
3180 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
3183 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3184 * 10.4.2 says at least 1 ms.
3188 /* bring phy back */
3189 rc = sata_phy_resume(ap, timing);
3191 DPRINTK("EXIT, rc=%d\n", rc);
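/*
 * For reference, the SControl values used above decompose as follows:
 * bits 3:0 are DET (1 = perform interface initialization, i.e.
 * COMRESET; 4 = disable the interface), bits 7:4 are SPD (preserved
 * by the "& 0x0f0" masks) and bits 11:8 are IPM (0x300 = disable
 * partial/slumber power management).  So 0x301 means "COMRESET with
 * power management transitions disabled, keeping the speed limit".
 */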
3196 * sata_std_hardreset - reset host port via SATA phy reset
3197 * @ap: port to reset
3198 * @class: resulting class of attached device
3200 * SATA phy-reset host port using DET bits of SControl register,
3201 * wait for !BSY and classify the attached device.
3204 * Kernel thread context (may sleep)
3207 * 0 on success, -errno otherwise.
3209 int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3211 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3217 rc = sata_port_hardreset(ap, timing);
3219 ata_port_printk(ap, KERN_ERR,
3220 "COMRESET failed (errno=%d)\n", rc);
3224 /* TODO: phy layer with polling, timeouts, etc. */
3225 if (ata_port_offline(ap)) {
3226 *class = ATA_DEV_NONE;
3227 DPRINTK("EXIT, link offline\n");
3231 /* wait a while before checking status, see SRST for more info */
3234 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
3235 ata_port_printk(ap, KERN_ERR,
3236 "COMRESET failed (device not ready)\n");
3240 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3242 *class = ata_dev_try_classify(ap, 0, NULL);
3244 DPRINTK("EXIT, class=%u\n", *class);
3249 * ata_std_postreset - standard postreset callback
3250 * @ap: the target ata_port
3251 * @classes: classes of attached devices
3253 * This function is invoked after a successful reset. Note that
3254 * the device might have been reset more than once using
3255 * different reset methods before postreset is invoked.
3258 * Kernel thread context (may sleep)
3260 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3266 /* print link status */
3267 sata_print_link_status(ap);
3270 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3271 sata_scr_write(ap, SCR_ERROR, serror);
3273 /* re-enable interrupts */
3274 if (!ap->ops->error_handler)
3275 ap->ops->irq_on(ap);
3277 /* is double-select really necessary? */
3278 if (classes[0] != ATA_DEV_NONE)
3279 ap->ops->dev_select(ap, 1);
3280 if (classes[1] != ATA_DEV_NONE)
3281 ap->ops->dev_select(ap, 0);
3283 /* bail out if no device is present */
3284 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3285 DPRINTK("EXIT, no device\n");
3289 /* set up device control */
3290 if (ap->ioaddr.ctl_addr)
3291 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3297 * ata_dev_same_device - Determine whether new ID matches configured device
3298 * @dev: device to compare against
3299 * @new_class: class of the new device
3300 * @new_id: IDENTIFY page of the new device
3302 * Compare @new_class and @new_id against @dev and determine
3303 * whether @dev is the device indicated by @new_class and
3310 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3312 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3315 const u16 *old_id = dev->id;
3316 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3317 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3320 if (dev->class != new_class) {
3321 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3322 dev->class, new_class);
3326 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3327 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3328 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3329 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3330 new_n_sectors = ata_id_n_sectors(new_id);
3332 if (strcmp(model[0], model[1])) {
3333 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3334 "'%s' != '%s'\n", model[0], model[1]);
3338 if (strcmp(serial[0], serial[1])) {
3339 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3340 "'%s' != '%s'\n", serial[0], serial[1]);
3344 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
3345 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3347 (unsigned long long)dev->n_sectors,
3348 (unsigned long long)new_n_sectors);
3356 * ata_dev_revalidate - Revalidate ATA device
3357 * @dev: device to revalidate
3358 * @readid_flags: read ID flags
3360 * Re-read IDENTIFY page and make sure @dev is still attached to
3364 * Kernel thread context (may sleep)
3367 * 0 on success, negative errno otherwise
3369 int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3371 unsigned int class = dev->class;
3372 u16 *id = (void *)dev->ap->sector_buf;
3375 if (!ata_dev_enabled(dev)) {
3381 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3385 /* is the device still there? */
3386 if (!ata_dev_same_device(dev, class, id)) {
3391 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3393 /* configure device according to the new ID */
3394 rc = ata_dev_configure(dev);
3399 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3403 struct ata_blacklist_entry {
3404 const char *model_num;
3405 const char *model_rev;
3406 unsigned long horkage;
3409 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3410 /* Devices with DMA related problems under Linux */
3411 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3412 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3413 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3414 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3415 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3416 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3417 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3418 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3419 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3420 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3421 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3422 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3423 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3424 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3425 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3426 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3427 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3428 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3429 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3430 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3431 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3432 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3433 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3434 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3435 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3436 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3437 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3438 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3439 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3441 /* Weird ATAPI devices */
3442 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 |
3443 ATA_HORKAGE_DMA_RW_ONLY },
3445 /* Devices we expect to fail diagnostics */
3447 /* Devices where NCQ should be avoided */
3449 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3450 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3451 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3453 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
3454 /* NCQ hard hangs device under heavier load, needs hard power cycle */
3455 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
3456 /* Blacklist entries taken from Silicon Image 3124/3132
3457 Windows driver .inf file - also several Linux problem reports */
3458 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3459 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3460 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3462 /* Devices with NCQ limits */
3468 unsigned long ata_device_blacklisted(const struct ata_device *dev)
3470 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3471 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3472 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3474 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3475 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3477 while (ad->model_num) {
3478 if (!strcmp(ad->model_num, model_num)) {
3479 if (ad->model_rev == NULL)
3481 if (!strcmp(ad->model_rev, model_rev))
3489 static int ata_dma_blacklisted(const struct ata_device *dev)
3491 /* We don't support polling DMA.
3492 * Blacklist DMA for those ATAPI devices with CDB-intr (they use PIO)
3493 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3495 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3496 (dev->flags & ATA_DFLAG_CDB_INTR))
3498 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
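/*
 * Example (sketch): other horkage bits from the same table are tested
 * the same way; a hypothetical NCQ check would look like this.
 */
#if 0
static int example_ncq_blacklisted(const struct ata_device *dev)
{
	/* keyed on model number and, optionally, firmware revision */
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) ? 1 : 0;
}
#endif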
3502 * ata_dev_xfermask - Compute supported xfermask of the given device
3503 * @dev: Device to compute xfermask for
3505 * Compute supported xfermask of @dev and store it in
3506 * dev->*_mask. This function is responsible for applying all
3507 * known limits including host controller limits, device
3513 static void ata_dev_xfermask(struct ata_device *dev)
3515 struct ata_port *ap = dev->ap;
3516 struct ata_host *host = ap->host;
3517 unsigned long xfer_mask;
3519 /* controller modes available */
3520 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3521 ap->mwdma_mask, ap->udma_mask);
3523 /* drive modes available */
3524 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3525 dev->mwdma_mask, dev->udma_mask);
3526 xfer_mask &= ata_id_xfermask(dev->id);
3529 * CFA Advanced TrueIDE timings are not allowed on a shared
3532 if (ata_dev_pair(dev)) {
3533 /* No PIO5 or PIO6 */
3534 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3535 /* No MWDMA3 or MWDMA4 */
3536 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3539 if (ata_dma_blacklisted(dev)) {
3540 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3541 ata_dev_printk(dev, KERN_WARNING,
3542 "device is on DMA blacklist, disabling DMA\n");
3545 if ((host->flags & ATA_HOST_SIMPLEX) &&
3546 host->simplex_claimed && host->simplex_claimed != ap) {
3547 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3548 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3549 "other device, disabling DMA\n");
3552 if (ap->flags & ATA_FLAG_NO_IORDY)
3553 xfer_mask &= ata_pio_mask_no_iordy(dev);
3555 if (ap->ops->mode_filter)
3556 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
3558 /* Apply cable rule here. Don't apply it early because when
3559 * we handle hot plug the cable type can itself change.
3560 * Check this last so that we know if the transfer rate was
3561 * solely limited by the cable.
3562 * Unknown or 80 wire cables reported host side are checked
3563 * drive side as well. Cases where we know a 40wire cable
3564 * is used safely for 80 are not checked here.
3566 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3567 /* UDMA/44 or higher would be available */
3568 if ((ap->cbl == ATA_CBL_PATA40) ||
3569 (ata_drive_40wire(dev->id) &&
3570 (ap->cbl == ATA_CBL_PATA_UNK ||
3571 ap->cbl == ATA_CBL_PATA80))) {
3572 ata_dev_printk(dev, KERN_WARNING,
3573 "limited to UDMA/33 due to 40-wire cable\n");
3574 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3577 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3578 &dev->mwdma_mask, &dev->udma_mask);
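/*
 * Worked example: ata_pack_xfermask() shifts the three per-type masks
 * into a single word.  A device supporting PIO0-4, MWDMA0-2 and
 * UDMA0-5 packs to
 *
 *	(0x1f << ATA_SHIFT_PIO) | (0x07 << ATA_SHIFT_MWDMA) |
 *	(0x3f << ATA_SHIFT_UDMA)
 *
 * and ata_unpack_xfermask() reverses the operation, which is why the
 * function above can mask the packed value and write the result
 * straight back into dev->{pio,mwdma,udma}_mask.
 */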
3582 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3583 * @dev: Device to which command will be sent
3585 * Issue SET FEATURES - XFER MODE command to device @dev
3589 * PCI/etc. bus probe sem.
3592 * 0 on success, AC_ERR_* mask otherwise.
3595 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3597 struct ata_taskfile tf;
3598 unsigned int err_mask;
3600 /* set up set-features taskfile */
3601 DPRINTK("set features - xfer mode\n");
3603 ata_tf_init(dev, &tf);
3604 tf.command = ATA_CMD_SET_FEATURES;
3605 tf.feature = SETFEATURES_XFER;
3606 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3607 tf.protocol = ATA_PROT_NODATA;
3608 tf.nsect = dev->xfer_mode;
3610 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3612 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3617 * ata_dev_init_params - Issue INIT DEV PARAMS command
3618 * @dev: Device to which command will be sent
3619 * @heads: Number of heads (taskfile parameter)
3620 * @sectors: Number of sectors (taskfile parameter)
3623 * Kernel thread context (may sleep)
3626 * 0 on success, AC_ERR_* mask otherwise.
3628 static unsigned int ata_dev_init_params(struct ata_device *dev,
3629 u16 heads, u16 sectors)
3631 struct ata_taskfile tf;
3632 unsigned int err_mask;
3634 /* Number of sectors per track 1-255. Number of heads 1-16 */
3635 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3636 return AC_ERR_INVALID;
3638 /* set up init dev params taskfile */
3639 DPRINTK("init dev params\n");
3641 ata_tf_init(dev, &tf);
3642 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3643 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3644 tf.protocol = ATA_PROT_NODATA;
3646 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
3648 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3650 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3655 * ata_sg_clean - Unmap DMA memory associated with command
3656 * @qc: Command containing DMA memory to be released
3658 * Unmap all mapped DMA memory associated with this command.
3661 * spin_lock_irqsave(host lock)
3663 void ata_sg_clean(struct ata_queued_cmd *qc)
3665 struct ata_port *ap = qc->ap;
3666 struct scatterlist *sg = qc->__sg;
3667 int dir = qc->dma_dir;
3668 void *pad_buf = NULL;
3670 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3671 WARN_ON(sg == NULL);
3673 if (qc->flags & ATA_QCFLAG_SINGLE)
3674 WARN_ON(qc->n_elem > 1);
3676 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
3678 /* if we padded the buffer out to a 32-bit boundary, and the
3679 * data xfer direction is from-device, we must copy from the
3680 * pad buffer back into the supplied buffer
3682 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3683 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3685 if (qc->flags & ATA_QCFLAG_SG) {
3687 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
3688 /* restore last sg */
3689 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3691 struct scatterlist *psg = &qc->pad_sgent;
3692 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3693 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
3694 kunmap_atomic(addr, KM_IRQ0);
3698 dma_unmap_single(ap->dev,
3699 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3702 sg->length += qc->pad_len;
3704 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3705 pad_buf, qc->pad_len);
3708 qc->flags &= ~ATA_QCFLAG_DMAMAP;
3713 * ata_fill_sg - Fill PCI IDE PRD table
3714 * @qc: Metadata associated with taskfile to be transferred
3716 * Fill PCI IDE PRD (scatter-gather) table with segments
3717 * associated with the current disk command.
3720 * spin_lock_irqsave(host lock)
3723 static void ata_fill_sg(struct ata_queued_cmd *qc)
3725 struct ata_port *ap = qc->ap;
3726 struct scatterlist *sg;
3729 WARN_ON(qc->__sg == NULL);
3730 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
3733 ata_for_each_sg(sg, qc) {
3737 /* determine if physical DMA addr spans 64K boundary.
3738 * Note h/w doesn't support 64-bit, so we unconditionally
3739 * truncate dma_addr_t to u32.
3741 addr = (u32) sg_dma_address(sg);
3742 sg_len = sg_dma_len(sg);
3745 offset = addr & 0xffff;
3747 if ((offset + sg_len) > 0x10000)
3748 len = 0x10000 - offset;
3750 ap->prd[idx].addr = cpu_to_le32(addr);
3751 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3752 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3761 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
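/*
 * Worked example: a 24KiB segment at bus address 0x0001c000 starts at
 * offset 0xc000 within its 64KiB window, and 0xc000 + 0x6000 exceeds
 * 0x10000, so the loop above splits it into two PRD entries:
 *
 *	PRD[n]   = { addr 0x0001c000, len 0x4000 }
 *	PRD[n+1] = { addr 0x00020000, len 0x2000 }
 *
 * with ATA_PRD_EOT set on the final entry of the table.
 */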
3764 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3765 * @qc: Metadata associated with taskfile to check
3767 * Allow low-level driver to filter ATA PACKET commands, returning
3768 * a status indicating whether or not it is OK to use DMA for the
3769 * supplied PACKET command.
3772 * spin_lock_irqsave(host lock)
3774 * RETURNS: 0 when ATAPI DMA can be used
3777 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3779 struct ata_port *ap = qc->ap;
3780 int rc = 0; /* Assume ATAPI DMA is OK by default */
3782 /* some drives can only do ATAPI DMA on read/write */
3783 if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
3784 struct scsi_cmnd *cmd = qc->scsicmd;
3785 u8 *scsicmd = cmd->cmnd;
3787 switch (scsicmd[0]) {
3794 /* atapi dma may be ok */
3797 /* turn off atapi dma */
3802 if (ap->ops->check_atapi_dma)
3803 rc = ap->ops->check_atapi_dma(qc);
3808 * ata_qc_prep - Prepare taskfile for submission
3809 * @qc: Metadata associated with taskfile to be prepared
3811 * Prepare ATA taskfile for submission.
3814 * spin_lock_irqsave(host lock)
3816 void ata_qc_prep(struct ata_queued_cmd *qc)
3818 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3824 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3827 * ata_sg_init_one - Associate command with memory buffer
3828 * @qc: Command to be associated
3829 * @buf: Memory buffer
3830 * @buflen: Length of memory buffer, in bytes.
3832 * Initialize the data-related elements of queued_cmd @qc
3833 * to point to a single memory buffer, @buf of byte length @buflen.
3836 * spin_lock_irqsave(host lock)
3839 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3841 qc->flags |= ATA_QCFLAG_SINGLE;
3843 qc->__sg = &qc->sgent;
3845 qc->orig_n_elem = 1;
3847 qc->nbytes = buflen;
3849 sg_init_one(&qc->sgent, buf, buflen);
3853 * ata_sg_init - Associate command with scatter-gather table.
3854 * @qc: Command to be associated
3855 * @sg: Scatter-gather table.
3856 * @n_elem: Number of elements in s/g table.
3858 * Initialize the data-related elements of queued_cmd @qc
3859 * to point to a scatter-gather table @sg, containing @n_elem
3863 * spin_lock_irqsave(host lock)
3866 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3867 unsigned int n_elem)
3869 qc->flags |= ATA_QCFLAG_SG;
3871 qc->n_elem = n_elem;
3872 qc->orig_n_elem = n_elem;
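/*
 * Example (hedged sketch): a single-buffer command attaches its data
 * before issue roughly like this; ata_sg_init_one() fills in
 * qc->nbytes and the embedded sgent itself.
 */
#if 0
static void example_attach_buf(struct ata_queued_cmd *qc,
			       void *buf, unsigned int buflen)
{
	if (buflen)
		ata_sg_init_one(qc, buf, buflen);
}
#endif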
3876 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3877 * @qc: Command with memory buffer to be mapped.
3879 * DMA-map the memory buffer associated with queued_cmd @qc.
3882 * spin_lock_irqsave(host lock)
3885 * Zero on success, negative on error.
3888 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3890 struct ata_port *ap = qc->ap;
3891 int dir = qc->dma_dir;
3892 struct scatterlist *sg = qc->__sg;
3893 dma_addr_t dma_address;
3896 /* we must lengthen transfers to end on a 32-bit boundary */
3897 qc->pad_len = sg->length & 3;
3899 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3900 struct scatterlist *psg = &qc->pad_sgent;
3902 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3904 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3906 if (qc->tf.flags & ATA_TFLAG_WRITE)
3907 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3910 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3911 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3913 sg->length -= qc->pad_len;
3914 if (sg->length == 0)
3917 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3918 sg->length, qc->pad_len);
3926 dma_address = dma_map_single(ap->dev, qc->buf_virt,
3928 if (dma_mapping_error(dma_address)) {
3930 sg->length += qc->pad_len;
3934 sg_dma_address(sg) = dma_address;
3935 sg_dma_len(sg) = sg->length;
3938 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3939 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3945 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3946 * @qc: Command with scatter-gather table to be mapped.
3948 * DMA-map the scatter-gather table associated with queued_cmd @qc.
3951 * spin_lock_irqsave(host lock)
3954 * Zero on success, negative on error.
3958 static int ata_sg_setup(struct ata_queued_cmd *qc)
3960 struct ata_port *ap = qc->ap;
3961 struct scatterlist *sg = qc->__sg;
3962 struct scatterlist *lsg = &sg[qc->n_elem - 1];
3963 int n_elem, pre_n_elem, dir, trim_sg = 0;
3965 VPRINTK("ENTER, ata%u\n", ap->print_id);
3966 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
3968 /* we must lengthen transfers to end on a 32-bit boundary */
3969 qc->pad_len = lsg->length & 3;
3971 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3972 struct scatterlist *psg = &qc->pad_sgent;
3973 unsigned int offset;
3975 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
3977 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3980 * psg->page/offset are used to copy to-be-written
3981 * data in this function or read data in ata_sg_clean.
3983 offset = lsg->offset + lsg->length - qc->pad_len;
3984 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3985 psg->offset = offset_in_page(offset);
3987 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3988 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3989 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
3990 kunmap_atomic(addr, KM_IRQ0);
3993 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3994 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3996 lsg->length -= qc->pad_len;
3997 if (lsg->length == 0)
4000 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4001 qc->n_elem - 1, lsg->length, qc->pad_len);
4004 pre_n_elem = qc->n_elem;
4005 if (trim_sg && pre_n_elem)
4014 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4016 /* restore last sg */
4017 lsg->length += qc->pad_len;
4021 DPRINTK("%d sg elements mapped\n", n_elem);
4024 qc->n_elem = n_elem;
4030 * swap_buf_le16 - swap halves of 16-bit words in place
4031 * @buf: Buffer to swap
4032 * @buf_words: Number of 16-bit words in buffer.
4034 * Swap halves of 16-bit words if needed to convert from
4035 * little-endian byte order to native cpu byte order, or
4039 * Inherited from caller.
4041 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4046 for (i = 0; i < buf_words; i++)
4047 buf[i] = le16_to_cpu(buf[i]);
4048 #endif /* __BIG_ENDIAN */
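/*
 * Example: IDENTIFY DEVICE data arrives as 16-bit words in ATA
 * (little-endian) order, so ID readers do, in effect,
 *
 *	swap_buf_le16(id, ATA_ID_WORDS);
 *
 * which compiles to nothing on little-endian machines and to a
 * 256-word swab on big-endian ones.
 */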
4052 * ata_data_xfer - Transfer data by PIO
4053 * @adev: device to target
4055 * @buflen: buffer length
4056 * @write_data: read/write
4058 * Transfer data from/to the device data register by PIO.
4061 * Inherited from caller.
4063 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4064 unsigned int buflen, int write_data)
4066 struct ata_port *ap = adev->ap;
4067 unsigned int words = buflen >> 1;
4069 /* Transfer multiple of 2 bytes */
4071 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4073 ioread16_rep(ap->ioaddr.data_addr, buf, words);
4075 /* Transfer trailing 1 byte, if any. */
4076 if (unlikely(buflen & 0x01)) {
4077 u16 align_buf[1] = { 0 };
4078 unsigned char *trailing_buf = buf + buflen - 1;
4081 memcpy(align_buf, trailing_buf, 1);
4082 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4084 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4085 memcpy(trailing_buf, align_buf, 1);
4091 * ata_data_xfer_noirq - Transfer data by PIO
4092 * @adev: device to target
4094 * @buflen: buffer length
4095 * @write_data: read/write
4097 * Transfer data from/to the device data register by PIO. Do the
4098 * transfer with interrupts disabled.
4101 * Inherited from caller.
4103 void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4104 unsigned int buflen, int write_data)
4106 unsigned long flags;
4107 local_irq_save(flags);
4108 ata_data_xfer(adev, buf, buflen, write_data);
4109 local_irq_restore(flags);
4114 * ata_pio_sector - Transfer a sector of data.
4115 * @qc: Command on going
4117 * Transfer qc->sect_size bytes of data from/to the ATA device.
4120 * Inherited from caller.
4123 static void ata_pio_sector(struct ata_queued_cmd *qc)
4125 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4126 struct scatterlist *sg = qc->__sg;
4127 struct ata_port *ap = qc->ap;
4129 unsigned int offset;
4132 if (qc->curbytes == qc->nbytes - qc->sect_size)
4133 ap->hsm_task_state = HSM_ST_LAST;
4135 page = sg[qc->cursg].page;
4136 offset = sg[qc->cursg].offset + qc->cursg_ofs;
4138 /* get the current page and offset */
4139 page = nth_page(page, (offset >> PAGE_SHIFT));
4140 offset %= PAGE_SIZE;
4142 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4144 if (PageHighMem(page)) {
4145 unsigned long flags;
4147 /* FIXME: use a bounce buffer */
4148 local_irq_save(flags);
4149 buf = kmap_atomic(page, KM_IRQ0);
4151 /* do the actual data transfer */
4152 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4154 kunmap_atomic(buf, KM_IRQ0);
4155 local_irq_restore(flags);
4157 buf = page_address(page);
4158 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4161 qc->curbytes += qc->sect_size;
4162 qc->cursg_ofs += qc->sect_size;
4164 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4171 * ata_pio_sectors - Transfer one or many sectors.
4172 * @qc: Command on going
4174 * Transfer one or many sectors of data from/to the
4175 * ATA device for the DRQ request.
4178 * Inherited from caller.
4181 static void ata_pio_sectors(struct ata_queued_cmd *qc)
4183 if (is_multi_taskfile(&qc->tf)) {
4184 /* READ/WRITE MULTIPLE */
4187 WARN_ON(qc->dev->multi_count == 0);
4189 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4190 qc->dev->multi_count);
4198 * atapi_send_cdb - Write CDB bytes to hardware
4199 * @ap: Port to which ATAPI device is attached.
4200 * @qc: Taskfile currently active
4202 * When device has indicated its readiness to accept
4203 * a CDB, this function is called. Send the CDB.
4209 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4212 DPRINTK("send cdb\n");
4213 WARN_ON(qc->dev->cdb_len < 12);
4215 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4216 ata_altstatus(ap); /* flush */
4218 switch (qc->tf.protocol) {
4219 case ATA_PROT_ATAPI:
4220 ap->hsm_task_state = HSM_ST;
4222 case ATA_PROT_ATAPI_NODATA:
4223 ap->hsm_task_state = HSM_ST_LAST;
4225 case ATA_PROT_ATAPI_DMA:
4226 ap->hsm_task_state = HSM_ST_LAST;
4227 /* initiate bmdma */
4228 ap->ops->bmdma_start(qc);
4234 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4235 * @qc: Command on going
4236 * @bytes: number of bytes
4238 * Transfer data from/to the ATAPI device.
4241 * Inherited from caller.
4245 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4247 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4248 struct scatterlist *sg = qc->__sg;
4249 struct ata_port *ap = qc->ap;
4252 unsigned int offset, count;
4254 if (qc->curbytes + bytes >= qc->nbytes)
4255 ap->hsm_task_state = HSM_ST_LAST;
4258 if (unlikely(qc->cursg >= qc->n_elem)) {
4260 * The end of qc->sg is reached and the device expects
4261 * more data to transfer. In order not to overrun qc->sg
4262 * while still satisfying the length in the byte count register,
4263 * - for the read case, discard trailing data from the device
4264 * - for the write case, pad with zero data to the device
4266 u16 pad_buf[1] = { 0 };
4267 unsigned int words = bytes >> 1;
4270 if (words) /* warning if bytes > 1 */
4271 ata_dev_printk(qc->dev, KERN_WARNING,
4272 "%u bytes trailing data\n", bytes);
4274 for (i = 0; i < words; i++)
4275 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4277 ap->hsm_task_state = HSM_ST_LAST;
4281 sg = &qc->__sg[qc->cursg];
4284 offset = sg->offset + qc->cursg_ofs;
4286 /* get the current page and offset */
4287 page = nth_page(page, (offset >> PAGE_SHIFT));
4288 offset %= PAGE_SIZE;
4290 /* don't overrun current sg */
4291 count = min(sg->length - qc->cursg_ofs, bytes);
4293 /* don't cross page boundaries */
4294 count = min(count, (unsigned int)PAGE_SIZE - offset);
4296 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4298 if (PageHighMem(page)) {
4299 unsigned long flags;
4301 /* FIXME: use bounce buffer */
4302 local_irq_save(flags);
4303 buf = kmap_atomic(page, KM_IRQ0);
4305 /* do the actual data transfer */
4306 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4308 kunmap_atomic(buf, KM_IRQ0);
4309 local_irq_restore(flags);
4311 buf = page_address(page);
4312 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4316 qc->curbytes += count;
4317 qc->cursg_ofs += count;
4319 if (qc->cursg_ofs == sg->length) {
4329 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4330 * @qc: Command on going
4332 * Transfer data from/to the ATAPI device.
4335 * Inherited from caller.
4338 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4340 struct ata_port *ap = qc->ap;
4341 struct ata_device *dev = qc->dev;
4342 unsigned int ireason, bc_lo, bc_hi, bytes;
4343 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4345 /* Abuse qc->result_tf for temp storage of intermediate TF
4346 * here to save some kernel stack usage.
4347 * For normal completion, qc->result_tf is not relevant. For
4348 * error, qc->result_tf is later overwritten by ata_qc_complete().
4349 * So, the correctness of qc->result_tf is not affected.
4351 ap->ops->tf_read(ap, &qc->result_tf);
4352 ireason = qc->result_tf.nsect;
4353 bc_lo = qc->result_tf.lbam;
4354 bc_hi = qc->result_tf.lbah;
4355 bytes = (bc_hi << 8) | bc_lo;
4357 /* shall be cleared to zero, indicating xfer of data */
4358 if (ireason & (1 << 0))
4361 /* make sure transfer direction matches expected */
4362 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4363 if (do_write != i_write)
4366 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4368 __atapi_pio_bytes(qc, bytes);
4373 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4374 qc->err_mask |= AC_ERR_HSM;
4375 ap->hsm_task_state = HSM_ST_ERR;
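/*
 * Worked example of the byte count above: LBAM = 0x00 and LBAH = 0x02
 * announce a (0x02 << 8) | 0x00 = 512 byte transfer for this DRQ
 * block.
 */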
4379 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4380 * @ap: the target ata_port
4384 * 1 if ok in workqueue, 0 otherwise.
4387 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4389 if (qc->tf.flags & ATA_TFLAG_POLLING)
4392 if (ap->hsm_task_state == HSM_ST_FIRST) {
4393 if (qc->tf.protocol == ATA_PROT_PIO &&
4394 (qc->tf.flags & ATA_TFLAG_WRITE))
4397 if (is_atapi_taskfile(&qc->tf) &&
4398 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4406 * ata_hsm_qc_complete - finish a qc running on standard HSM
4407 * @qc: Command to complete
4408 * @in_wq: 1 if called from workqueue, 0 otherwise
4410 * Finish @qc which is running on standard HSM.
4413 * If @in_wq is zero, spin_lock_irqsave(host lock).
4414 * Otherwise, none on entry and grabs host lock.
4416 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4418 struct ata_port *ap = qc->ap;
4419 unsigned long flags;
4421 if (ap->ops->error_handler) {
4423 spin_lock_irqsave(ap->lock, flags);
4425 /* EH might have kicked in while host lock is
4428 qc = ata_qc_from_tag(ap, qc->tag);
4430 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4431 ap->ops->irq_on(ap);
4432 ata_qc_complete(qc);
4434 ata_port_freeze(ap);
4437 spin_unlock_irqrestore(ap->lock, flags);
4439 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4440 ata_qc_complete(qc);
4442 ata_port_freeze(ap);
4446 spin_lock_irqsave(ap->lock, flags);
4447 ap->ops->irq_on(ap);
4448 ata_qc_complete(qc);
4449 spin_unlock_irqrestore(ap->lock, flags);
4451 ata_qc_complete(qc);
4454 ata_altstatus(ap); /* flush */
4458 * ata_hsm_move - move the HSM to the next state.
4459 * @ap: the target ata_port
4461 * @status: current device status
4462 * @in_wq: 1 if called from workqueue, 0 otherwise
4465 * 1 when poll next status needed, 0 otherwise.
4467 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4468 u8 status, int in_wq)
4470 unsigned long flags = 0;
4473 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4475 /* Make sure ata_qc_issue_prot() does not throw things
4476 * like DMA polling into the workqueue. Notice that
4477 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4479 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4482 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4483 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4485 switch (ap->hsm_task_state) {
4487 /* Send first data block or PACKET CDB */
4489 /* If polling, we will stay in the work queue after
4490 * sending the data. Otherwise, interrupt handler
4491 * takes over after sending the data.
4493 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4495 /* check device status */
4496 if (unlikely((status & ATA_DRQ) == 0)) {
4497 /* handle BSY=0, DRQ=0 as error */
4498 if (likely(status & (ATA_ERR | ATA_DF)))
4499 /* device stops HSM for abort/error */
4500 qc->err_mask |= AC_ERR_DEV;
4502 /* HSM violation. Let EH handle this */
4503 qc->err_mask |= AC_ERR_HSM;
4505 ap->hsm_task_state = HSM_ST_ERR;
4509 /* Device should not ask for data transfer (DRQ=1)
4510 * when it finds something wrong.
4511 * We ignore DRQ here and stop the HSM by
4512 * changing hsm_task_state to HSM_ST_ERR and
4513 * let the EH abort the command or reset the device.
4515 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4516 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4517 "error, dev_stat 0x%X\n", status);
4518 qc->err_mask |= AC_ERR_HSM;
4519 ap->hsm_task_state = HSM_ST_ERR;
4523 /* Send the CDB (atapi) or the first data block (ata pio out).
4524 * During the state transition, interrupt handler shouldn't
4525 * be invoked before the data transfer is complete and
4526 * hsm_task_state is changed. Hence, the following locking.
4529 spin_lock_irqsave(ap->lock, flags);
4531 if (qc->tf.protocol == ATA_PROT_PIO) {
4532 /* PIO data out protocol.
4533 * send first data block.
4536 /* ata_pio_sectors() might change the state
4537 * to HSM_ST_LAST. so, the state is changed here
4538 * before ata_pio_sectors().
4540 ap->hsm_task_state = HSM_ST;
4541 ata_pio_sectors(qc);
4542 ata_altstatus(ap); /* flush */
4545 atapi_send_cdb(ap, qc);
4548 spin_unlock_irqrestore(ap->lock, flags);
4550 /* if polling, ata_pio_task() handles the rest.
4551 * otherwise, interrupt handler takes over from here.
4556 /* complete command or read/write the data register */
4557 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4558 /* ATAPI PIO protocol */
4559 if ((status & ATA_DRQ) == 0) {
4560 /* No more data to transfer or device error.
4561 * Device error will be tagged in HSM_ST_LAST.
4563 ap->hsm_task_state = HSM_ST_LAST;
4567 /* Device should not ask for data transfer (DRQ=1)
4568 * when it finds something wrong.
4569 * We ignore DRQ here and stop the HSM by
4570 * changing hsm_task_state to HSM_ST_ERR and
4571 * let the EH abort the command or reset the device.
4573 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4574 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4575 "device error, dev_stat 0x%X\n",
4577 qc->err_mask |= AC_ERR_HSM;
4578 ap->hsm_task_state = HSM_ST_ERR;
4582 atapi_pio_bytes(qc);
4584 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4585 /* bad ireason reported by device */
4589 /* ATA PIO protocol */
4590 if (unlikely((status & ATA_DRQ) == 0)) {
4591 /* handle BSY=0, DRQ=0 as error */
4592 if (likely(status & (ATA_ERR | ATA_DF)))
4593 /* device stops HSM for abort/error */
4594 qc->err_mask |= AC_ERR_DEV;
4596 /* HSM violation. Let EH handle this.
4597 * Phantom devices also trigger this
4598 * condition. Mark the no-device hint.
4600 qc->err_mask |= AC_ERR_HSM |
4603 ap->hsm_task_state = HSM_ST_ERR;
4607 /* For PIO reads, some devices may ask for
4608 * data transfer (DRQ=1) along with ERR=1.
4609 * We respect DRQ here and transfer one
4610 * block of junk data before changing the
4611 * hsm_task_state to HSM_ST_ERR.
4613 * For PIO writes, ERR=1 DRQ=1 doesn't make
4614 * sense since the data block has been
4615 * transferred to the device.
4617 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4618 /* data might be corrupted */
4619 qc->err_mask |= AC_ERR_DEV;
4621 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4622 ata_pio_sectors(qc);
4624 status = ata_wait_idle(ap);
4627 if (status & (ATA_BUSY | ATA_DRQ))
4628 qc->err_mask |= AC_ERR_HSM;
4630 /* ata_pio_sectors() might change the
4631 * state to HSM_ST_LAST. so, the state
4632 * is changed after ata_pio_sectors().
4634 ap->hsm_task_state = HSM_ST_ERR;
4638 ata_pio_sectors(qc);
4640 if (ap->hsm_task_state == HSM_ST_LAST &&
4641 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4644 status = ata_wait_idle(ap);
4649 ata_altstatus(ap); /* flush */
4654 if (unlikely(!ata_ok(status))) {
4655 qc->err_mask |= __ac_err_mask(status);
4656 ap->hsm_task_state = HSM_ST_ERR;
4660 /* no more data to transfer */
4661 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4662 ap->print_id, qc->dev->devno, status);
4664 WARN_ON(qc->err_mask);
4666 ap->hsm_task_state = HSM_ST_IDLE;
4668 /* complete taskfile transaction */
4669 ata_hsm_qc_complete(qc, in_wq);
4675 /* make sure qc->err_mask is available to
4676 * know what's wrong and recover
4678 WARN_ON(qc->err_mask == 0);
4680 ap->hsm_task_state = HSM_ST_IDLE;
4682 /* complete taskfile transaction */
4683 ata_hsm_qc_complete(qc, in_wq);
4695 static void ata_pio_task(struct work_struct *work)
4697 struct ata_port *ap =
4698 container_of(work, struct ata_port, port_task.work);
4699 struct ata_queued_cmd *qc = ap->port_task_data;
4704 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
4707 * This is purely heuristic. This is a fast path.
4708 * Sometimes when we enter, BSY will be cleared in
4709 * a chk-status or two. If not, the drive is probably seeking
4710 * or something. Snooze for a couple msecs, then
4711 * chk-status again. If still busy, queue delayed work.
4713 status = ata_busy_wait(ap, ATA_BUSY, 5);
4714 if (status & ATA_BUSY) {
4716 status = ata_busy_wait(ap, ATA_BUSY, 10);
4717 if (status & ATA_BUSY) {
4718 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
4724 poll_next = ata_hsm_move(ap, qc, status, 1);
4726 /* another command or interrupt handler
4727 * may be running at this point.
4734 * ata_qc_new - Request an available ATA command, for queueing
4735 * @ap: Port associated with device @dev
4736 * @dev: Device from which we request an available command structure
4742 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4744 struct ata_queued_cmd *qc = NULL;
4747 /* no command while frozen */
4748 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
4751 /* the last tag is reserved for the internal command. */
4752 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
4753 if (!test_and_set_bit(i, &ap->qc_allocated)) {
4754 qc = __ata_qc_from_tag(ap, i);
4765 * ata_qc_new_init - Request an available ATA command, and initialize it
4766 * @dev: Device from which we request an available command structure
4772 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
4774 struct ata_port *ap = dev->ap;
4775 struct ata_queued_cmd *qc;
4777 qc = ata_qc_new(ap);
4790 * ata_qc_free - free unused ata_queued_cmd
4791 * @qc: Command to complete
4793 * Designed to free unused ata_queued_cmd object
4794 * in case something prevents using it.
4797 * spin_lock_irqsave(host lock)
4799 void ata_qc_free(struct ata_queued_cmd *qc)
4801 struct ata_port *ap = qc->ap;
4804 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4808 if (likely(ata_tag_valid(tag))) {
4809 qc->tag = ATA_TAG_POISON;
4810 clear_bit(tag, &ap->qc_allocated);
4814 void __ata_qc_complete(struct ata_queued_cmd *qc)
4816 struct ata_port *ap = qc->ap;
4818 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4819 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
4821 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4824 /* command should be marked inactive atomically with qc completion */
4825 if (qc->tf.protocol == ATA_PROT_NCQ)
4826 ap->sactive &= ~(1 << qc->tag);
4828 ap->active_tag = ATA_TAG_POISON;
4830 /* atapi: mark qc as inactive to prevent the interrupt handler
4831 * from completing the command twice later, before the error handler
4832 * is called. (when rc != 0 and atapi request sense is needed)
4834 qc->flags &= ~ATA_QCFLAG_ACTIVE;
4835 ap->qc_active &= ~(1 << qc->tag);
4837 /* call completion callback */
4838 qc->complete_fn(qc);
4841 static void fill_result_tf(struct ata_queued_cmd *qc)
4843 struct ata_port *ap = qc->ap;
4845 qc->result_tf.flags = qc->tf.flags;
4846 ap->ops->tf_read(ap, &qc->result_tf);
4850 * ata_qc_complete - Complete an active ATA command
4851 * @qc: Command to complete
4854 * Indicate to the mid and upper layers that an ATA
4855 * command has completed, with either an ok or not-ok status.
4858 * spin_lock_irqsave(host lock)
4860 void ata_qc_complete(struct ata_queued_cmd *qc)
4862 struct ata_port *ap = qc->ap;
4864 /* XXX: New EH and old EH use different mechanisms to
4865 * synchronize EH with regular execution path.
4867 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4868 * Normal execution path is responsible for not accessing a
4869 * failed qc. libata core enforces the rule by returning NULL
4870 * from ata_qc_from_tag() for failed qcs.
4872 * Old EH depends on ata_qc_complete() nullifying completion
4873 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4874 * not synchronize with interrupt handler. Only PIO task is
4877 if (ap->ops->error_handler) {
4878 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
4880 if (unlikely(qc->err_mask))
4881 qc->flags |= ATA_QCFLAG_FAILED;
4883 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4884 if (!ata_tag_internal(qc->tag)) {
4885 /* always fill result TF for failed qc */
4887 ata_qc_schedule_eh(qc);
4892 /* read result TF if requested */
4893 if (qc->flags & ATA_QCFLAG_RESULT_TF)
4896 __ata_qc_complete(qc);
4898 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4901 /* read result TF if failed or requested */
4902 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
4905 __ata_qc_complete(qc);
4910 * ata_qc_complete_multiple - Complete multiple qcs successfully
4911 * @ap: port in question
4912 * @qc_active: new qc_active mask
4913 * @finish_qc: LLDD callback invoked before completing a qc
4915 * Complete in-flight commands. This function is meant to be
4916 * called from a low-level driver's interrupt routine to complete
4917 * requests normally. ap->qc_active and @qc_active are compared
4918 * and commands are completed accordingly.
4921 * spin_lock_irqsave(host lock)
4924 * Number of completed commands on success, -errno otherwise.
4926 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4927 void (*finish_qc)(struct ata_queued_cmd *))
4933 done_mask = ap->qc_active ^ qc_active;
4935 if (unlikely(done_mask & qc_active)) {
4936 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4937 "(%08x->%08x)\n", ap->qc_active, qc_active);
4941 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4942 struct ata_queued_cmd *qc;
4944 if (!(done_mask & (1 << i)))
4947 if ((qc = ata_qc_from_tag(ap, i))) {
4950 ata_qc_complete(qc);
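/*
 * Illustrative sketch: an NCQ-capable LLDD typically reads the
 * controller's outstanding-tag register in its interrupt handler and
 * lets ata_qc_complete_multiple() finish whatever is no longer in
 * flight.  example_ncq_intr() and @hw_active are hypothetical;
 * @hw_active stands for the HBA's current view of active tags.
 */
static inline int example_ncq_intr(struct ata_port *ap, u32 hw_active)
{
        /* tags set in ap->qc_active but clear in @hw_active are done */
        return ata_qc_complete_multiple(ap, hw_active, NULL);
}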
4958 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4960 struct ata_port *ap = qc->ap;
4962 switch (qc->tf.protocol) {
4965 case ATA_PROT_ATAPI_DMA:
4968 case ATA_PROT_ATAPI:
4970 if (ap->flags & ATA_FLAG_PIO_DMA)
4983 * ata_qc_issue - issue taskfile to device
4984 * @qc: command to issue to device
4986 * Prepare an ATA command for submission to the device.
4987 * This includes mapping the data into a DMA-able
4988 * area, filling in the S/G table, and finally
4989 * writing the taskfile to hardware, starting the command.
4992 * spin_lock_irqsave(host lock)
4994 void ata_qc_issue(struct ata_queued_cmd *qc)
4996 struct ata_port *ap = qc->ap;
4998 /* Make sure only one non-NCQ command is outstanding. The
4999 * check is skipped for old EH because it reuses active qc to
5000 * request ATAPI sense.
5002 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5004 if (qc->tf.protocol == ATA_PROT_NCQ) {
5005 WARN_ON(ap->sactive & (1 << qc->tag));
5006 ap->sactive |= 1 << qc->tag;
5008 WARN_ON(ap->sactive);
5009 ap->active_tag = qc->tag;
5012 qc->flags |= ATA_QCFLAG_ACTIVE;
5013 ap->qc_active |= 1 << qc->tag;
5015 if (ata_should_dma_map(qc)) {
5016 if (qc->flags & ATA_QCFLAG_SG) {
5017 if (ata_sg_setup(qc))
5019 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5020 if (ata_sg_setup_one(qc))
5024 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5027 ap->ops->qc_prep(qc);
5029 qc->err_mask |= ap->ops->qc_issue(qc);
5030 if (unlikely(qc->err_mask))
5035 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5036 qc->err_mask |= AC_ERR_SYSTEM;
5038 ata_qc_complete(qc);
5042 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5043 * @qc: command to issue to device
5045 * Using various libata functions and hooks, this function
5046 * starts an ATA command. ATA commands are grouped into
5047 * classes called "protocols", and issuing each type of protocol
5048 * is slightly different.
5050 * May be used as the qc_issue() entry in ata_port_operations.
5053 * spin_lock_irqsave(host lock)
5056 * Zero on success, AC_ERR_* mask on failure
5059 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5061 struct ata_port *ap = qc->ap;
5063 /* Use polling PIO if the LLD doesn't handle
5064 * interrupt-driven PIO and ATAPI CDB interrupt.
5066 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5067 switch (qc->tf.protocol) {
5069 case ATA_PROT_NODATA:
5070 case ATA_PROT_ATAPI:
5071 case ATA_PROT_ATAPI_NODATA:
5072 qc->tf.flags |= ATA_TFLAG_POLLING;
5074 case ATA_PROT_ATAPI_DMA:
5075 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5076 /* see ata_dma_blacklisted() */
5084 /* Some controllers show flaky interrupt behavior after
5085 * setting xfer mode. Use polling instead.
5087 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
5088 qc->tf.feature == SETFEATURES_XFER) &&
5089 (ap->flags & ATA_FLAG_SETXFER_POLLING))
5090 qc->tf.flags |= ATA_TFLAG_POLLING;
5092 /* select the device */
5093 ata_dev_select(ap, qc->dev->devno, 1, 0);
5095 /* start the command */
5096 switch (qc->tf.protocol) {
5097 case ATA_PROT_NODATA:
5098 if (qc->tf.flags & ATA_TFLAG_POLLING)
5099 ata_qc_set_polling(qc);
5101 ata_tf_to_host(ap, &qc->tf);
5102 ap->hsm_task_state = HSM_ST_LAST;
5104 if (qc->tf.flags & ATA_TFLAG_POLLING)
5105 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5110 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5112 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5113 ap->ops->bmdma_setup(qc); /* set up bmdma */
5114 ap->ops->bmdma_start(qc); /* initiate bmdma */
5115 ap->hsm_task_state = HSM_ST_LAST;
5119 if (qc->tf.flags & ATA_TFLAG_POLLING)
5120 ata_qc_set_polling(qc);
5122 ata_tf_to_host(ap, &qc->tf);
5124 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5125 /* PIO data out protocol */
5126 ap->hsm_task_state = HSM_ST_FIRST;
5127 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5129 /* always send first data block using
5130 * the ata_pio_task() codepath.
5133 /* PIO data in protocol */
5134 ap->hsm_task_state = HSM_ST;
5136 if (qc->tf.flags & ATA_TFLAG_POLLING)
5137 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5139 /* if polling, ata_pio_task() handles the rest.
5140 * otherwise, interrupt handler takes over from here.
5146 case ATA_PROT_ATAPI:
5147 case ATA_PROT_ATAPI_NODATA:
5148 if (qc->tf.flags & ATA_TFLAG_POLLING)
5149 ata_qc_set_polling(qc);
5151 ata_tf_to_host(ap, &qc->tf);
5153 ap->hsm_task_state = HSM_ST_FIRST;
5155 /* send cdb by polling if no cdb interrupt */
5156 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5157 (qc->tf.flags & ATA_TFLAG_POLLING))
5158 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5161 case ATA_PROT_ATAPI_DMA:
5162 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5164 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5165 ap->ops->bmdma_setup(qc); /* set up bmdma */
5166 ap->hsm_task_state = HSM_ST_FIRST;
5168 /* send cdb by polling if no cdb interrupt */
5169 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5170 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5175 return AC_ERR_SYSTEM;
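/*
 * Illustrative sketch: a conventional taskfile/BMDMA LLDD hooks the
 * helper above straight into its port operations.  example_ops is a
 * hypothetical fragment, not a complete ata_port_operations:
 *
 *	static const struct ata_port_operations example_ops = {
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		.irq_handler	= ata_interrupt,
 *		...
 *	};
 */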
5182 * ata_host_intr - Handle host interrupt for given (port, task)
5183 * @ap: Port on which interrupt arrived (possibly...)
5184 * @qc: Taskfile currently active in engine
5186 * Handle host interrupt for given queued command.  DMA, PIO and
5187 * ATAPI CDB interrupts are handled here; polled commands are instead
5188 * handled by ata_pio_task() with device interrupts disabled (nIEN bit).
5191 * spin_lock_irqsave(host lock)
5194 * One if interrupt was handled, zero if not (shared irq).
5197 inline unsigned int ata_host_intr (struct ata_port *ap,
5198 struct ata_queued_cmd *qc)
5200 struct ata_eh_info *ehi = &ap->eh_info;
5201 u8 status, host_stat = 0;
5203 VPRINTK("ata%u: protocol %d task_state %d\n",
5204 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5206 /* Check whether we are expecting interrupt in this state */
5207 switch (ap->hsm_task_state) {
5209 /* Some pre-ATAPI-4 devices assert INTRQ
5210 * at this state when ready to receive CDB.
5213 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5214 * The flag was turned on only for atapi devices.
5215 * No need to check is_atapi_taskfile(&qc->tf) again.
5217 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5221 if (qc->tf.protocol == ATA_PROT_DMA ||
5222 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5223 /* check status of DMA engine */
5224 host_stat = ap->ops->bmdma_status(ap);
5225 VPRINTK("ata%u: host_stat 0x%X\n",
5226 ap->print_id, host_stat);
5228 /* if it's not our irq... */
5229 if (!(host_stat & ATA_DMA_INTR))
5232 /* before we do anything else, clear DMA-Start bit */
5233 ap->ops->bmdma_stop(qc);
5235 if (unlikely(host_stat & ATA_DMA_ERR)) {
5236 /* error when transferring data to/from memory */
5237 qc->err_mask |= AC_ERR_HOST_BUS;
5238 ap->hsm_task_state = HSM_ST_ERR;
5248 /* check altstatus */
5249 status = ata_altstatus(ap);
5250 if (status & ATA_BUSY)
5253 /* check main status, clearing INTRQ */
5254 status = ata_chk_status(ap);
5255 if (unlikely(status & ATA_BUSY))
5258 /* ack bmdma irq events */
5259 ap->ops->irq_clear(ap);
5261 ata_hsm_move(ap, qc, status, 0);
5263 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5264 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5265 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5267 return 1; /* irq handled */
5270 ap->stats.idle_irq++;
5273 if ((ap->stats.idle_irq % 1000) == 0) {
5274 ap->ops->irq_ack(ap, 0); /* debug trap */
5275 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5279 return 0; /* irq not handled */
5283 * ata_interrupt - Default ATA host interrupt handler
5284 * @irq: irq line (unused)
5285 * @dev_instance: pointer to our ata_host information structure
5287 * Default interrupt handler for PCI IDE devices. Calls
5288 * ata_host_intr() for each port that is not disabled.
5291 * Obtains host lock during operation.
5294 * IRQ_NONE or IRQ_HANDLED.
5297 irqreturn_t ata_interrupt (int irq, void *dev_instance)
5299 struct ata_host *host = dev_instance;
5301 unsigned int handled = 0;
5302 unsigned long flags;
5304 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5305 spin_lock_irqsave(&host->lock, flags);
5307 for (i = 0; i < host->n_ports; i++) {
5308 struct ata_port *ap;
5310 ap = host->ports[i];
5312 !(ap->flags & ATA_FLAG_DISABLED)) {
5313 struct ata_queued_cmd *qc;
5315 qc = ata_qc_from_tag(ap, ap->active_tag);
5316 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5317 (qc->flags & ATA_QCFLAG_ACTIVE))
5318 handled |= ata_host_intr(ap, qc);
5322 spin_unlock_irqrestore(&host->lock, flags);
5324 return IRQ_RETVAL(handled);
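/*
 * Illustrative sketch: a LLDD with its own top-level handler usually
 * performs per-port work equivalent to the loop in ata_interrupt()
 * above.  example_irq_one_port() is a hypothetical name; the host
 * lock is assumed held by the caller.
 */
static inline unsigned int example_irq_one_port(struct ata_port *ap)
{
        struct ata_queued_cmd *qc;

        if (ap->flags & ATA_FLAG_DISABLED)
                return 0;

        qc = ata_qc_from_tag(ap, ap->active_tag);
        if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING) &&
            (qc->flags & ATA_QCFLAG_ACTIVE))
                return ata_host_intr(ap, qc);

        return 0;
}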
5328 * sata_scr_valid - test whether SCRs are accessible
5329 * @ap: ATA port to test SCR accessibility for
5331 * Test whether SCRs are accessible for @ap.
5337 * 1 if SCRs are accessible, 0 otherwise.
5339 int sata_scr_valid(struct ata_port *ap)
5341 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5345 * sata_scr_read - read SCR register of the specified port
5346 * @ap: ATA port to read SCR for
5348 * @val: Place to store read value
5350 * Read SCR register @reg of @ap into *@val. This function is
5351 * guaranteed to succeed if the cable type of the port is SATA
5352 * and the port implements ->scr_read.
5358 * 0 on success, negative errno on failure.
5360 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5362 if (sata_scr_valid(ap)) {
5363 *val = ap->ops->scr_read(ap, reg);
5370 * sata_scr_write - write SCR register of the specified port
5371 * @ap: ATA port to write SCR for
5372 * @reg: SCR to write
5373 * @val: value to write
5375 * Write @val to SCR register @reg of @ap. This function is
5376 * guaranteed to succeed if the cable type of the port is SATA
5377 * and the port implements ->scr_write.
5383 * 0 on success, negative errno on failure.
5385 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5387 if (sata_scr_valid(ap)) {
5388 ap->ops->scr_write(ap, reg, val);
5395 * sata_scr_write_flush - write SCR register of the specified port and flush
5396 * @ap: ATA port to write SCR for
5397 * @reg: SCR to write
5398 * @val: value to write
5400 * This function is identical to sata_scr_write() except that it
5401 * performs a flush after writing to the register.
5407 * 0 on success, negative errno on failure.
5409 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5411 if (sata_scr_valid(ap)) {
5412 ap->ops->scr_write(ap, reg, val);
5413 ap->ops->scr_read(ap, reg);
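/*
 * Illustrative sketch: a read-modify-write of SControl built from the
 * accessors above, here capping the allowed link speed.
 * example_limit_spd() is hypothetical (@spd: 1 = 1.5 Gbps,
 * 2 = 3.0 Gbps, 0 = no limit).
 */
static inline int example_limit_spd(struct ata_port *ap, int spd)
{
        u32 scontrol;
        int rc;

        rc = sata_scr_read(ap, SCR_CONTROL, &scontrol);
        if (rc)
                return rc;

        scontrol = (scontrol & ~0xf0) | ((spd & 0xf) << 4);  /* SPD field */
        return sata_scr_write(ap, SCR_CONTROL, scontrol);
}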
5420 * ata_port_online - test whether the given port is online
5421 * @ap: ATA port to test
5423 * Test whether @ap is online. Note that this function returns 0
5424 * if online status of @ap cannot be obtained, so
5425 * ata_port_online(ap) != !ata_port_offline(ap).
5431 * 1 if the port online status is available and online.
5433 int ata_port_online(struct ata_port *ap)
5437 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5443 * ata_port_offline - test whether the given port is offline
5444 * @ap: ATA port to test
5446 * Test whether @ap is offline. Note that this function returns
5447 * 0 if offline status of @ap cannot be obtained, so
5448 * ata_port_online(ap) != !ata_port_offline(ap).
5454 * 1 if the port offline status is available and offline.
5456 int ata_port_offline(struct ata_port *ap)
5460 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5465 int ata_flush_cache(struct ata_device *dev)
5467 unsigned int err_mask;
5470 if (!ata_try_flush_cache(dev))
5473 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5474 cmd = ATA_CMD_FLUSH_EXT;
5476 cmd = ATA_CMD_FLUSH;
5478 err_mask = ata_do_simple_cmd(dev, cmd);
5480 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5488 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5489 unsigned int action, unsigned int ehi_flags,
5492 unsigned long flags;
5495 for (i = 0; i < host->n_ports; i++) {
5496 struct ata_port *ap = host->ports[i];
5498 /* Previous resume operation might still be in
5499 * progress. Wait for PM_PENDING to clear.
5501 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5502 ata_port_wait_eh(ap);
5503 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5506 /* request PM ops to EH */
5507 spin_lock_irqsave(ap->lock, flags);
5512 ap->pm_result = &rc;
5515 ap->pflags |= ATA_PFLAG_PM_PENDING;
5516 ap->eh_info.action |= action;
5517 ap->eh_info.flags |= ehi_flags;
5519 ata_port_schedule_eh(ap);
5521 spin_unlock_irqrestore(ap->lock, flags);
5523 /* wait and check result */
5525 ata_port_wait_eh(ap);
5526 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5536 * ata_host_suspend - suspend host
5537 * @host: host to suspend
5540 * Suspend @host. Actual operation is performed by EH. This
5541 * function requests EH to perform PM operations and waits for EH
5542 * to finish.
5545 * Kernel thread context (may sleep).
5548 * 0 on success, -errno on failure.
5550 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5554 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5558 /* EH is quiescent now. Fail if we have any ready device.
5559 * This happens if hotplug occurs between completion of device
5560 * suspension and here.
5562 for (i = 0; i < host->n_ports; i++) {
5563 struct ata_port *ap = host->ports[i];
5565 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5566 struct ata_device *dev = &ap->device[j];
5568 if (ata_dev_ready(dev)) {
5569 ata_port_printk(ap, KERN_WARNING,
5570 "suspend failed, device %d "
5571 "still active\n", dev->devno);
5578 host->dev->power.power_state = mesg;
5582 ata_host_resume(host);
5587 * ata_host_resume - resume host
5588 * @host: host to resume
5590 * Resume @host. Actual operation is performed by EH. This
5591 * function requests EH to perform PM operations and returns.
5592 * Note that all resume operations are performed in parallel.
5595 * Kernel thread context (may sleep).
5597 void ata_host_resume(struct ata_host *host)
5599 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5600 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5601 host->dev->power.power_state = PMSG_ON;
5606 * ata_port_start - Set port up for DMA.
5607 * @ap: Port to initialize
5609 * Called just after data structures for each port are
5610 * initialized. Allocates space for PRD table.
5612 * May be used as the port_start() entry in ata_port_operations.
5615 * Inherited from caller.
5617 int ata_port_start(struct ata_port *ap)
5619 struct device *dev = ap->dev;
5622 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5627 rc = ata_pad_alloc(ap, dev);
5631 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5632 (unsigned long long)ap->prd_dma);
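/*
 * Illustrative sketch: a LLDD that needs more than the PRD table can
 * wrap the helper above in its own port_start().  example_port_start()
 * is a hypothetical name.
 */
static inline int example_port_start(struct ata_port *ap)
{
        int rc = ata_port_start(ap);    /* PRD table, managed via devres */

        if (rc)
                return rc;

        ap->private_data = NULL;        /* LLDD-private setup would go here */
        return 0;
}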
5637 * ata_dev_init - Initialize an ata_device structure
5638 * @dev: Device structure to initialize
5640 * Initialize @dev in preparation for probing.
5643 * Inherited from caller.
5645 void ata_dev_init(struct ata_device *dev)
5647 struct ata_port *ap = dev->ap;
5648 unsigned long flags;
5650 /* SATA spd limit is bound to the first device */
5651 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5653 /* High bits of dev->flags are used to record warm plug
5654 * requests which occur asynchronously.  Synchronize using
5655 * the host lock.
5657 spin_lock_irqsave(ap->lock, flags);
5658 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5659 spin_unlock_irqrestore(ap->lock, flags);
5661 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5662 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5663 dev->pio_mask = UINT_MAX;
5664 dev->mwdma_mask = UINT_MAX;
5665 dev->udma_mask = UINT_MAX;
5669 * ata_port_init - Initialize an ata_port structure
5670 * @ap: Structure to initialize
5671 * @host: Collection of ports to which @ap belongs
5672 * @ent: Probe information provided by low-level driver
5673 * @port_no: Port number associated with this ata_port
5675 * Initialize a new ata_port structure.
5678 * Inherited from caller.
5680 void ata_port_init(struct ata_port *ap, struct ata_host *host,
5681 const struct ata_probe_ent *ent, unsigned int port_no)
5685 ap->lock = &host->lock;
5686 ap->flags = ATA_FLAG_DISABLED;
5687 ap->print_id = ata_print_id++;
5688 ap->ctl = ATA_DEVCTL_OBS;
5691 ap->port_no = port_no;
5692 if (port_no == 1 && ent->pinfo2) {
5693 ap->pio_mask = ent->pinfo2->pio_mask;
5694 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5695 ap->udma_mask = ent->pinfo2->udma_mask;
5696 ap->flags |= ent->pinfo2->flags;
5697 ap->ops = ent->pinfo2->port_ops;
5699 ap->pio_mask = ent->pio_mask;
5700 ap->mwdma_mask = ent->mwdma_mask;
5701 ap->udma_mask = ent->udma_mask;
5702 ap->flags |= ent->port_flags;
5703 ap->ops = ent->port_ops;
5705 ap->hw_sata_spd_limit = UINT_MAX;
5706 ap->active_tag = ATA_TAG_POISON;
5707 ap->last_ctl = 0xFF;
5709 #if defined(ATA_VERBOSE_DEBUG)
5710 /* turn on all debugging levels */
5711 ap->msg_enable = 0x00FF;
5712 #elif defined(ATA_DEBUG)
5713 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
5715 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
5718 INIT_DELAYED_WORK(&ap->port_task, NULL);
5719 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5720 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
5721 INIT_LIST_HEAD(&ap->eh_done_q);
5722 init_waitqueue_head(&ap->eh_wait_q);
5724 /* set cable type */
5725 ap->cbl = ATA_CBL_NONE;
5726 if (ap->flags & ATA_FLAG_SATA)
5727 ap->cbl = ATA_CBL_SATA;
5729 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5730 struct ata_device *dev = &ap->device[i];
5737 ap->stats.unhandled_irq = 1;
5738 ap->stats.idle_irq = 1;
5741 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5745 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5746 * @ap: ATA port to initialize SCSI host for
5747 * @shost: SCSI host associated with @ap
5749 * Initialize SCSI host @shost associated with ATA port @ap.
5752 * Inherited from caller.
5754 static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
5756 ap->scsi_host = shost;
5758 shost->unique_id = ap->print_id;
5761 shost->max_channel = 1;
5762 shost->max_cmd_len = 16;
5766 * ata_port_add - Attach low-level ATA driver to system
5767 * @ent: Information provided by low-level driver
5768 * @host: Collection of ports to which we add
5769 * @port_no: Port number associated with this host
5771 * Attach low-level ATA driver to system.
5774 * PCI/etc. bus probe sem.
5777 * New ata_port on success, NULL on error.
5779 static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
5780 struct ata_host *host,
5781 unsigned int port_no)
5783 struct Scsi_Host *shost;
5784 struct ata_port *ap;
5788 if (!ent->port_ops->error_handler &&
5789 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
5790 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5795 ap = kzalloc(sizeof(struct ata_port), GFP_KERNEL);
5799 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port *));
5805 *(struct ata_port **)&shost->hostdata[0] = ap;
5806 shost->transportt = &ata_scsi_transport_template;
5808 ata_port_init(ap, host, ent, port_no);
5809 ata_port_init_shost(ap, shost);
5814 static void ata_host_release(struct device *gendev, void *res)
5816 struct ata_host *host = dev_get_drvdata(gendev);
5819 for (i = 0; i < host->n_ports; i++) {
5820 struct ata_port *ap = host->ports[i];
5825 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
5826 ap->ops->port_stop(ap);
5829 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
5830 host->ops->host_stop(host);
5832 for (i = 0; i < host->n_ports; i++) {
5833 struct ata_port *ap = host->ports[i];
5839 scsi_host_put(ap->scsi_host);
5842 host->ports[i] = NULL;
5845 dev_set_drvdata(gendev, NULL);
5849 * ata_host_start - start and freeze ports of an ATA host
5850 * @host: ATA host to start ports for
5852 * Start and then freeze ports of @host. Started status is
5853 * recorded in host->flags, so this function can be called
5854 * multiple times.  Ports are guaranteed to get started only
5855 * once.
5858 * Inherited from calling layer (may sleep).
5861 * 0 if all ports are started successfully, -errno otherwise.
5863 int ata_host_start(struct ata_host *host)
5867 if (host->flags & ATA_HOST_STARTED)
5870 for (i = 0; i < host->n_ports; i++) {
5871 struct ata_port *ap = host->ports[i];
5873 if (ap->ops->port_start) {
5874 rc = ap->ops->port_start(ap);
5876 ata_port_printk(ap, KERN_ERR, "failed to "
5877 "start port (errno=%d)\n", rc);
5882 ata_eh_freeze_port(ap);
5885 host->flags |= ATA_HOST_STARTED;
5890 struct ata_port *ap = host->ports[i];
5892 if (ap->ops->port_stop)
5893 ap->ops->port_stop(ap);
5899 * ata_host_init - Initialize a host struct
5900 * @host: host to initialize
5901 * @dev: device host is attached to
5902 * @flags: host flags
5906 * PCI/etc. bus probe sem.
5910 void ata_host_init(struct ata_host *host, struct device *dev,
5911 unsigned long flags, const struct ata_port_operations *ops)
5913 spin_lock_init(&host->lock);
5915 host->flags = flags;
5920 * ata_device_add - Register hardware device with ATA and SCSI layers
5921 * @ent: Probe information describing hardware device to be registered
5923 * This function processes the information provided in the probe
5924 * information struct @ent, allocates the necessary ATA and SCSI
5925 * host information structures, initializes them, and registers
5926 * everything with requisite kernel subsystems.
5928 * This function requests irqs, probes the ATA bus, and probes
5929 * the SCSI bus.
5932 * PCI/etc. bus probe sem.
5935 * Number of ports registered. Zero on error (no ports registered).
5937 int ata_device_add(const struct ata_probe_ent *ent)
5940 struct device *dev = ent->dev;
5941 struct ata_host *host;
5946 if (ent->irq == 0) {
5947 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5951 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
5954 /* alloc a container for our list of ATA ports (buses) */
5955 host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
5956 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5959 devres_add(dev, host);
5960 dev_set_drvdata(dev, host);
5962 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5963 host->n_ports = ent->n_ports;
5964 host->irq = ent->irq;
5965 host->irq2 = ent->irq2;
5966 host->iomap = ent->iomap;
5967 host->private_data = ent->private_data;
5969 /* register each port bound to this device */
5970 for (i = 0; i < host->n_ports; i++) {
5971 struct ata_port *ap;
5972 unsigned long xfer_mode_mask;
5973 int irq_line = ent->irq;
5975 ap = ata_port_add(ent, host, i);
5976 host->ports[i] = ap;
5981 if (ent->dummy_port_mask & (1 << i)) {
5982 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5983 ap->ops = &ata_dummy_port_ops;
5987 /* Report the secondary IRQ for second channel legacy */
5988 if (i == 1 && ent->irq2)
5989 irq_line = ent->irq2;
5991 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5992 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5993 (ap->pio_mask << ATA_SHIFT_PIO);
5995 /* print per-port info to dmesg */
5996 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5997 "ctl 0x%p bmdma 0x%p irq %d\n",
5998 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5999 ata_mode_string(xfer_mode_mask),
6000 ap->ioaddr.cmd_addr,
6001 ap->ioaddr.ctl_addr,
6002 ap->ioaddr.bmdma_addr,
6007 rc = ata_host_start(host);
6011 /* obtain irq, which may be shared between channels */
6012 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
6013 ent->irq_flags, DRV_NAME, host);
6015 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
6020 /* do we have a second IRQ for the other channel, eg legacy mode */
6022 /* We will get weird core code crashes later if this is true,
6023 * so trap it now. */
6024 BUG_ON(ent->irq == ent->irq2);
6026 rc = devm_request_irq(dev, ent->irq2,
6027 ent->port_ops->irq_handler, ent->irq_flags,
6030 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
6036 /* resource acquisition complete */
6037 devres_remove_group(dev, ata_device_add);
6039 /* perform each probe synchronously */
6040 DPRINTK("probe begin\n");
6041 for (i = 0; i < host->n_ports; i++) {
6042 struct ata_port *ap = host->ports[i];
6046 /* init sata_spd_limit to the current value */
6047 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
6048 int spd = (scontrol >> 4) & 0xf;
6049 ap->hw_sata_spd_limit &= (1 << spd) - 1;
6051 ap->sata_spd_limit = ap->hw_sata_spd_limit;
6053 rc = scsi_add_host(ap->scsi_host, dev);
6055 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
6056 /* FIXME: do something useful here */
6057 /* FIXME: handle unconditional calls to
6058 * scsi_scan_host and ata_host_remove, below,
6063 if (ap->ops->error_handler) {
6064 struct ata_eh_info *ehi = &ap->eh_info;
6065 unsigned long flags;
6069 /* kick EH for boot probing */
6070 spin_lock_irqsave(ap->lock, flags);
6072 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
6073 ehi->action |= ATA_EH_SOFTRESET;
6074 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6076 ap->pflags |= ATA_PFLAG_LOADING;
6077 ata_port_schedule_eh(ap);
6079 spin_unlock_irqrestore(ap->lock, flags);
6081 /* wait for EH to finish */
6082 ata_port_wait_eh(ap);
6084 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6085 rc = ata_bus_probe(ap);
6086 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6089 /* FIXME: do something useful here?
6090 * Current libata behavior will
6091 * tear down everything when
6092 * the module is removed
6093 * or the h/w is unplugged.
6099 /* probes are done, now scan each port's disk(s) */
6100 DPRINTK("host probe begin\n");
6101 for (i = 0; i < host->n_ports; i++) {
6102 struct ata_port *ap = host->ports[i];
6104 ata_scsi_scan_host(ap);
6107 VPRINTK("EXIT, returning %u\n", ent->n_ports);
6108 return ent->n_ports; /* success */
6111 devres_release_group(dev, ata_device_add);
6112 VPRINTK("EXIT, returning %d\n", rc);
6117 * ata_port_detach - Detach ATA port in preparation of device removal
6118 * @ap: ATA port to be detached
6120 * Detach all ATA devices and the associated SCSI devices of @ap;
6121 * then, remove the associated SCSI host. @ap is guaranteed to
6122 * be quiescent on return from this function.
6125 * Kernel thread context (may sleep).
6127 void ata_port_detach(struct ata_port *ap)
6129 unsigned long flags;
6132 if (!ap->ops->error_handler)
6135 /* tell EH we're leaving & flush EH */
6136 spin_lock_irqsave(ap->lock, flags);
6137 ap->pflags |= ATA_PFLAG_UNLOADING;
6138 spin_unlock_irqrestore(ap->lock, flags);
6140 ata_port_wait_eh(ap);
6142 /* EH is now guaranteed to see UNLOADING, so no new device
6143 * will be attached. Disable all existing devices.
6145 spin_lock_irqsave(ap->lock, flags);
6147 for (i = 0; i < ATA_MAX_DEVICES; i++)
6148 ata_dev_disable(&ap->device[i]);
6150 spin_unlock_irqrestore(ap->lock, flags);
6152 /* Final freeze & EH. All in-flight commands are aborted. EH
6153 * will be skipped and retrials will be terminated with bad
6156 spin_lock_irqsave(ap->lock, flags);
6157 ata_port_freeze(ap); /* won't be thawed */
6158 spin_unlock_irqrestore(ap->lock, flags);
6160 ata_port_wait_eh(ap);
6162 /* Flush hotplug task. The sequence is similar to
6163 * ata_port_flush_task().
6165 flush_workqueue(ata_aux_wq);
6166 cancel_delayed_work(&ap->hotplug_task);
6167 flush_workqueue(ata_aux_wq);
6170 /* remove the associated SCSI host */
6171 scsi_remove_host(ap->scsi_host);
6175 * ata_host_detach - Detach all ports of an ATA host
6176 * @host: Host to detach
6178 * Detach all ports of @host.
6181 * Kernel thread context (may sleep).
6183 void ata_host_detach(struct ata_host *host)
6187 for (i = 0; i < host->n_ports; i++)
6188 ata_port_detach(host->ports[i]);
6191 struct ata_probe_ent *
6192 ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
6194 struct ata_probe_ent *probe_ent;
6196 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
6198 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
6199 kobject_name(&(dev->kobj)));
6203 INIT_LIST_HEAD(&probe_ent->node);
6204 probe_ent->dev = dev;
6206 probe_ent->sht = port->sht;
6207 probe_ent->port_flags = port->flags;
6208 probe_ent->pio_mask = port->pio_mask;
6209 probe_ent->mwdma_mask = port->mwdma_mask;
6210 probe_ent->udma_mask = port->udma_mask;
6211 probe_ent->port_ops = port->port_ops;
6212 probe_ent->private_data = port->private_data;
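/*
 * Illustrative sketch: a simple non-PCI LLDD builds a probe ent with
 * the helper above, fills in the bus-specific bits and hands it to
 * ata_device_add().  example_attach() and @irq are hypothetical;
 * error handling is kept minimal.
 */
static inline int example_attach(struct device *dev,
                                 const struct ata_port_info *pinfo, int irq)
{
        struct ata_probe_ent *probe_ent = ata_probe_ent_alloc(dev, pinfo);

        if (!probe_ent)
                return -ENOMEM;

        probe_ent->n_ports = 1;
        probe_ent->irq = irq;
        probe_ent->irq_flags = IRQF_SHARED;
        /* probe_ent->port[0] I/O addresses would be filled in here */

        return ata_device_add(probe_ent) ? 0 : -ENODEV;
}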
6218 * ata_std_ports - initialize ioaddr with standard port offsets.
6219 * @ioaddr: IO address structure to be initialized
6221 * Utility function which initializes data_addr, error_addr,
6222 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6223 * device_addr, status_addr, and command_addr to standard offsets
6224 * relative to cmd_addr.
6226 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6229 void ata_std_ports(struct ata_ioports *ioaddr)
6231 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6232 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6233 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6234 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6235 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6236 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6237 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6238 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6239 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6240 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
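/*
 * Illustrative sketch: the caller fills in the command block base and
 * control block address, then lets ata_std_ports() derive the rest.
 * example_init_ioaddr() is hypothetical; 0x206 is the classic legacy
 * cmd-to-ctl offset (0x3f6 - 0x1f0).
 */
static inline void example_init_ioaddr(struct ata_ioports *ioaddr,
                                       void __iomem *base)
{
        ioaddr->cmd_addr = base;
        ioaddr->ctl_addr = base + 0x206;
        ioaddr->altstatus_addr = ioaddr->ctl_addr;
        ata_std_ports(ioaddr); /* data/error/.../command from cmd_addr */
}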
6247 * ata_pci_remove_one - PCI layer callback for device removal
6248 * @pdev: PCI device that was removed
6250 * PCI layer indicates to libata via this hook that a hot-unplug or
6251 * module unload event has occurred. Detach all ports. Resource
6252 * release is handled via devres.
6255 * Inherited from PCI layer (may sleep).
6257 void ata_pci_remove_one(struct pci_dev *pdev)
6259 struct device *dev = pci_dev_to_dev(pdev);
6260 struct ata_host *host = dev_get_drvdata(dev);
6262 ata_host_detach(host);
6265 /* move to PCI subsystem */
6266 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6268 unsigned long tmp = 0;
6270 switch (bits->width) {
6273 pci_read_config_byte(pdev, bits->reg, &tmp8);
6279 pci_read_config_word(pdev, bits->reg, &tmp16);
6285 pci_read_config_dword(pdev, bits->reg, &tmp32);
6296 return (tmp == bits->val) ? 1 : 0;
6300 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6302 pci_save_state(pdev);
6303 pci_disable_device(pdev);
6305 if (mesg.event == PM_EVENT_SUSPEND)
6306 pci_set_power_state(pdev, PCI_D3hot);
6309 int ata_pci_device_do_resume(struct pci_dev *pdev)
6313 pci_set_power_state(pdev, PCI_D0);
6314 pci_restore_state(pdev);
6316 rc = pcim_enable_device(pdev);
6318 dev_printk(KERN_ERR, &pdev->dev,
6319 "failed to enable device after resume (%d)\n", rc);
6323 pci_set_master(pdev);
6327 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6329 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6332 rc = ata_host_suspend(host, mesg);
6336 ata_pci_device_do_suspend(pdev, mesg);
6341 int ata_pci_device_resume(struct pci_dev *pdev)
6343 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6346 rc = ata_pci_device_do_resume(pdev);
6348 ata_host_resume(host);
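/*
 * Illustrative sketch: PCI LLDDs normally wire the two helpers above,
 * together with ata_pci_remove_one() further up, straight into their
 * pci_driver.  example_pci_driver is a hypothetical fragment:
 *
 *	static struct pci_driver example_pci_driver = {
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *		.remove		= ata_pci_remove_one,
 *		...
 *	};
 */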
6351 #endif /* CONFIG_PM */
6353 #endif /* CONFIG_PCI */
6356 static int __init ata_init(void)
6358 ata_probe_timeout *= HZ;
6359 ata_wq = create_workqueue("ata");
6363 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6365 destroy_workqueue(ata_wq);
6369 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6373 static void __exit ata_exit(void)
6375 destroy_workqueue(ata_wq);
6376 destroy_workqueue(ata_aux_wq);
6379 subsys_initcall(ata_init);
6380 module_exit(ata_exit);
6382 static unsigned long ratelimit_time;
6383 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6385 int ata_ratelimit(void)
6388 unsigned long flags;
6390 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6392 if (time_after(jiffies, ratelimit_time)) {
6394 ratelimit_time = jiffies + (HZ/5);
6398 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
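/*
 * Illustrative sketch: ata_ratelimit() gates messages that could
 * otherwise flood the log, e.g. from an interrupt handler.
 * example_warn_spurious() is a hypothetical name.
 */
static inline void example_warn_spurious(struct ata_port *ap)
{
        if (ata_ratelimit())
                ata_port_printk(ap, KERN_WARNING,
                                "spurious interrupt ignored\n");
}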
6404 * ata_wait_register - wait until register value changes
6405 * @reg: IO-mapped register
6406 * @mask: Mask to apply to read register value
6407 * @val: Wait condition
6408 * @interval_msec: polling interval in milliseconds
6409 * @timeout_msec: timeout in milliseconds
6411 * Waiting for some bits of register to change is a common
6412 * operation for ATA controllers. This function reads 32bit LE
6413 * IO-mapped register @reg and tests for the following condition.
6415 * (*@reg & mask) != val
6417 * If the condition is met, it returns; otherwise, the process is
6418 * repeated after @interval_msec until timeout.
6421 * Kernel thread context (may sleep)
6424 * The final register value.
6426 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6427 unsigned long interval_msec,
6428 unsigned long timeout_msec)
6430 unsigned long timeout;
6433 tmp = ioread32(reg);
6435 /* Calculate timeout _after_ the first read to make sure
6436 * preceding writes reach the controller before starting to
6437 * eat away the timeout.
6439 timeout = jiffies + (timeout_msec * HZ) / 1000;
6441 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6442 msleep(interval_msec);
6443 tmp = ioread32(reg);
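/*
 * Illustrative sketch: waiting for a hypothetical "ready" bit to
 * assert, polling every 10 ms for up to a second.
 * example_wait_ready() and @rdy_bit are hypothetical.
 */
static inline int example_wait_ready(void __iomem *stat_reg, u32 rdy_bit)
{
        /* loops while (reg & rdy_bit) == 0, i.e. until the bit sets */
        u32 tmp = ata_wait_register(stat_reg, rdy_bit, 0, 10, 1000);

        return (tmp & rdy_bit) ? 0 : -EBUSY;
}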
6452 static void ata_dummy_noret(struct ata_port *ap) { }
6453 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6454 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6456 static u8 ata_dummy_check_status(struct ata_port *ap)
6461 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6463 return AC_ERR_SYSTEM;
6466 const struct ata_port_operations ata_dummy_port_ops = {
6467 .port_disable = ata_port_disable,
6468 .check_status = ata_dummy_check_status,
6469 .check_altstatus = ata_dummy_check_status,
6470 .dev_select = ata_noop_dev_select,
6471 .qc_prep = ata_noop_qc_prep,
6472 .qc_issue = ata_dummy_qc_issue,
6473 .freeze = ata_dummy_noret,
6474 .thaw = ata_dummy_noret,
6475 .error_handler = ata_dummy_noret,
6476 .post_internal_cmd = ata_dummy_qc_noret,
6477 .irq_clear = ata_dummy_noret,
6478 .port_start = ata_dummy_ret0,
6479 .port_stop = ata_dummy_noret,
6483 * libata is essentially a library of internal helper functions for
6484 * low-level ATA host controller drivers. As such, the API/ABI is
6485 * likely to change as new drivers are added and updated.
6486 * Do not depend on ABI/API stability.
6489 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6490 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6491 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6492 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6493 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6494 EXPORT_SYMBOL_GPL(ata_std_ports);
6495 EXPORT_SYMBOL_GPL(ata_host_init);
6496 EXPORT_SYMBOL_GPL(ata_host_start);
6497 EXPORT_SYMBOL_GPL(ata_device_add);
6498 EXPORT_SYMBOL_GPL(ata_host_detach);
6499 EXPORT_SYMBOL_GPL(ata_sg_init);
6500 EXPORT_SYMBOL_GPL(ata_sg_init_one);
6501 EXPORT_SYMBOL_GPL(ata_hsm_move);
6502 EXPORT_SYMBOL_GPL(ata_qc_complete);
6503 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6504 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6505 EXPORT_SYMBOL_GPL(ata_tf_load);
6506 EXPORT_SYMBOL_GPL(ata_tf_read);
6507 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6508 EXPORT_SYMBOL_GPL(ata_std_dev_select);
6509 EXPORT_SYMBOL_GPL(sata_print_link_status);
6510 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6511 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6512 EXPORT_SYMBOL_GPL(ata_check_status);
6513 EXPORT_SYMBOL_GPL(ata_altstatus);
6514 EXPORT_SYMBOL_GPL(ata_exec_command);
6515 EXPORT_SYMBOL_GPL(ata_port_start);
6516 EXPORT_SYMBOL_GPL(ata_interrupt);
6517 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6518 EXPORT_SYMBOL_GPL(ata_data_xfer);
6519 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6520 EXPORT_SYMBOL_GPL(ata_qc_prep);
6521 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6522 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6523 EXPORT_SYMBOL_GPL(ata_bmdma_start);
6524 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6525 EXPORT_SYMBOL_GPL(ata_bmdma_status);
6526 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6527 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6528 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6529 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6530 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6531 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6532 EXPORT_SYMBOL_GPL(ata_port_probe);
6533 EXPORT_SYMBOL_GPL(ata_dev_disable);
6534 EXPORT_SYMBOL_GPL(sata_set_spd);
6535 EXPORT_SYMBOL_GPL(sata_phy_debounce);
6536 EXPORT_SYMBOL_GPL(sata_phy_resume);
6537 EXPORT_SYMBOL_GPL(sata_phy_reset);
6538 EXPORT_SYMBOL_GPL(__sata_phy_reset);
6539 EXPORT_SYMBOL_GPL(ata_bus_reset);
6540 EXPORT_SYMBOL_GPL(ata_std_prereset);
6541 EXPORT_SYMBOL_GPL(ata_std_softreset);
6542 EXPORT_SYMBOL_GPL(sata_port_hardreset);
6543 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6544 EXPORT_SYMBOL_GPL(ata_std_postreset);
6545 EXPORT_SYMBOL_GPL(ata_dev_classify);
6546 EXPORT_SYMBOL_GPL(ata_dev_pair);
6547 EXPORT_SYMBOL_GPL(ata_port_disable);
6548 EXPORT_SYMBOL_GPL(ata_ratelimit);
6549 EXPORT_SYMBOL_GPL(ata_wait_register);
6550 EXPORT_SYMBOL_GPL(ata_busy_sleep);
6551 EXPORT_SYMBOL_GPL(ata_port_queue_task);
6552 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6553 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6554 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6555 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6556 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6557 EXPORT_SYMBOL_GPL(ata_host_intr);
6558 EXPORT_SYMBOL_GPL(sata_scr_valid);
6559 EXPORT_SYMBOL_GPL(sata_scr_read);
6560 EXPORT_SYMBOL_GPL(sata_scr_write);
6561 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6562 EXPORT_SYMBOL_GPL(ata_port_online);
6563 EXPORT_SYMBOL_GPL(ata_port_offline);
6565 EXPORT_SYMBOL_GPL(ata_host_suspend);
6566 EXPORT_SYMBOL_GPL(ata_host_resume);
6567 #endif /* CONFIG_PM */
6568 EXPORT_SYMBOL_GPL(ata_id_string);
6569 EXPORT_SYMBOL_GPL(ata_id_c_string);
6570 EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6571 EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6572 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6574 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6575 EXPORT_SYMBOL_GPL(ata_timing_compute);
6576 EXPORT_SYMBOL_GPL(ata_timing_merge);
6579 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6580 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6581 EXPORT_SYMBOL_GPL(ata_pci_init_one);
6582 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6584 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6585 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6586 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6587 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6588 #endif /* CONFIG_PM */
6589 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6590 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6591 #endif /* CONFIG_PCI */
6594 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6595 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6596 #endif /* CONFIG_PM */
6598 EXPORT_SYMBOL_GPL(ata_eng_timeout);
6599 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6600 EXPORT_SYMBOL_GPL(ata_port_abort);
6601 EXPORT_SYMBOL_GPL(ata_port_freeze);
6602 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6603 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6604 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6605 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6606 EXPORT_SYMBOL_GPL(ata_do_eh);
6607 EXPORT_SYMBOL_GPL(ata_irq_on);
6608 EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6609 EXPORT_SYMBOL_GPL(ata_irq_ack);
6610 EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
6611 EXPORT_SYMBOL_GPL(ata_dev_try_classify);
6613 EXPORT_SYMBOL_GPL(ata_cable_40wire);
6614 EXPORT_SYMBOL_GPL(ata_cable_80wire);
6615 EXPORT_SYMBOL_GPL(ata_cable_unknown);
6616 EXPORT_SYMBOL_GPL(ata_cable_sata);