/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008-2009: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc. All rights reserved.
 *
 * Originally written by Brett Russ.
 * Extensive overhaul and enhancement by Mark Lord <mlord@pobox.com>.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> Add sysfs attributes for per-chip / per-HC IRQ coalescing thresholds.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *     mode to cross-connect two Linux boxes with Marvell cards? If so,
 *     creating LibATA target mode support would be very interesting.
 *
 *     Target mode, for those without docs, is the ability to directly
 *     connect two SATA ports.
 */

/*
 * 80x1-B2 errata PCI#11:
 *
 * Users of the 6041/6081 Rev.B2 chips (current is C0)
 * should be careful to insert those cards only onto PCI-X bus #0,
 * and only in device slots 0..7, not higher. The chips may not
 * work correctly otherwise (note: this is a pretty rare condition).
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.28"

static int msi;
module_param(msi, int, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

static int irq_coalescing_io_count;
module_param(irq_coalescing_io_count, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_io_count,
	"IRQ coalescing I/O count threshold (0..255)");

static int irq_coalescing_usecs;
module_param(irq_coalescing_usecs, int, S_IRUGO);
MODULE_PARM_DESC(irq_coalescing_usecs,
	"IRQ coalescing time threshold in usecs");
enum {
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR = 0,	/* offset 0x10: memory space */
	MV_IO_BAR = 2,		/* offset 0x18: IO space */
	MV_MISC_BAR = 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ = 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000,	/* 8KB */

	/* For use with both IRQ coalescing methods ("all ports" or "per-HC") */
	COAL_CLOCKS_PER_USEC = 150,		/* for calculating COAL_TIMEs */
	MAX_COAL_TIME_THRESHOLD = ((1 << 24) - 1), /* internal clocks count */
	MAX_COAL_IO_COUNT = 255,		/* completed I/O count */
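	/*
	 * Worked example (illustrative): at COAL_CLOCKS_PER_USEC == 150, a
	 * 100 usec time threshold programs 15000 internal clocks; the 24-bit
	 * MAX_COAL_TIME_THRESHOLD caps the usable threshold at ~111 msec.
	 */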
	/*
	 * Per-chip ("all ports") interrupt coalescing feature.
	 * This is only for GEN_II / GEN_IIE hardware.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	COAL_REG_BASE = 0x18000,
	IRQ_COAL_CAUSE = (COAL_REG_BASE + 0x08),
	ALL_PORTS_COAL_IRQ = (1 << 4),	/* all ports irq event */

	IRQ_COAL_IO_THRESHOLD = (COAL_REG_BASE + 0xcc),
	IRQ_COAL_TIME_THRESHOLD = (COAL_REG_BASE + 0xd0),

	/*
	 * Registers for the (unused here) transaction coalescing feature:
	 */
	TRAN_COAL_CAUSE_LO = (COAL_REG_BASE + 0x88),
	TRAN_COAL_CAUSE_HI = (COAL_REG_BASE + 0x8c),

	SATAHC0_REG_BASE = 0x20000,
	GPIO_PORT_CTL = 0x104f0,

	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH = 32,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT = 256,
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT = 2,
	MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK = (MV_PORTS_PER_HC - 1),	/* 3 */
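	/* Worked example (illustrative): port 6 -> hc = 6 >> 2 = 1, hardport = 6 & 3 = 2 */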
	MV_FLAG_DUAL_HC = (1 << 30),	/* two SATA Host Controllers */

	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS = MV_COMMON_FLAGS | ATA_FLAG_NCQ |
			  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA,

	MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ = (1 << 0),
	CRQB_IOID_SHIFT = 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT = 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT = 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),

	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7 = 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL = (1 << 31),

	/* PCI interface registers */

	MV_PCI_COMMAND = 0xc00,
	MV_PCI_COMMAND_MWRCOM = (1 << 4),	/* PCI Master Write Combining */
	MV_PCI_COMMAND_MRDTRIG = (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),

	MV_PCI_MODE_MASK = 0x30,

	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,

	PCI_IRQ_CAUSE = 0x1d58,
	PCI_IRQ_MASK = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE = 0x1900,
	PCIE_IRQ_MASK = 0x1910,
	PCIE_UNMASK_ALL_IRQS = 0x40a,	/* assorted bits */

	/* Host Controller Main Interrupt Cause/Mask registers (1 per chip) */
	PCI_HC_MAIN_IRQ_CAUSE = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE = 0x20020,
	SOC_HC_MAIN_IRQ_MASK = 0x20024,
	ERR_IRQ = (1 << 0),	/* shift by (2 * port #) */
	DONE_IRQ = (1 << 1),	/* shift by (2 * port #) */
	HC0_IRQ_PEND = 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT = 9,		/* bits 9-17 = HC1's ports */
	DONE_IRQ_0_3 = 0x000000aa,	/* DONE_IRQ ports 0,1,2,3 */
	DONE_IRQ_4_7 = (DONE_IRQ_0_3 << HC_SHIFT),	/* 4,5,6,7 */
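	/*
	 * Derivation (illustrative): DONE_IRQ << (2 * port) for ports 0..3
	 * sets bits 1, 3, 5 and 7, i.e. 0x000000aa; HC1's ports 4..7 use the
	 * same pattern shifted up by HC_SHIFT.
	 */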
	TRAN_COAL_LO_DONE = (1 << 19),	/* transaction coalescing */
	TRAN_COAL_HI_DONE = (1 << 20),	/* transaction coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8),	/* HC0 IRQ coalescing */
	PORTS_4_7_COAL_DONE = (1 << 17),	/* HC1 IRQ coalescing */
	ALL_PORTS_COAL_DONE = (1 << 21),	/* GEN_II(E) IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19),	/* bits 31-19 */
	HC_MAIN_RSVD_SOC = (0x3fffffb << 6),	/* bits 31-9, 7-6 */

	/* SATAHC registers */
	DMA_IRQ = (1 << 0),	/* shift by port # */
	HC_COAL_IRQ = (1 << 4),	/* IRQ coalescing */
	DEV_IRQ = (1 << 8),	/* shift by port # */

	/*
	 * Per-HC (Host-Controller) interrupt coalescing feature.
	 * This is present on all chip generations.
	 *
	 * Coalescing defers the interrupt until either the IO_THRESHOLD
	 * (count of completed I/Os) is met, or the TIME_THRESHOLD is met.
	 */
	HC_IRQ_COAL_IO_THRESHOLD = 0x000c,
	HC_IRQ_COAL_TIME_THRESHOLD = 0x0010,

	SOC_LED_CTRL_BLINK = (1 << 0),	/* Active LED blink */
	SOC_LED_CTRL_ACT_PRESENCE = (1 << 2),	/* Multiplex dev presence */
						/*  with dev activity LED */

	/* Shadow block registers */
	SHD_CTL_AST = 0x20,	/* ofs from SHD_BLK */

	SATA_STATUS = 0x300,	/* ctrl, err regs follow status */

	FIS_IRQ_CAUSE = 0x364,
	FIS_IRQ_CAUSE_AN = (1 << 9),	/* async notification */

	LTMODE = 0x30c,	/* requires read-after-write */
	LTMODE_BIT8 = (1 << 8),	/* unknown, but necessary */

	PHY_MODE4 = 0x314,	/* requires read-after-write */
	PHY_MODE4_CFG_MASK = 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE = 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS = 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES = 0x00000005,	/* Gen2e always write ones */

	SATA_TESTCTL = 0x348,
	VENDOR_UNIQUE_FIS = 0x35c,

	FISCFG_WAIT_DEV_ERR = (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC = (1 << 16),	/* SYNC on DMA activation */

	PHY_MODE9_GEN2 = 0x398,
	PHY_MODE9_GEN1 = 0x39c,
	PHYCFG_OFS = 0x3a0,	/* only in 65n devices */

	MV_M2_PREAMP_MASK = 0x7e0,

	EDMA_CFG_Q_DEPTH = 0x1f,	/* max device queue depth */
	EDMA_CFG_NCQ = (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS = (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS = (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE = 0x8,
	EDMA_ERR_IRQ_MASK = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR = (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV = (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON = (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON = (1 << 4),	/* device connected */
	EDMA_ERR_SERR = (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS = (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5 = (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC = (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR = (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR = (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR = (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY = (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX = (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX = (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX = (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO = (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
				 EDMA_ERR_LNK_CTRL_RX_1 |
				 EDMA_ERR_LNK_CTRL_RX_3 |
				 EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
			 EDMA_ERR_LNK_CTRL_RX_2 |
			 EDMA_ERR_LNK_DATA_RX |
			 EDMA_ERR_LNK_DATA_TX |
			 EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
			   EDMA_ERR_UNDERRUN_5 |
			   EDMA_ERR_SELF_DIS_5 |

	EDMA_REQ_Q_BASE_HI = 0x10,
	EDMA_REQ_Q_IN_PTR = 0x14,	/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,

	EDMA_RSP_Q_BASE_HI = 0x1c,
	EDMA_RSP_Q_IN_PTR = 0x20,
	EDMA_RSP_Q_OUT_PTR = 0x24,	/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,

	EDMA_CMD = 0x28,	/* EDMA command register */
	EDMA_EN = (1 << 0),	/* enable EDMA */
	EDMA_DS = (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET = (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS = 0x30,	/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY = (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE = (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT = 0x34,

	EDMA_HALTCOND = 0x60,	/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD = 0x6C,	/* GenIIe unknown/reserved */

	BMDMA_CMD = 0x224,	/* bmdma command register */
	BMDMA_STATUS = 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW = 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH = 0x230,	/* bmdma PRD addr 63:32 */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_GEN_I = (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II = (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE = (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE = (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH = (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC = (1 << 11),	/* SystemOnChip, no PCI */
	MV_HP_QUIRK_LED_BLINK_EN = (1 << 12),	/* is LED blinking enabled? */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN = (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN = (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH = (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
#define WINDOW_BASE(i) (0x20034 + ((i) << 4))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY = 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
};
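
/*
 * Note (illustrative): these masks mirror the queue alignments above: the
 * 1KB-aligned CRQB ring leaves bits 9:0 of its base address zero, and the
 * 256B-aligned CRPB ring leaves bits 7:0 zero.
 */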
/* Command ReQuest Block: 32B */

/* Command ResPonse Block: 8B */

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */

/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32 fiscfg;
	u32 ltmode;
	u32 haltcond;
	u32 unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb *crqb;
	dma_addr_t crqb_dma;
	struct mv_crpb *crpb;
	dma_addr_t crpb_dma;
	struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int req_idx;
	unsigned int resp_idx;

	u32 pp_flags;
	struct mv_cached_regs cached;
	unsigned int delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32 amps;
	u32 pre;
};

struct mv_host_priv {
	u32 hp_flags;
	unsigned int board_idx;
	u32 main_irq_mask;
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;
	int n_ports;
	void __iomem *base;
	void __iomem *main_irq_cause_addr;
	void __iomem *main_irq_mask_addr;
	u32 irq_cause_offset;
#if defined(CONFIG_HAVE_CLK)
	struct clk *clk;
#endif
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool *crqb_pool;
	struct dma_pool *crpb_pool;
	struct dma_pool *sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int port);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
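/*
 * Worked example (illustrative): mv_fill_sg() splits any scatterlist
 * segment that crosses a 64K boundary into two ePRDs, so a request built
 * from MV_MAX_SG_CT / 2 segments can expand to at most MV_MAX_SG_CT
 * ePRD entries.
 */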
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = MV_MAX_Q_DEPTH - 1,
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits = &ata_sff_port_ops,

	.lost_interrupt = ATA_OP_NULL,

	.qc_defer = mv_qc_defer,
	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,

	.freeze = mv_eh_freeze,
	.thaw = mv_eh_thaw,
	.hardreset = mv_hardreset,
	.error_handler = ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd = ATA_OP_NULL,

	.scr_read = mv5_scr_read,
	.scr_write = mv5_scr_write,

	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits = &mv5_ops,
	.dev_config = mv6_dev_config,
	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,

	.pmp_hardreset = mv_pmp_hardreset,
	.pmp_softreset = mv_softreset,
	.softreset = mv_softreset,
	.error_handler = mv_pmp_error_handler,

	.sff_check_status = mv_sff_check_status,
	.sff_irq_clear = mv_sff_irq_clear,
	.check_atapi_dma = mv_check_atapi_dma,
	.bmdma_setup = mv_bmdma_setup,
	.bmdma_start = mv_bmdma_start,
	.bmdma_stop = mv_bmdma_stop,
	.bmdma_status = mv_bmdma_status,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits = &mv6_ops,
	.dev_config = ATA_OP_NULL,
	.qc_prep = mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags = MV_GEN_I_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_508x */
		.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_5080 */
		.flags = MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
	},
	{  /* chip_604x */
		.flags = MV_GEN_II_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_608x */
		.flags = MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
	},
	{  /* chip_6042 */
		.flags = MV_GEN_IIE_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags = MV_GEN_IIE_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags = MV_GEN_IIE_FLAGS,
		.pio_mask = ATA_PIO4,
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.read_preamp = mv_soc_read_preamp,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
};

static const struct mv_hw_ops mv_soc_65n_ops = {
	.phy_errata = mv_soc_65n_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
};

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift += hardport * 2;					\
}
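
/* Example (illustrative): port 5 -> hardport = 5 & 3 = 1, shift = 1 * HC_SHIFT + 1 * 2 = 11 */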
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
	       MV_SATAHC_ARBTR_REG_SZ +
	       (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

/**
 * mv_save_cached_regs - (re-)initialize cached port registers
 * @ap: the port whose registers we are caching
 *
 * Initialize the local cache of port registers,
 * so that reading them over and over again can
 * be avoided on the hotter paths of this driver.
 * This saves a few microseconds each time we switch
 * to/from EDMA mode to perform (e.g.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG);
	pp->cached.ltmode = readl(port_mmio + LTMODE);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD);
}

/**
 * mv_write_cached_reg - write to a cached port register
 * @addr: hardware address of the register
 * @old: pointer to cached value of the register
 * @new: new value for the register
 *
 * Write a new value to a cached register,
 * but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (*old != new) {
		unsigned long laddr;
		*old = new;
		/*
		 * Workaround for 88SX60x1-B2 FEr SATA#13:
		 * Read-after-write is needed to prevent generating 64-bit
		 * write cycles on the PCI bus for SATA interface registers
		 * at offsets ending in 0x4 or 0xc.
		 *
		 * Looks like a lot of fuss, but it avoids an unnecessary
		 * +1 usec read-after-write delay for unaffected registers.
		 */
		laddr = (long)addr & 0xffff;
		if (laddr >= 0x300 && laddr <= 0x33c) {
			laddr &= 0x000f;
			if (laddr == 0x4 || laddr == 0xc) {
				writelfl(new, addr); /* read after write */
				return;
			}
		}
		writel(new, addr); /* unaffected by the errata */
	}
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR);
}
static void mv_write_main_irq_mask(u32 mask, struct mv_host_priv *hpriv)
{
	/*
	 * When writing to the main_irq_mask in hardware,
	 * we must ensure exclusivity between the interrupt coalescing bits
	 * and the corresponding individual port DONE_IRQ bits.
	 *
	 * Note that this register is really an "IRQ enable" register,
	 * not an "IRQ mask" register as Marvell's naming might suggest.
	 */
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_0_3_COAL_DONE))
		mask &= ~DONE_IRQ_0_3;
	if (mask & (ALL_PORTS_COAL_DONE | PORTS_4_7_COAL_DONE))
		mask &= ~DONE_IRQ_4_7;
	writelfl(mask, hpriv->main_irq_mask_addr);
}

static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		mv_write_main_irq_mask(new_mask, hpriv);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}

static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + FIS_IRQ_CAUSE);

	mv_enable_port_irqs(ap, port_irqs);
}

static void mv_set_irq_coalescing(struct ata_host *host,
				  unsigned int count, unsigned int usecs)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio;
	u32 coal_enable = 0;
	unsigned long flags;
	unsigned int clks, is_dual_hc = hpriv->n_ports > MV_PORTS_PER_HC;
	const u32 coal_disable = PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				 ALL_PORTS_COAL_DONE;

	/* Disable IRQ coalescing if either threshold is zero */
	if (!usecs || !count) {
		clks = count = 0;
	} else {
		/* Respect maximum limits of the hardware */
		clks = usecs * COAL_CLOCKS_PER_USEC;
		if (clks > MAX_COAL_TIME_THRESHOLD)
			clks = MAX_COAL_TIME_THRESHOLD;
		if (count > MAX_COAL_IO_COUNT)
			count = MAX_COAL_IO_COUNT;
	}

	spin_lock_irqsave(&host->lock, flags);
	mv_set_main_irq_mask(host, coal_disable, 0);

	if (is_dual_hc && !IS_GEN_I(hpriv)) {
		/*
		 * GEN_II/GEN_IIE with dual host controllers:
		 * one set of global thresholds for the entire chip.
		 */
		writel(clks, mmio + IRQ_COAL_TIME_THRESHOLD);
		writel(count, mmio + IRQ_COAL_IO_THRESHOLD);
		/* clear leftover coal IRQ bit */
		writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
		if (count)
			coal_enable = ALL_PORTS_COAL_DONE;
		clks = count = 0; /* force clearing of regular regs below */
	}

	/*
	 * All chips: independent thresholds for each HC on the chip.
	 */
	hc_mmio = mv_hc_base_from_port(mmio, 0);
	writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
	writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
	writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
	if (count)
		coal_enable |= PORTS_0_3_COAL_DONE;
	if (is_dual_hc) {
		hc_mmio = mv_hc_base_from_port(mmio, MV_PORTS_PER_HC);
		writel(clks, hc_mmio + HC_IRQ_COAL_TIME_THRESHOLD);
		writel(count, hc_mmio + HC_IRQ_COAL_IO_THRESHOLD);
		writel(~HC_COAL_IRQ, hc_mmio + HC_IRQ_CAUSE);
		if (count)
			coal_enable |= PORTS_4_7_COAL_DONE;
	}

	mv_set_main_irq_mask(host, 0, coal_enable);
	spin_unlock_irqrestore(&host->lock, flags);
}
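
/*
 * Note (assumption, based on the module parameters above): the
 * irq_coalescing_io_count and irq_coalescing_usecs values are intended
 * to be fed into mv_set_irq_coalescing() once at host init; passing
 * zero for either threshold leaves coalescing disabled.
 */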
/**
 * mv_start_edma - Enable eDMA engine
 * @base: port base address
 * @pp: port private data
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			  struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}

static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use. So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}

/**
 * mv_stop_edma_engine - Disable eDMA engine
 * @port_mmio: io base address
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA. The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;	/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = SATA_STATUS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		void __iomem *addr = mv_ap_base(link->ap) + ofs;
		if (sc_reg_in == SCR_CONTROL) {
			/*
			 * Workaround for 88SX60x1 FEr SATA#26:
			 *
			 * COMRESETs have to take care not to accidentally
			 * put the drive to sleep when writing SCR_CONTROL.
			 * Setting bits 12..15 prevents this problem.
			 *
			 * So if we see an outbound COMRESET, set those bits.
			 * Ditto for the followup write that clears the reset.
			 *
			 * The proprietary driver does this for
			 * all chip versions, and so do we.
			 */
			if ((val & 0xf) == 1 || (readl(addr) & 0xf) == 1)
				val |= 0xf000;
		}
		writelfl(val, addr);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 * (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		}
	}
}

static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;

	/* PIO commands need exclusive link: no other commands [DMA or PIO]
	 * can run concurrently.
	 * Set excl_link when we want to send a PIO command in DMA mode
	 * or a non-NCQ command in NCQ mode.
	 * When we receive a command from that link, and there are no
	 * outstanding commands, mark a flag to clear excl_link and let
	 * the command go through.
	 */
	if (unlikely(ap->excl_link)) {
		if (link == ap->excl_link) {
			if (ap->nr_active_links)
				return ATA_DEFER_PORT;
			qc->flags |= ATA_QCFLAG_CLEAR_EXCL;
		} else
			return ATA_DEFER_PORT;
	}

	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands. EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
		if (ata_is_ncq(qc->tf.protocol))
			return 0;

		ap->excl_link = link;
		return ATA_DEFER_PORT;
	}

	return ATA_DEFER_PORT;
}

static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;

	u32 fiscfg, *old_fiscfg = &pp->cached.fiscfg;
	u32 ltmode, *old_ltmode = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	ltmode = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			haltcond &= ~EDMA_ERR_DEV;
		else
			fiscfg |= FISCFG_WAIT_DEV_ERR;
	} else
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND, old_haltcond, haltcond);
}

static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + GPIO_PORT_CTL);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + GPIO_PORT_CTL);
}

/**
 * mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
 * @ap: Port being initialized
 *
 * There are two DMA modes on these chips: basic DMA, and EDMA.
 *
 * Bit-0 of the "EDMA RESERVED" register enables/disables use
 * of basic DMA on the GEN_IIE versions of the chips.
 *
 * This bit survives EDMA resets, and must be set for basic DMA
 * to function, and should be cleared when EDMA is active.
 */
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
	struct mv_port_priv *pp = ap->private_data;
	u32 new, *old = &pp->cached.unknown_rsvd;

	if (enable_bmdma)
		new = *old | 1;
	else
		new = *old & ~1;
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD, old, new);
}

/*
 * SOC chips have an issue whereby the HDD LEDs don't always blink
 * during I/O when NCQ is enabled. Enabling a special "LED blink" mode
 * of the SOC takes care of it, generating a steady blink rate when
 * any drive on the chip is active.
 *
 * Unfortunately, the blink mode is a global hardware setting for the SOC,
 * so we must use it whenever at least one port on the SOC has NCQ enabled.
 *
 * We turn "LED blink" off when NCQ is not in use anywhere, because the normal
 * LED operation works then, and provides better (more accurate) feedback.
 *
 * Note that this code assumes that an SOC never has more than one HC onboard.
 */
static void mv_soc_led_blink_enable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;

	if (hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN)
		return;
	hpriv->hp_flags |= MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl | SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

static void mv_soc_led_blink_disable(struct ata_port *ap)
{
	struct ata_host *host = ap->host;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *hc_mmio;
	u32 led_ctrl;
	unsigned int port;

	if (!(hpriv->hp_flags & MV_HP_QUIRK_LED_BLINK_EN))
		return;

	/* disable led-blink only if no ports are using NCQ */
	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *this_ap = host->ports[port];
		struct mv_port_priv *pp = this_ap->private_data;

		if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
			return;
	}

	hpriv->hp_flags &= ~MV_HP_QUIRK_LED_BLINK_EN;
	hc_mmio = mv_hc_base_from_port(mv_host_base(host), ap->port_no);
	led_ctrl = readl(hc_mmio + SOC_LED_CTRL);
	writel(led_ctrl & ~SOC_LED_CTRL_BLINK, hc_mmio + SOC_LED_CTRL);
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
	u32 cfg;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * but first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(ap, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		if (want_edma) {
			cfg |= (1 << 22); /* enab 4-entry host queue cache */
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18); /* enab early completion */
		}
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
		mv_bmdma_enable_iie(ap, !want_edma);

		if (IS_SOC(hpriv)) {
			if (want_ncq)
				mv_soc_led_blink_enable(ap);
			else
				mv_soc_led_blink_disable(ap);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	}

	writelfl(cfg, port_mmio + EDMA_CFG);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(ap->lock, flags);
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);
	spin_unlock_irqrestore(ap->lock, flags);

	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	spin_unlock_irqrestore(ap->lock, flags);
	mv_port_free_dma_mem(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
			mv_sg->reserved = 0;

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	mb(); /* ensure data structure is visible to the chipset */
}

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
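
/*
 * Word layout produced above (illustrative): the data byte in bits 7:0,
 * the ATA register address at CRQB_CMD_ADDR_SHIFT (bit 8), the
 * CRQB_CMD_CS control-strobe bits, and CRQB_CMD_LAST set on the final
 * word of a command sequence.
 */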
/**
 * mv_sff_irq_clear - Clear hardware interrupt after DMA.
 * @ap: Port associated with this ATA transaction.
 *
 * We need this only for ATAPI bmdma transactions,
 * as otherwise we experience spurious interrupts
 * after libata-sff handles the bmdma interrupts.
 */
static void mv_sff_irq_clear(struct ata_port *ap)
{
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}

/**
 * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
 * @qc: queued command to check for chipset/DMA compatibility.
 *
 * The bmdma engines cannot handle speculative data sizes
 * (bytecount under/over flow). So only allow DMA for
 * data transfer commands with known data sizes.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (scmd) {
		switch (scmd->cmnd[0]) {
		case READ_6:
		case READ_10:
		case READ_12:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case GPCMD_READ_CD:
		case GPCMD_SEND_DVD_STRUCTURE:
		case GPCMD_SEND_CUE_SHEET:
			return 0; /* DMA is safe */
		}
	}
	return -EOPNOTSUPP; /* use PIO instead */
}

/**
 * mv_bmdma_setup - Set up BMDMA transaction
 * @qc: queued command to prepare DMA for.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	mv_fill_sg(qc);

	/* clear all DMA cmd bits */
	writel(0, port_mmio + BMDMA_CMD);

	/* load PRD table addr. */
	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
		port_mmio + BMDMA_PRD_HIGH);
	writelfl(pp->sg_tbl_dma[qc->tag],
		port_mmio + BMDMA_PRD_LOW);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

/**
 * mv_bmdma_start - Start a BMDMA transaction
 * @qc: queued command to start DMA on.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;

	/* start host DMA transaction */
	writelfl(cmd, port_mmio + BMDMA_CMD);
}

/**
 * mv_bmdma_stop - Stop BMDMA transfer
 * @qc: queued command to stop DMA on.
 *
 * Clears the ATA_DMA_START flag in the bmdma control register
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 cmd;

	/* clear start/stop bit */
	cmd = readl(port_mmio + BMDMA_CMD);
	cmd &= ~ATA_DMA_START;
	writelfl(cmd, port_mmio + BMDMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}

/**
 * mv_bmdma_status - Read BMDMA status
 * @ap: port for which to retrieve DMA status.
 *
 * Read and return equivalent of the sff BMDMA status register.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 mv_bmdma_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 reg, status;

	/*
	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
	 * and the ATA_DMA_INTR bit doesn't exist.
	 */
	reg = readl(port_mmio + BMDMA_STATUS);
	if (reg & ATA_DMA_ACTIVE)
		status = ATA_DMA_ACTIVE;
	else
		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
	return status;
}
1930 static void mv_rw_multi_errata_sata24(struct ata_queued_cmd *qc)
1932 struct ata_taskfile *tf = &qc->tf;
1934 * Workaround for 88SX60x1 FEr SATA#24.
1936 * Chip may corrupt WRITEs if multi_count >= 4kB.
1937 * Note that READs are unaffected.
1939 * It's not clear if this errata really means "4K bytes",
1940 * or if it always happens for multi_count > 7
1941 * regardless of device sector_size.
1943 * So, for safety, any write with multi_count > 7
1944 * gets converted here into a regular PIO write instead:
1946 if ((tf->flags & ATA_TFLAG_WRITE) && is_multi_taskfile(tf)) {
1947 if (qc->dev->multi_count > 7) {
1948 switch (tf->command) {
1949 case ATA_CMD_WRITE_MULTI:
1950 tf->command = ATA_CMD_PIO_WRITE;
1952 case ATA_CMD_WRITE_MULTI_FUA_EXT:
1953 tf->flags &= ~ATA_TFLAG_FUA; /* ugh */
1955 case ATA_CMD_WRITE_MULTI_EXT:
1956 tf->command = ATA_CMD_PIO_WRITE_EXT;
1964 * mv_qc_prep - Host specific command preparation.
1965 * @qc: queued command to prepare
1967 * This routine simply redirects to the general purpose routine
1968 * if command is not DMA. Else, it handles prep of the CRQB
1969 * (command request block), does some sanity checking, and calls
1970 * the SG load routine.
1973 * Inherited from caller.
1975 static void mv_qc_prep(struct ata_queued_cmd *qc)
1977 struct ata_port *ap = qc->ap;
1978 struct mv_port_priv *pp = ap->private_data;
1980 struct ata_taskfile *tf = &qc->tf;
1984 switch (tf->protocol) {
1987 break; /* continue below */
1989 mv_rw_multi_errata_sata24(qc);
1995 /* Fill in command request block
1997 if (!(tf->flags & ATA_TFLAG_WRITE))
1998 flags |= CRQB_FLAG_READ;
1999 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2000 flags |= qc->tag << CRQB_TAG_SHIFT;
2001 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2003 /* get current queue index from software */
2004 in_index = pp->req_idx;
2006 pp->crqb[in_index].sg_addr =
2007 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2008 pp->crqb[in_index].sg_addr_hi =
2009 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2010 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
2012 cw = &pp->crqb[in_index].ata_cmd[0];
2014 /* Sadly, the CRQB cannot accomodate all registers--there are
2015 * only 11 bytes...so we must pick and choose required
2016 * registers based on the command. So, we drop feature and
2017 * hob_feature for [RW] DMA commands, but they are needed for
2018 * NCQ. NCQ will drop hob_nsect, which is not needed there
2019 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
2021 switch (tf->command) {
2023 case ATA_CMD_READ_EXT:
2025 case ATA_CMD_WRITE_EXT:
2026 case ATA_CMD_WRITE_FUA_EXT:
2027 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
2029 case ATA_CMD_FPDMA_READ:
2030 case ATA_CMD_FPDMA_WRITE:
2031 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
2032 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
2035 /* The only other commands EDMA supports in non-queued and
2036 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
2037 * of which are defined/used by Linux. If we get here, this
2038 * driver needs work.
2040 * FIXME: modify libata to give qc_prep a return value and
2041 * return error here.
2043 BUG_ON(tf->command);
2046 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
2047 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
2048 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
2049 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
2050 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
2051 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
2052 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
2053 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
2054 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
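/*
 * Each mv_crqb_pack_cmd() call above encodes one (register, value)
 * pair into a 16-bit CRQB command word; the final call passes 1 for
 * its "last" argument so the EDMA engine knows where the list ends.
 */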
2056 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2062 * mv_qc_prep_iie - Host specific command preparation.
2063 * @qc: queued command to prepare
2065 * This routine simply redirects to the general purpose routine
2066 * if command is not DMA. Else, it handles prep of the CRQB
2067 * (command request block), does some sanity checking, and calls
2068 * the SG load routine.
2071 * Inherited from caller.
2073 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
2075 struct ata_port *ap = qc->ap;
2076 struct mv_port_priv *pp = ap->private_data;
2077 struct mv_crqb_iie *crqb;
2078 struct ata_taskfile *tf = &qc->tf;
2082 if ((tf->protocol != ATA_PROT_DMA) &&
2083 (tf->protocol != ATA_PROT_NCQ))
2086 /* Fill in Gen IIE command request block */
2087 if (!(tf->flags & ATA_TFLAG_WRITE))
2088 flags |= CRQB_FLAG_READ;
2090 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
2091 flags |= qc->tag << CRQB_TAG_SHIFT;
2092 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
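/* Gen-IIE wants the tag in both the TAG and HOSTQ fields of the flags word. */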
2093 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
2095 /* get current queue index from software */
2096 in_index = pp->req_idx;
2098 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
2099 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
2100 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
2101 crqb->flags = cpu_to_le32(flags);
2103 crqb->ata_cmd[0] = cpu_to_le32(
2104 (tf->command << 16) |
2107 crqb->ata_cmd[1] = cpu_to_le32(
2113 crqb->ata_cmd[2] = cpu_to_le32(
2114 (tf->hob_lbal << 0) |
2115 (tf->hob_lbam << 8) |
2116 (tf->hob_lbah << 16) |
2117 (tf->hob_feature << 24)
2119 crqb->ata_cmd[3] = cpu_to_le32(
2121 (tf->hob_nsect << 8)
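/*
 * Unlike the packed (register, value) command words built by
 * mv_qc_prep() above, the Gen-IIE CRQB carries the taskfile as four
 * raw 32-bit words with the fields packed at fixed byte offsets.
 */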
2124 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2130 * mv_sff_check_status - fetch device status, if valid
2131 * @ap: ATA port to fetch status from
2133 * When using command issue via mv_qc_issue_fis(),
2134 * the initial ATA_BUSY state does not show up in the
2135 * ATA status (shadow) register. This can confuse libata!
2137 * So we have a hook here to fake ATA_BUSY for that situation,
2138 * until the first time a BUSY, DRQ, or ERR bit is seen.
2140 * The rest of the time, it simply returns the ATA status register.
2142 static u8 mv_sff_check_status(struct ata_port *ap)
2144 u8 stat = ioread8(ap->ioaddr.status_addr);
2145 struct mv_port_priv *pp = ap->private_data;
2147 if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
2148 if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
2149 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
else
stat = ATA_BUSY; /* keep faking BUSY until a real state shows up */
}
return stat;
2157 * mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
2158 * @fis: fis to be sent
2159 * @nwords: number of 32-bit words in the fis
2161 static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
2163 void __iomem *port_mmio = mv_ap_base(ap);
2164 u32 ifctl, old_ifctl, ifstat;
2165 int i, timeout = 200, final_word = nwords - 1;
2167 /* Initiate FIS transmission mode */
2168 old_ifctl = readl(port_mmio + SATA_IFCTL);
2169 ifctl = 0x100 | (old_ifctl & 0xf);
2170 writelfl(ifctl, port_mmio + SATA_IFCTL);
2172 /* Send all words of the FIS except for the final word */
2173 for (i = 0; i < final_word; ++i)
2174 writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS);
2176 /* Flag end-of-transmission, and then send the final word */
2177 writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL);
2178 writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS);
2181 * Wait for FIS transmission to complete.
2182 * This typically takes just a single iteration.
2185 ifstat = readl(port_mmio + SATA_IFSTAT);
2186 } while (!(ifstat & 0x1000) && --timeout);
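/*
 * Bit 0x1000 in SATA_IFSTAT appears to signal "FIS transmit done";
 * the success check below ((ifstat & 0x3000) != 0x1000) additionally
 * treats bit 0x2000 as a transmit-error indication.
 */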
2188 /* Restore original port configuration */
2189 writelfl(old_ifctl, port_mmio + SATA_IFCTL);
2191 /* See if it worked */
2192 if ((ifstat & 0x3000) != 0x1000) {
2193 ata_port_printk(ap, KERN_WARNING,
2194 "%s transmission error, ifstat=%08x\n",
2196 return AC_ERR_OTHER;
2202 * mv_qc_issue_fis - Issue a command directly as a FIS
2203 * @qc: queued command to start
2205 * Note that the ATA shadow registers are not updated
2206 * after command issue, so the device will appear "READY"
2207 * if polled, even while it is BUSY processing the command.
2209 * So we use a status hook to fake ATA_BUSY until the drive changes state.
2211 * Note: we don't get updated shadow regs on *completion*
2212 * of non-data commands. So avoid sending them via this function,
2213 * as they will appear to have completed immediately.
2215 * GEN_IIE has special registers that we could get the result tf from,
2216 * but earlier chipsets do not. For now, we ignore those registers.
2218 static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
2220 struct ata_port *ap = qc->ap;
2221 struct mv_port_priv *pp = ap->private_data;
2222 struct ata_link *link = qc->dev->link;
2226 ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
2227 err = mv_send_fis(ap, fis, ARRAY_SIZE(fis));
2231 switch (qc->tf.protocol) {
2232 case ATAPI_PROT_PIO:
2233 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2235 case ATAPI_PROT_NODATA:
2236 ap->hsm_task_state = HSM_ST_FIRST;
2239 pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
2240 if (qc->tf.flags & ATA_TFLAG_WRITE)
2241 ap->hsm_task_state = HSM_ST_FIRST;
2243 ap->hsm_task_state = HSM_ST;
2246 ap->hsm_task_state = HSM_ST_LAST;
2250 if (qc->tf.flags & ATA_TFLAG_POLLING)
2251 ata_pio_queue_task(ap, qc, 0);
2256 * mv_qc_issue - Initiate a command to the host
2257 * @qc: queued command to start
2259 * This routine simply redirects to the general purpose routine
2260 * if command is not DMA. Else, it sanity checks our local
2261 * caches of the request producer/consumer indices then enables
2262 * DMA and bumps the request producer index.
2265 * Inherited from caller.
2267 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
2269 static int limit_warnings = 10;
2270 struct ata_port *ap = qc->ap;
2271 void __iomem *port_mmio = mv_ap_base(ap);
2272 struct mv_port_priv *pp = ap->private_data;
2274 unsigned int port_irqs;
2276 pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */
2278 switch (qc->tf.protocol) {
2281 mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
2282 pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2283 in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
2285 /* Write the request in pointer to kick the EDMA to life */
2286 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
2287 port_mmio + EDMA_REQ_Q_IN_PTR);
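/*
 * Queue handoff sketch: software owns the request IN pointer. Each
 * issue bumps req_idx modulo MV_MAX_Q_DEPTH and publishes it via
 * EDMA_REQ_Q_IN_PTR; the EDMA engine then consumes CRQBs until its
 * hardware-owned OUT pointer catches up.
 */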
2292 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
2294 * Someday, we might implement special polling workarounds
2295 * for these, but it all seems rather unnecessary since we
2296 * normally use only DMA for commands which transfer more
2297 * than a single block of data.
2299 * Much of the time, this could just work regardless.
2300 * So for now, just log the incident, and allow the attempt.
2302 if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
2304 ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
2305 ": attempting PIO w/multiple DRQ: "
2306 "this may fail due to h/w errata\n");
2309 case ATA_PROT_NODATA:
2310 case ATAPI_PROT_PIO:
2311 case ATAPI_PROT_NODATA:
2312 if (ap->flags & ATA_FLAG_PIO_POLLING)
2313 qc->tf.flags |= ATA_TFLAG_POLLING;
2317 if (qc->tf.flags & ATA_TFLAG_POLLING)
2318 port_irqs = ERR_IRQ; /* mask device interrupt when polling */
2320 port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
2323 * We're about to send a non-EDMA capable command to the
2324 * port. Turn off EDMA so there won't be problems accessing
2325 * shadow block, etc. registers.
2328 mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
2329 mv_pmp_select(ap, qc->dev->link->pmp);
2331 if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
2332 struct mv_host_priv *hpriv = ap->host->private_data;
2334 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
2336 * After any NCQ error, the READ_LOG_EXT command
2337 * from libata-eh *must* use mv_qc_issue_fis().
2338 * Otherwise it might fail, due to chip errata.
2340 * Rather than special-case it, we'll just *always*
2341 * use this method here for READ_LOG_EXT, making for
* cleaner, more robust code.
2344 if (IS_GEN_II(hpriv))
2345 return mv_qc_issue_fis(qc);
2347 return ata_sff_qc_issue(qc);
2350 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2352 struct mv_port_priv *pp = ap->private_data;
2353 struct ata_queued_cmd *qc;
2355 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2357 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2358 if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
2363 static void mv_pmp_error_handler(struct ata_port *ap)
2365 unsigned int pmp, pmp_map;
2366 struct mv_port_priv *pp = ap->private_data;
2368 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2370 * Perform NCQ error analysis on failed PMPs
2371 * before we freeze the port entirely.
2373 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2375 pmp_map = pp->delayed_eh_pmp_map;
2376 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2377 for (pmp = 0; pmp_map != 0; pmp++) {
2378 unsigned int this_pmp = (1 << pmp);
2379 if (pmp_map & this_pmp) {
2380 struct ata_link *link = &ap->pmp_link[pmp];
2381 pmp_map &= ~this_pmp;
2382 ata_eh_analyze_ncq_error(link);
2385 ata_port_freeze(ap);
2387 sata_pmp_error_handler(ap);
2390 static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2392 void __iomem *port_mmio = mv_ap_base(ap);
2394 return readl(port_mmio + SATA_TESTCTL) >> 16;
2397 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2399 struct ata_eh_info *ehi;
2403 * Initialize EH info for PMPs which saw device errors
2405 ehi = &ap->link.eh_info;
2406 for (pmp = 0; pmp_map != 0; pmp++) {
2407 unsigned int this_pmp = (1 << pmp);
2408 if (pmp_map & this_pmp) {
2409 struct ata_link *link = &ap->pmp_link[pmp];
2411 pmp_map &= ~this_pmp;
2412 ehi = &link->eh_info;
2413 ata_ehi_clear_desc(ehi);
2414 ata_ehi_push_desc(ehi, "dev err");
2415 ehi->err_mask |= AC_ERR_DEV;
2416 ehi->action |= ATA_EH_RESET;
2417 ata_link_abort(link);
2422 static int mv_req_q_empty(struct ata_port *ap)
2424 void __iomem *port_mmio = mv_ap_base(ap);
2425 u32 in_ptr, out_ptr;
2427 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
2428 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2429 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
2430 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2431 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
2434 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2436 struct mv_port_priv *pp = ap->private_data;
2438 unsigned int old_map, new_map;
2441 * Device error during FBS+NCQ operation:
2443 * Set a port flag to prevent further I/O being enqueued.
2444 * Leave the EDMA running to drain outstanding commands from this port.
2445 * Perform the post-mortem/EH only when all responses are complete.
2446 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2448 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2449 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2450 pp->delayed_eh_pmp_map = 0;
2452 old_map = pp->delayed_eh_pmp_map;
2453 new_map = old_map | mv_get_err_pmp_map(ap);
2455 if (old_map != new_map) {
2456 pp->delayed_eh_pmp_map = new_map;
2457 mv_pmp_eh_prep(ap, new_map & ~old_map);
2459 failed_links = hweight16(new_map);
2461 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
2462 "failed_links=%d nr_active_links=%d\n",
2463 __func__, pp->delayed_eh_pmp_map,
2464 ap->qc_active, failed_links,
2465 ap->nr_active_links);
2467 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2468 mv_process_crpb_entries(ap, pp);
2471 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
2472 return 1; /* handled */
2474 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
2475 return 1; /* handled */
2478 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2481 * Possible future enhancement:
2483 * FBS+non-NCQ operation is not yet implemented.
2484 * See related notes in mv_edma_cfg().
2486 * Device error during FBS+non-NCQ operation:
2488 * We need to snapshot the shadow registers for each failed command.
2489 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2491 return 0; /* not handled */
2494 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2496 struct mv_port_priv *pp = ap->private_data;
2498 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2499 return 0; /* EDMA was not active: not handled */
2500 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2501 return 0; /* FBS was not active: not handled */
2503 if (!(edma_err_cause & EDMA_ERR_DEV))
2504 return 0; /* non DEV error: not handled */
2505 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2506 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2507 return 0; /* other problems: not handled */
2509 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2511 * EDMA should NOT have self-disabled for this case.
2512 * If it did, then something is wrong elsewhere,
2513 * and we cannot handle it here.
2515 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2516 ata_port_printk(ap, KERN_WARNING,
2517 "%s: err_cause=0x%x pp_flags=0x%x\n",
2518 __func__, edma_err_cause, pp->pp_flags);
2519 return 0; /* not handled */
2521 return mv_handle_fbs_ncq_dev_err(ap);
2524 * EDMA should have self-disabled for this case.
2525 * If it did not, then something is wrong elsewhere,
2526 * and we cannot handle it here.
2528 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2529 ata_port_printk(ap, KERN_WARNING,
2530 "%s: err_cause=0x%x pp_flags=0x%x\n",
2531 __func__, edma_err_cause, pp->pp_flags);
2532 return 0; /* not handled */
2534 return mv_handle_fbs_non_ncq_dev_err(ap);
2536 return 0; /* not handled */
2539 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2541 struct ata_eh_info *ehi = &ap->link.eh_info;
2542 char *when = "idle";
2544 ata_ehi_clear_desc(ehi);
2545 if (edma_was_enabled) {
2546 when = "EDMA enabled";
2548 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2549 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2552 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2553 ehi->err_mask |= AC_ERR_OTHER;
2554 ehi->action |= ATA_EH_RESET;
2555 ata_port_freeze(ap);
2559 * mv_err_intr - Handle error interrupts on the port
2560 * @ap: ATA channel to manipulate
2562 * Most cases require a full reset of the chip's state machine,
2563 * which also performs a COMRESET.
2564 * Also, if the port disabled DMA, update our cached copy to match.
2567 * Inherited from caller.
2569 static void mv_err_intr(struct ata_port *ap)
2571 void __iomem *port_mmio = mv_ap_base(ap);
2572 u32 edma_err_cause, eh_freeze_mask, serr = 0;
2574 struct mv_port_priv *pp = ap->private_data;
2575 struct mv_host_priv *hpriv = ap->host->private_data;
2576 unsigned int action = 0, err_mask = 0;
2577 struct ata_eh_info *ehi = &ap->link.eh_info;
2578 struct ata_queued_cmd *qc;
2582 * Read and clear the SError and err_cause bits.
2583 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2584 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2586 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2587 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2589 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE);
2590 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2591 fis_cause = readl(port_mmio + FIS_IRQ_CAUSE);
2592 writelfl(~fis_cause, port_mmio + FIS_IRQ_CAUSE);
2594 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE);
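/*
 * These IRQ cause registers clear on writing 0 to the asserted bits,
 * so writing the complement of what we just read acks exactly those
 * events and nothing that arrives afterwards.
 */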
2596 if (edma_err_cause & EDMA_ERR_DEV) {
2598 * Device errors during FIS-based switching operation
2599 * require special handling.
2601 if (mv_handle_dev_err(ap, edma_err_cause))
2605 qc = mv_get_active_qc(ap);
2606 ata_ehi_clear_desc(ehi);
2607 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2608 edma_err_cause, pp->pp_flags);
2610 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2611 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2612 if (fis_cause & FIS_IRQ_CAUSE_AN) {
2613 u32 ec = edma_err_cause &
2614 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2615 sata_async_notification(ap);
if (!ec)
2617 return; /* Just an AN; no need for the nukes */
2618 ata_ehi_push_desc(ehi, "SDB notify");
2622 * All generations share these EDMA error cause bits:
2624 if (edma_err_cause & EDMA_ERR_DEV) {
2625 err_mask |= AC_ERR_DEV;
2626 action |= ATA_EH_RESET;
2627 ata_ehi_push_desc(ehi, "dev error");
2629 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2630 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2631 EDMA_ERR_INTRL_PAR)) {
2632 err_mask |= AC_ERR_ATA_BUS;
2633 action |= ATA_EH_RESET;
2634 ata_ehi_push_desc(ehi, "parity error");
2636 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2637 ata_ehi_hotplugged(ehi);
2638 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2639 "dev disconnect" : "dev connect");
2640 action |= ATA_EH_RESET;
2644 * Gen-I has a different SELF_DIS bit,
2645 * different FREEZE bits, and no SERR bit:
2647 if (IS_GEN_I(hpriv)) {
2648 eh_freeze_mask = EDMA_EH_FREEZE_5;
2649 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2650 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2651 ata_ehi_push_desc(ehi, "EDMA self-disable");
2654 eh_freeze_mask = EDMA_EH_FREEZE;
2655 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2656 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2657 ata_ehi_push_desc(ehi, "EDMA self-disable");
2659 if (edma_err_cause & EDMA_ERR_SERR) {
2660 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2661 err_mask |= AC_ERR_ATA_BUS;
2662 action |= ATA_EH_RESET;
2667 err_mask = AC_ERR_OTHER;
2668 action |= ATA_EH_RESET;
2671 ehi->serror |= serr;
2672 ehi->action |= action;
2675 qc->err_mask |= err_mask;
2677 ehi->err_mask |= err_mask;
2679 if (err_mask == AC_ERR_DEV) {
2681 * Cannot do ata_port_freeze() here,
2682 * because it would kill PIO access,
2683 * which is needed for further diagnosis.
2687 } else if (edma_err_cause & eh_freeze_mask) {
2689 * Note to self: ata_port_freeze() calls ata_port_abort()
2691 ata_port_freeze(ap);
2698 ata_link_abort(qc->dev->link);
2704 static void mv_process_crpb_response(struct ata_port *ap,
2705 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2707 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
2711 u16 edma_status = le16_to_cpu(response->flags);
2713 * edma_status from a response queue entry:
2714 * LSB is from EDMA_ERR_IRQ_CAUSE (non-NCQ only).
2715 * MSB is saved ATA status from command completion.
2718 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2721 * Error will be seen/handled by mv_err_intr().
2722 * So do nothing at all here.
2727 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2728 if (!ac_err_mask(ata_status))
2729 ata_qc_complete(qc);
2730 /* else: leave it for mv_err_intr() */
2732 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
2737 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2739 void __iomem *port_mmio = mv_ap_base(ap);
2740 struct mv_host_priv *hpriv = ap->host->private_data;
2742 bool work_done = false;
2743 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2745 /* Get the hardware queue position index */
2746 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR)
2747 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2749 /* Process new responses received since the last time we looked */
2750 while (in_index != pp->resp_idx) {
2752 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2754 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2756 if (IS_GEN_I(hpriv)) {
2757 /* 50xx: no NCQ, only one command active at a time */
2758 tag = ap->link.active_tag;
2760 /* Gen II/IIE: get command tag from CRPB entry */
2761 tag = le16_to_cpu(response->id) & 0x1f;
2763 mv_process_crpb_response(ap, response, tag, ncq_enabled);
2767 /* Update the software queue position index in hardware */
2769 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2770 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2771 port_mmio + EDMA_RSP_Q_OUT_PTR);
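/*
 * Mirror image of the request queue: here hardware owns the response
 * IN pointer (read at the top of this function) and software owns the
 * OUT pointer, advanced above as each CRPB is consumed.
 */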
2774 static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2776 struct mv_port_priv *pp;
2777 int edma_was_enabled;
2780 * Grab a snapshot of the EDMA_EN flag setting,
2781 * so that we have a consistent view for this port,
2782 * even if one of the routines we call changes it.
2784 pp = ap->private_data;
2785 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2787 * Process completed CRPB response(s) before other events.
2789 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2790 mv_process_crpb_entries(ap, pp);
2791 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2792 mv_handle_fbs_ncq_dev_err(ap);
2795 * Handle chip-reported errors, or continue on to handle PIO.
2797 if (unlikely(port_cause & ERR_IRQ)) {
2799 } else if (!edma_was_enabled) {
2800 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2802 ata_sff_host_intr(ap, qc);
2804 mv_unexpected_intr(ap, edma_was_enabled);
2809 * mv_host_intr - Handle all interrupts on the given host controller
2810 * @host: host specific structure
2811 * @main_irq_cause: Main interrupt cause register for the chip.
2814 * Inherited from caller.
2816 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2818 struct mv_host_priv *hpriv = host->private_data;
2819 void __iomem *mmio = hpriv->base, *hc_mmio;
2820 unsigned int handled = 0, port;
2822 /* If asserted, clear the "all ports" IRQ coalescing bit */
2823 if (main_irq_cause & ALL_PORTS_COAL_DONE)
2824 writel(~ALL_PORTS_COAL_IRQ, mmio + IRQ_COAL_CAUSE);
2826 for (port = 0; port < hpriv->n_ports; port++) {
2827 struct ata_port *ap = host->ports[port];
2828 unsigned int p, shift, hardport, port_cause;
2830 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
2832 * Each hc within the host has its own hc_irq_cause register,
2833 * where the interrupting ports bits get ack'd.
2835 if (hardport == 0) { /* first port on this hc ? */
2836 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2837 u32 port_mask, ack_irqs;
2839 * Skip this entire hc if nothing pending for any ports
2842 port += MV_PORTS_PER_HC - 1;
2846 * We don't need/want to read the hc_irq_cause register,
2847 * because doing so hurts performance, and
2848 * main_irq_cause already gives us everything we need.
2850 * But we do have to *write* to the hc_irq_cause to ack
2851 * the ports that we are handling this time through.
2853 * This requires that we create a bitmap for those
2854 * ports which interrupted us, and use that bitmap
2855 * to ack (only) those ports via hc_irq_cause.
2858 if (hc_cause & PORTS_0_3_COAL_DONE)
2859 ack_irqs = HC_COAL_IRQ;
2860 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2861 if ((port + p) >= hpriv->n_ports)
2863 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2864 if (hc_cause & port_mask)
2865 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2867 hc_mmio = mv_hc_base_from_port(mmio, port);
2868 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE);
2872 * Handle interrupts signalled for this port:
2874 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2876 mv_port_intr(ap, port_cause);
2881 static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2883 struct mv_host_priv *hpriv = host->private_data;
2884 struct ata_port *ap;
2885 struct ata_queued_cmd *qc;
2886 struct ata_eh_info *ehi;
2887 unsigned int i, err_mask, printed = 0;
2890 err_cause = readl(mmio + hpriv->irq_cause_offset);
2892 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
2895 DPRINTK("All regs @ PCI error\n");
2896 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2898 writelfl(0, mmio + hpriv->irq_cause_offset);
2900 for (i = 0; i < host->n_ports; i++) {
2901 ap = host->ports[i];
2902 if (!ata_link_offline(&ap->link)) {
2903 ehi = &ap->link.eh_info;
2904 ata_ehi_clear_desc(ehi);
2906 ata_ehi_push_desc(ehi,
2907 "PCI err cause 0x%08x", err_cause);
2908 err_mask = AC_ERR_HOST_BUS;
2909 ehi->action = ATA_EH_RESET;
2910 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2912 qc->err_mask |= err_mask;
2914 ehi->err_mask |= err_mask;
2916 ata_port_freeze(ap);
2919 return 1; /* handled */
2923 * mv_interrupt - Main interrupt event handler
2925 * @dev_instance: private data; in this case the host structure
2927 * Read the read-only cause register to determine if any host
2928 * controllers have pending interrupts. If so, call the lower-level
2929 * routine to handle them. Also check for PCI errors, which are only
* reported here.
2933 * This routine holds the host lock while processing pending
* interrupts.
2936 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
2938 struct ata_host *host = dev_instance;
2939 struct mv_host_priv *hpriv = host->private_data;
2940 unsigned int handled = 0;
2941 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
2942 u32 main_irq_cause, pending_irqs;
2944 spin_lock(&host->lock);
2946 /* for MSI: block new interrupts while in here */
2948 mv_write_main_irq_mask(0, hpriv);
2950 main_irq_cause = readl(hpriv->main_irq_cause_addr);
2951 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
2953 * Deal with cases where we either have nothing pending, or have read
2954 * a bogus register value which can indicate HW removal or PCI fault.
2956 if (pending_irqs && main_irq_cause != 0xffffffffU) {
2957 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
2958 handled = mv_pci_error(host, hpriv->base);
2960 handled = mv_host_intr(host, pending_irqs);
2963 /* for MSI: unmask; interrupt cause bits will retrigger now */
2965 mv_write_main_irq_mask(hpriv->main_irq_mask, hpriv);
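/*
 * The mask-then-unmask dance above matters mainly for edge-like MSI
 * delivery: any cause bits still pending when we unmask will fire a
 * fresh message, whereas a masked level-triggered INTx would simply
 * re-assert on its own.
 */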
2967 spin_unlock(&host->lock);
2969 return IRQ_RETVAL(handled);
2972 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
2976 switch (sc_reg_in) {
2980 ofs = sc_reg_in * sizeof(u32);
2989 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
2991 struct mv_host_priv *hpriv = link->ap->host->private_data;
2992 void __iomem *mmio = hpriv->base;
2993 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
2994 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2996 if (ofs != 0xffffffffU) {
2997 *val = readl(addr + ofs);
3003 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
3005 struct mv_host_priv *hpriv = link->ap->host->private_data;
3006 void __iomem *mmio = hpriv->base;
3007 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
3008 unsigned int ofs = mv5_scr_offset(sc_reg_in);
3010 if (ofs != 0xffffffffU) {
3011 writelfl(val, addr + ofs);
3017 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
3019 struct pci_dev *pdev = to_pci_dev(host->dev);
3022 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
3025 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3027 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3030 mv_reset_pci_bus(host, mmio);
3033 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3035 writel(0x0fcfffff, mmio + FLASH_CTL);
3038 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
3041 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
3044 tmp = readl(phy_mmio + MV5_PHY_MODE);
3046 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
3047 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
3050 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3054 writel(0, mmio + GPIO_PORT_CTL);
3056 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
3058 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
3060 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
3063 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3066 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
3067 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
3069 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
3072 tmp = readl(phy_mmio + MV5_LTMODE);
3074 writel(tmp, phy_mmio + MV5_LTMODE);
3076 tmp = readl(phy_mmio + MV5_PHY_CTL);
3079 writel(tmp, phy_mmio + MV5_PHY_CTL);
3082 tmp = readl(phy_mmio + MV5_PHY_MODE);
3084 tmp |= hpriv->signal[port].pre;
3085 tmp |= hpriv->signal[port].amps;
3086 writel(tmp, phy_mmio + MV5_PHY_MODE);
3091 #define ZERO(reg) writel(0, port_mmio + (reg))
3092 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
3095 void __iomem *port_mmio = mv_port_base(mmio, port);
3097 mv_reset_channel(hpriv, mmio, port);
3099 ZERO(0x028); /* command */
3100 writel(0x11f, port_mmio + EDMA_CFG);
3101 ZERO(0x004); /* timer */
3102 ZERO(0x008); /* irq err cause */
3103 ZERO(0x00c); /* irq err mask */
3104 ZERO(0x010); /* rq bah */
3105 ZERO(0x014); /* rq inp */
3106 ZERO(0x018); /* rq outp */
3107 ZERO(0x01c); /* respq bah */
3108 ZERO(0x024); /* respq outp */
3109 ZERO(0x020); /* respq inp */
3110 ZERO(0x02c); /* test control */
3111 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
3115 #define ZERO(reg) writel(0, hc_mmio + (reg))
3116 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3119 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3127 tmp = readl(hc_mmio + 0x20);
3130 writel(tmp, hc_mmio + 0x20);
3134 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3137 unsigned int hc, port;
3139 for (hc = 0; hc < n_hc; hc++) {
3140 for (port = 0; port < MV_PORTS_PER_HC; port++)
3141 mv5_reset_hc_port(hpriv, mmio,
3142 (hc * MV_PORTS_PER_HC) + port);
3144 mv5_reset_one_hc(hpriv, mmio, hc);
3151 #define ZERO(reg) writel(0, mmio + (reg))
3152 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
3154 struct mv_host_priv *hpriv = host->private_data;
3157 tmp = readl(mmio + MV_PCI_MODE);
3159 writel(tmp, mmio + MV_PCI_MODE);
3161 ZERO(MV_PCI_DISC_TIMER);
3162 ZERO(MV_PCI_MSI_TRIGGER);
3163 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
3164 ZERO(MV_PCI_SERR_MASK);
3165 ZERO(hpriv->irq_cause_offset);
3166 ZERO(hpriv->irq_mask_offset);
3167 ZERO(MV_PCI_ERR_LOW_ADDRESS);
3168 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
3169 ZERO(MV_PCI_ERR_ATTRIBUTE);
3170 ZERO(MV_PCI_ERR_COMMAND);
3174 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
3178 mv5_reset_flash(hpriv, mmio);
3180 tmp = readl(mmio + GPIO_PORT_CTL);
3182 tmp |= (1 << 5) | (1 << 6);
3183 writel(tmp, mmio + GPIO_PORT_CTL);
3187 * mv6_reset_hc - Perform the 6xxx global soft reset
3188 * @mmio: base address of the HBA
3190 * This routine only applies to 6xxx parts.
3193 * Inherited from caller.
3195 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
3198 void __iomem *reg = mmio + PCI_MAIN_CMD_STS;
3202 /* Following procedure defined in PCI "main command and status
3206 writel(t | STOP_PCI_MASTER, reg);
3208 for (i = 0; i < 1000; i++) {
3211 if (PCI_MASTER_EMPTY & t)
3214 if (!(PCI_MASTER_EMPTY & t)) {
3215 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
3223 writel(t | GLOB_SFT_RST, reg);
3226 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
3228 if (!(GLOB_SFT_RST & t)) {
3229 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
3234 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
3237 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
3240 } while ((GLOB_SFT_RST & t) && (i-- > 0));
3242 if (GLOB_SFT_RST & t) {
3243 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
3250 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
3253 void __iomem *port_mmio;
3256 tmp = readl(mmio + RESET_CFG);
3257 if ((tmp & (1 << 0)) == 0) {
3258 hpriv->signal[idx].amps = 0x7 << 8;
3259 hpriv->signal[idx].pre = 0x1 << 5;
3263 port_mmio = mv_port_base(mmio, idx);
3264 tmp = readl(port_mmio + PHY_MODE2);
3266 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3267 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3270 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
3272 writel(0x00000060, mmio + GPIO_PORT_CTL);
3275 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
3278 void __iomem *port_mmio = mv_port_base(mmio, port);
3280 u32 hp_flags = hpriv->hp_flags;
3282 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3284 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
3287 if (fix_phy_mode2) {
3288 m2 = readl(port_mmio + PHY_MODE2);
3291 writel(m2, port_mmio + PHY_MODE2);
3295 m2 = readl(port_mmio + PHY_MODE2);
3296 m2 &= ~((1 << 16) | (1 << 31));
3297 writel(m2, port_mmio + PHY_MODE2);
3303 * Gen-II/IIe PHY_MODE3 errata RM#2:
3304 * Achieves better receiver noise performance than the h/w default:
3306 m3 = readl(port_mmio + PHY_MODE3);
3307 m3 = (m3 & 0x1f) | (0x5555601 << 5);
3309 /* Guideline 88F5182 (GL# SATA-S11) */
3313 if (fix_phy_mode4) {
3314 u32 m4 = readl(port_mmio + PHY_MODE4);
3316 * Enforce reserved-bit restrictions on GenIIe devices only.
3317 * For earlier chipsets, force only the internal config field
3318 * (workaround for errata FEr SATA#10 part 1).
3320 if (IS_GEN_IIE(hpriv))
3321 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
3323 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
3324 writel(m4, port_mmio + PHY_MODE4);
3327 * Workaround for 60x1-B2 errata SATA#13:
3328 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3329 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3330 * Or ensure we use writelfl() when writing PHY_MODE4.
3332 writel(m3, port_mmio + PHY_MODE3);
3334 /* Revert values of pre-emphasis and signal amps to the saved ones */
3335 m2 = readl(port_mmio + PHY_MODE2);
3337 m2 &= ~MV_M2_PREAMP_MASK;
3338 m2 |= hpriv->signal[port].amps;
3339 m2 |= hpriv->signal[port].pre;
3342 /* according to mvSata 3.6.1, some IIE values are fixed */
3343 if (IS_GEN_IIE(hpriv)) {
3348 writel(m2, port_mmio + PHY_MODE2);
3351 /* TODO: use the generic LED interface to configure the SATA Presence */
3352 /* & Activity LEDs on the board */
3353 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3359 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3362 void __iomem *port_mmio;
3365 port_mmio = mv_port_base(mmio, idx);
3366 tmp = readl(port_mmio + PHY_MODE2);
3368 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3369 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3373 #define ZERO(reg) writel(0, port_mmio + (reg))
3374 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3375 void __iomem *mmio, unsigned int port)
3377 void __iomem *port_mmio = mv_port_base(mmio, port);
3379 mv_reset_channel(hpriv, mmio, port);
3381 ZERO(0x028); /* command */
3382 writel(0x101f, port_mmio + EDMA_CFG);
3383 ZERO(0x004); /* timer */
3384 ZERO(0x008); /* irq err cause */
3385 ZERO(0x00c); /* irq err mask */
3386 ZERO(0x010); /* rq bah */
3387 ZERO(0x014); /* rq inp */
3388 ZERO(0x018); /* rq outp */
3389 ZERO(0x01c); /* respq bah */
3390 ZERO(0x024); /* respq outp */
3391 ZERO(0x020); /* respq inp */
3392 ZERO(0x02c); /* test control */
3393 writel(0x800, port_mmio + EDMA_IORDY_TMOUT);
3398 #define ZERO(reg) writel(0, hc_mmio + (reg))
3399 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3402 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3412 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3413 void __iomem *mmio, unsigned int n_hc)
3417 for (port = 0; port < hpriv->n_ports; port++)
3418 mv_soc_reset_hc_port(hpriv, mmio, port);
3420 mv_soc_reset_one_hc(hpriv, mmio);
3425 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3431 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3436 static void mv_soc_65n_phy_errata(struct mv_host_priv *hpriv,
3437 void __iomem *mmio, unsigned int port)
3439 void __iomem *port_mmio = mv_port_base(mmio, port);
3442 reg = readl(port_mmio + PHY_MODE3);
3443 reg &= ~(0x3 << 27); /* SELMUPF (bits 28:27) to 1 */
3445 reg &= ~(0x3 << 29); /* SELMUPI (bits 30:29) to 1 */
3447 writel(reg, port_mmio + PHY_MODE3);
3449 reg = readl(port_mmio + PHY_MODE4);
3450 reg &= ~0x1; /* SATU_OD8 (bit 0) to 0, reserved bit 16 must be set */
3452 writel(reg, port_mmio + PHY_MODE4);
3454 reg = readl(port_mmio + PHY_MODE9_GEN2);
3455 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
3457 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
3458 writel(reg, port_mmio + PHY_MODE9_GEN2);
3460 reg = readl(port_mmio + PHY_MODE9_GEN1);
3461 reg &= ~0xf; /* TXAMP[3:0] (bits 3:0) to 8 */
3463 reg &= ~(0x1 << 14); /* TXAMP[4] (bit 14) to 0 */
3464 writel(reg, port_mmio + PHY_MODE9_GEN1);
3468 * soc_is_65n - check whether the SoC is a 65 nm device
3470 * Detect the SoC type by reading the PHYCFG_OFS register: it exists
3471 * only on the 65 nm devices, where it holds a non-zero value;
3472 * reading it on older devices returns 0.
3474 static bool soc_is_65n(struct mv_host_priv *hpriv)
3476 void __iomem *port0_mmio = mv_port_base(hpriv->base, 0);
3478 if (readl(port0_mmio + PHYCFG_OFS))
3483 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3485 u32 ifcfg = readl(port_mmio + SATA_IFCFG);
3487 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
3489 ifcfg |= (1 << 7); /* enable gen2i speed */
3490 writelfl(ifcfg, port_mmio + SATA_IFCFG);
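/*
 * Callers pass want_gen2i=1 to permit 3.0 Gb/s operation (see
 * mv_reset_channel() below) and 0 to force 1.5 Gb/s, which
 * mv_hardreset() uses as a fallback for stubborn links.
 */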
3493 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3494 unsigned int port_no)
3496 void __iomem *port_mmio = mv_port_base(mmio, port_no);
3499 * The datasheet warns against setting EDMA_RESET when EDMA is active
3500 * (but doesn't say what the problem might be). So we first try
3501 * to disable the EDMA engine before doing the EDMA_RESET operation.
3503 mv_stop_edma_engine(port_mmio);
3504 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3506 if (!IS_GEN_I(hpriv)) {
3507 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3508 mv_setup_ifcfg(port_mmio, 1);
3511 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3512 * link, and physical layers. It resets all SATA interface registers
3513 * (except for SATA_IFCFG), and issues a COMRESET to the dev.
3515 writelfl(EDMA_RESET, port_mmio + EDMA_CMD);
3516 udelay(25); /* allow reset propagation */
3517 writelfl(0, port_mmio + EDMA_CMD);
3519 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3521 if (IS_GEN_I(hpriv))
3525 static void mv_pmp_select(struct ata_port *ap, int pmp)
3527 if (sata_pmp_supported(ap)) {
3528 void __iomem *port_mmio = mv_ap_base(ap);
3529 u32 reg = readl(port_mmio + SATA_IFCTL);
3530 int old = reg & 0xf;
3533 reg = (reg & ~0xf) | pmp;
3534 writelfl(reg, port_mmio + SATA_IFCTL);
3539 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3540 unsigned long deadline)
3542 mv_pmp_select(link->ap, sata_srst_pmp(link));
3543 return sata_std_hardreset(link, class, deadline);
3546 static int mv_softreset(struct ata_link *link, unsigned int *class,
3547 unsigned long deadline)
3549 mv_pmp_select(link->ap, sata_srst_pmp(link));
3550 return ata_sff_softreset(link, class, deadline);
3553 static int mv_hardreset(struct ata_link *link, unsigned int *class,
3554 unsigned long deadline)
3556 struct ata_port *ap = link->ap;
3557 struct mv_host_priv *hpriv = ap->host->private_data;
3558 struct mv_port_priv *pp = ap->private_data;
3559 void __iomem *mmio = hpriv->base;
3560 int rc, attempts = 0, extra = 0;
3564 mv_reset_channel(hpriv, mmio, ap->port_no);
3565 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3567 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3569 /* Workaround for errata FEr SATA#10 (part 2) */
3571 const unsigned long *timing =
3572 sata_ehc_deb_timing(&link->eh_context);
3574 rc = sata_link_hardreset(link, timing, deadline + extra,
3576 rc = online ? -EAGAIN : rc;
3579 sata_scr_read(link, SCR_STATUS, &sstatus);
3580 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3581 /* Force 1.5gb/s link speed and try again */
3582 mv_setup_ifcfg(mv_ap_base(ap), 0);
3583 if (time_after(jiffies + HZ, deadline))
3584 extra = HZ; /* only extend it once, max */
3586 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
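/*
 * SStatus decode for the loop above, per the standard DET/SPD/IPM
 * fields: 0x0 = no device, 0x113 = device up at Gen1 (1.5 Gb/s),
 * 0x123 = device up at Gen2 (3.0 Gb/s); 0x121 (tested earlier) is a
 * detected device that never established phy communication.
 */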
3587 mv_save_cached_regs(ap);
3588 mv_edma_cfg(ap, 0, 0);
3593 static void mv_eh_freeze(struct ata_port *ap)
3596 mv_enable_port_irqs(ap, 0);
3599 static void mv_eh_thaw(struct ata_port *ap)
3601 struct mv_host_priv *hpriv = ap->host->private_data;
3602 unsigned int port = ap->port_no;
3603 unsigned int hardport = mv_hardport_from_port(port);
3604 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3605 void __iomem *port_mmio = mv_ap_base(ap);
3608 /* clear EDMA errors on this port */
3609 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3611 /* clear pending irq events */
3612 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3613 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE);
3615 mv_enable_port_irqs(ap, ERR_IRQ);
3619 * mv_port_init - Perform some early initialization on a single port.
3620 * @port: libata data structure storing shadow register addresses
3621 * @port_mmio: base address of the port
3623 * Initialize shadow register mmio addresses, clear outstanding
3624 * interrupts on the port, and unmask interrupts for the future
3625 * start of the port.
3628 * Inherited from caller.
3630 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3632 void __iomem *serr, *shd_base = port_mmio + SHD_BLK;
3634 /* PIO related setup
3636 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3638 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3639 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3640 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3641 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3642 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3643 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3645 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3646 /* special case: control/altstatus doesn't have ATA_REG_ address */
3647 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST;
3649 /* Clear any currently outstanding port interrupt conditions */
3650 serr = port_mmio + mv_scr_offset(SCR_ERROR);
3651 writelfl(readl(serr), serr);
3652 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE);
3654 /* unmask all non-transient EDMA error interrupts */
3655 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK);
3657 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3658 readl(port_mmio + EDMA_CFG),
3659 readl(port_mmio + EDMA_ERR_IRQ_CAUSE),
3660 readl(port_mmio + EDMA_ERR_IRQ_MASK));
3663 static unsigned int mv_in_pcix_mode(struct ata_host *host)
3665 struct mv_host_priv *hpriv = host->private_data;
3666 void __iomem *mmio = hpriv->base;
3669 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3670 return 0; /* not PCI-X capable */
3671 reg = readl(mmio + MV_PCI_MODE);
3672 if ((reg & MV_PCI_MODE_MASK) == 0)
3673 return 0; /* conventional PCI mode */
3674 return 1; /* chip is in PCI-X mode */
3677 static int mv_pci_cut_through_okay(struct ata_host *host)
3679 struct mv_host_priv *hpriv = host->private_data;
3680 void __iomem *mmio = hpriv->base;
3683 if (!mv_in_pcix_mode(host)) {
3684 reg = readl(mmio + MV_PCI_COMMAND);
3685 if (reg & MV_PCI_COMMAND_MRDTRIG)
3686 return 0; /* not okay */
3688 return 1; /* okay */
3691 static void mv_60x1b2_errata_pci7(struct ata_host *host)
3693 struct mv_host_priv *hpriv = host->private_data;
3694 void __iomem *mmio = hpriv->base;
3696 /* workaround for 60x1-B2 errata PCI#7 */
3697 if (mv_in_pcix_mode(host)) {
3698 u32 reg = readl(mmio + MV_PCI_COMMAND);
3699 writelfl(reg & ~MV_PCI_COMMAND_MWRCOM, mmio + MV_PCI_COMMAND);
3703 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3705 struct pci_dev *pdev = to_pci_dev(host->dev);
3706 struct mv_host_priv *hpriv = host->private_data;
3707 u32 hp_flags = hpriv->hp_flags;
3709 switch (board_idx) {
3711 hpriv->ops = &mv5xxx_ops;
3712 hp_flags |= MV_HP_GEN_I;
3714 switch (pdev->revision) {
3716 hp_flags |= MV_HP_ERRATA_50XXB0;
3719 hp_flags |= MV_HP_ERRATA_50XXB2;
3722 dev_printk(KERN_WARNING, &pdev->dev,
3723 "Applying 50XXB2 workarounds to unknown rev\n");
3724 hp_flags |= MV_HP_ERRATA_50XXB2;
3731 hpriv->ops = &mv5xxx_ops;
3732 hp_flags |= MV_HP_GEN_I;
3734 switch (pdev->revision) {
3736 hp_flags |= MV_HP_ERRATA_50XXB0;
3739 hp_flags |= MV_HP_ERRATA_50XXB2;
3742 dev_printk(KERN_WARNING, &pdev->dev,
3743 "Applying B2 workarounds to unknown rev\n");
3744 hp_flags |= MV_HP_ERRATA_50XXB2;
3751 hpriv->ops = &mv6xxx_ops;
3752 hp_flags |= MV_HP_GEN_II;
3754 switch (pdev->revision) {
3756 mv_60x1b2_errata_pci7(host);
3757 hp_flags |= MV_HP_ERRATA_60X1B2;
3760 hp_flags |= MV_HP_ERRATA_60X1C0;
3763 dev_printk(KERN_WARNING, &pdev->dev,
3764 "Applying B2 workarounds to unknown rev\n");
3765 hp_flags |= MV_HP_ERRATA_60X1B2;
3771 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3772 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3773 (pdev->device == 0x2300 || pdev->device == 0x2310))
3776 * Highpoint RocketRAID PCIe 23xx series cards:
3778 * Unconfigured drives are treated as "Legacy"
3779 * by the BIOS, and it overwrites sector 8 with
3780 * a "Lgcy" metadata block prior to Linux boot.
3782 * Configured drives (RAID or JBOD) leave sector 8
3783 * alone, but instead overwrite a high numbered
3784 * sector for the RAID metadata. This sector can
3785 * be determined exactly, by truncating the physical
3786 * drive capacity to a nice even GB value.
3788 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3790 * Warn the user, lest they think we're just buggy.
3792 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3793 " BIOS CORRUPTS DATA on all attached drives,"
3794 " regardless of if/how they are configured."
3796 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3797 " use sectors 8-9 on \"Legacy\" drives,"
3798 " and avoid the final two gigabytes on"
3799 " all RocketRAID BIOS initialized drives.\n");
3803 hpriv->ops = &mv6xxx_ops;
3804 hp_flags |= MV_HP_GEN_IIE;
3805 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3806 hp_flags |= MV_HP_CUT_THROUGH;
3808 switch (pdev->revision) {
3809 case 0x2: /* Rev.B0: the first/only public release */
3810 hp_flags |= MV_HP_ERRATA_60X1C0;
3813 dev_printk(KERN_WARNING, &pdev->dev,
3814 "Applying 60X1C0 workarounds to unknown rev\n");
3815 hp_flags |= MV_HP_ERRATA_60X1C0;
3820 if (soc_is_65n(hpriv))
3821 hpriv->ops = &mv_soc_65n_ops;
3823 hpriv->ops = &mv_soc_ops;
3824 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3825 MV_HP_ERRATA_60X1C0;
3829 dev_printk(KERN_ERR, host->dev,
3830 "BUG: invalid board index %u\n", board_idx);
3834 hpriv->hp_flags = hp_flags;
3835 if (hp_flags & MV_HP_PCIE) {
3836 hpriv->irq_cause_offset = PCIE_IRQ_CAUSE;
3837 hpriv->irq_mask_offset = PCIE_IRQ_MASK;
3838 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3840 hpriv->irq_cause_offset = PCI_IRQ_CAUSE;
3841 hpriv->irq_mask_offset = PCI_IRQ_MASK;
3842 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
3849 * mv_init_host - Perform some early initialization of the host.
3850 * @host: ATA host to initialize
3852 * If possible, do an early global reset of the host. Then do
3853 * our port init and clear/unmask all/relevant host interrupts.
3856 * Inherited from caller.
3858 static int mv_init_host(struct ata_host *host)
3860 int rc = 0, n_hc, port, hc;
3861 struct mv_host_priv *hpriv = host->private_data;
3862 void __iomem *mmio = hpriv->base;
3864 rc = mv_chip_id(host, hpriv->board_idx);
3868 if (IS_SOC(hpriv)) {
3869 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE;
3870 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK;
3872 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE;
3873 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK;
3876 /* initialize shadow irq mask with register's value */
3877 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3879 /* global interrupt mask: 0 == mask everything */
3880 mv_set_main_irq_mask(host, ~0, 0);
3882 n_hc = mv_get_hc_count(host->ports[0]->flags);
3884 for (port = 0; port < host->n_ports; port++)
3885 if (hpriv->ops->read_preamp)
3886 hpriv->ops->read_preamp(hpriv, port, mmio);
3888 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3892 hpriv->ops->reset_flash(hpriv, mmio);
3893 hpriv->ops->reset_bus(host, mmio);
3894 hpriv->ops->enable_leds(hpriv, mmio);
3896 for (port = 0; port < host->n_ports; port++) {
3897 struct ata_port *ap = host->ports[port];
3898 void __iomem *port_mmio = mv_port_base(mmio, port);
3900 mv_port_init(&ap->ioaddr, port_mmio);
3903 for (hc = 0; hc < n_hc; hc++) {
3904 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3906 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3907 "(before clear)=0x%08x\n", hc,
3908 readl(hc_mmio + HC_CFG),
3909 readl(hc_mmio + HC_IRQ_CAUSE));
3911 /* Clear any currently outstanding hc interrupt conditions */
3912 writelfl(0, hc_mmio + HC_IRQ_CAUSE);
3915 if (!IS_SOC(hpriv)) {
3916 /* Clear any currently outstanding host interrupt conditions */
3917 writelfl(0, mmio + hpriv->irq_cause_offset);
3919 /* and unmask interrupt generation for host regs */
3920 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_offset);
3924 * enable only global host interrupts for now.
3925 * The per-port interrupts get done later as ports are set up.
3927 mv_set_main_irq_mask(host, 0, PCI_ERR);
3928 mv_set_irq_coalescing(host, irq_coalescing_io_count,
3929 irq_coalescing_usecs);
3934 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3936 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3938 if (!hpriv->crqb_pool)
3941 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3943 if (!hpriv->crpb_pool)
3946 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3948 if (!hpriv->sg_tbl_pool)
3954 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
3955 struct mbus_dram_target_info *dram)
3959 for (i = 0; i < 4; i++) {
3960 writel(0, hpriv->base + WINDOW_CTRL(i));
3961 writel(0, hpriv->base + WINDOW_BASE(i));
3964 for (i = 0; i < dram->num_cs; i++) {
3965 struct mbus_dram_window *cs = dram->cs + i;
3967 writel(((cs->size - 1) & 0xffff0000) |
3968 (cs->mbus_attr << 8) |
3969 (dram->mbus_dram_target_id << 4) | 1,
3970 hpriv->base + WINDOW_CTRL(i));
3971 writel(cs->base, hpriv->base + WINDOW_BASE(i));
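/*
 * Window control word layout, as assembled above: bits 31:16 size
 * mask, bits 15:8 mbus attribute, bits 7:4 DRAM target id, bit 0
 * window enable.
 */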
3976 * mv_platform_probe - handle a positive probe of an SoC Marvell
3978 * @pdev: platform device found
3981 * Inherited from caller.
3983 static int mv_platform_probe(struct platform_device *pdev)
3985 static int printed_version;
3986 const struct mv_sata_platform_data *mv_platform_data;
3987 const struct ata_port_info *ppi[] =
3988 { &mv_port_info[chip_soc], NULL };
3989 struct ata_host *host;
3990 struct mv_host_priv *hpriv;
3991 struct resource *res;
3994 if (!printed_version++)
3995 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3998 * Simple resource validation ..
4000 if (unlikely(pdev->num_resources != 2)) {
4001 dev_err(&pdev->dev, "invalid number of resources\n");
4006 * Get the register base first
4008 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4013 mv_platform_data = pdev->dev.platform_data;
4014 n_ports = mv_platform_data->n_ports;
4016 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4017 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4019 if (!host || !hpriv)
4021 host->private_data = hpriv;
4022 hpriv->n_ports = n_ports;
4023 hpriv->board_idx = chip_soc;
4026 hpriv->base = devm_ioremap(&pdev->dev, res->start,
4027 resource_size(res));
4028 hpriv->base -= SATAHC0_REG_BASE;
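/*
 * The platform resource points at the SATAHC0 register block, but the
 * rest of this driver computes offsets from the chip base; the
 * subtraction above reconstructs that chip-relative base address.
 */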
4030 #if defined(CONFIG_HAVE_CLK)
4031 hpriv->clk = clk_get(&pdev->dev, NULL);
4032 if (IS_ERR(hpriv->clk))
4033 dev_notice(&pdev->dev, "cannot get clkdev\n");
4035 clk_enable(hpriv->clk);
4039 * (Re-)program MBUS remapping windows if we are asked to.
4041 if (mv_platform_data->dram != NULL)
4042 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
4044 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4048 /* initialize adapter */
4049 rc = mv_init_host(host);
4053 dev_printk(KERN_INFO, &pdev->dev,
4054 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
4057 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
4058 IRQF_SHARED, &mv6_sht);
4060 #if defined(CONFIG_HAVE_CLK)
4061 if (!IS_ERR(hpriv->clk)) {
4062 clk_disable(hpriv->clk);
4063 clk_put(hpriv->clk);
4072 * mv_platform_remove - unplug a platform interface
4073 * @pdev: platform device
4075 * A platform bus SATA device has been unplugged. Perform the needed
4076 * cleanup. Also called on module unload for any active devices.
4078 static int __devexit mv_platform_remove(struct platform_device *pdev)
4080 struct device *dev = &pdev->dev;
4081 struct ata_host *host = dev_get_drvdata(dev);
4082 #if defined(CONFIG_HAVE_CLK)
4083 struct mv_host_priv *hpriv = host->private_data;
4085 ata_host_detach(host);
4087 #if defined(CONFIG_HAVE_CLK)
4088 if (!IS_ERR(hpriv->clk)) {
4089 clk_disable(hpriv->clk);
4090 clk_put(hpriv->clk);
4097 static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
4099 struct ata_host *host = dev_get_drvdata(&pdev->dev);
4101 return ata_host_suspend(host, state);
4106 static int mv_platform_resume(struct platform_device *pdev)
4108 struct ata_host *host = dev_get_drvdata(&pdev->dev);
4112 struct mv_host_priv *hpriv = host->private_data;
4113 const struct mv_sata_platform_data *mv_platform_data =
4114 pdev->dev.platform_data;
4116 * (Re-)program MBUS remapping windows if we are asked to.
4118 if (mv_platform_data->dram != NULL)
4119 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
4121 /* initialize adapter */
4122 ret = mv_init_host(host);
4124 printk(KERN_ERR DRV_NAME ": Error during HW init\n");
4127 ata_host_resume(host);
4133 #define mv_platform_suspend NULL
4134 #define mv_platform_resume NULL
4137 static struct platform_driver mv_platform_driver = {
4138 .probe = mv_platform_probe,
4139 .remove = __devexit_p(mv_platform_remove),
4140 .suspend = mv_platform_suspend,
4141 .resume = mv_platform_resume,
4144 .owner = THIS_MODULE,
4150 static int mv_pci_init_one(struct pci_dev *pdev,
4151 const struct pci_device_id *ent);
4153 static int mv_pci_device_resume(struct pci_dev *pdev);
4157 static struct pci_driver mv_pci_driver = {
4159 .id_table = mv_pci_tbl,
4160 .probe = mv_pci_init_one,
4161 .remove = ata_pci_remove_one,
4163 .suspend = ata_pci_device_suspend,
4164 .resume = mv_pci_device_resume,
4169 /* move to PCI layer or libata core? */
4170 static int pci_go_64(struct pci_dev *pdev)
4174 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4175 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4177 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4179 dev_printk(KERN_ERR, &pdev->dev,
4180 "64-bit DMA enable failed\n");
4185 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4187 dev_printk(KERN_ERR, &pdev->dev,
4188 "32-bit DMA enable failed\n");
4191 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4193 dev_printk(KERN_ERR, &pdev->dev,
4194 "32-bit consistent DMA enable failed\n");
4203 * mv_print_info - Dump key info to kernel log for perusal.
4204 * @host: ATA host to print info about
4206 * FIXME: complete this.
4209 * Inherited from caller.
4211 static void mv_print_info(struct ata_host *host)
4213 struct pci_dev *pdev = to_pci_dev(host->dev);
4214 struct mv_host_priv *hpriv = host->private_data;
4216 const char *scc_s, *gen;
4218 /* Use this to determine the HW stepping of the chip so we know
4219 * what errata to work around
4221 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
4224 else if (scc == 0x01)
4229 if (IS_GEN_I(hpriv))
4231 else if (IS_GEN_II(hpriv))
4233 else if (IS_GEN_IIE(hpriv))
4238 dev_printk(KERN_INFO, &pdev->dev,
4239 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
4240 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
4241 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
4245 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
4246 * @pdev: PCI device found
4247 * @ent: PCI device ID entry for the matched host
4250 * Inherited from caller.
4252 static int mv_pci_init_one(struct pci_dev *pdev,
4253 const struct pci_device_id *ent)
4255 static int printed_version;
4256 unsigned int board_idx = (unsigned int)ent->driver_data;
4257 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
4258 struct ata_host *host;
4259 struct mv_host_priv *hpriv;
4260 int n_ports, port, rc;
4262 if (!printed_version++)
4263 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
4266 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
4268 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
4269 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
4270 if (!host || !hpriv)
4272 host->private_data = hpriv;
4273 hpriv->n_ports = n_ports;
4274 hpriv->board_idx = board_idx;
4276 /* acquire resources */
4277 rc = pcim_enable_device(pdev);
4281 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
4283 pcim_pin_device(pdev);
4286 host->iomap = pcim_iomap_table(pdev);
4287 hpriv->base = host->iomap[MV_PRIMARY_BAR];
4289 rc = pci_go_64(pdev);
4293 rc = mv_create_dma_pools(hpriv, &pdev->dev);
4297 for (port = 0; port < host->n_ports; port++) {
4298 struct ata_port *ap = host->ports[port];
4299 void __iomem *port_mmio = mv_port_base(hpriv->base, port);
4300 unsigned int offset = port_mmio - hpriv->base;
4302 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
4303 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
4306 /* initialize adapter */
4307 rc = mv_init_host(host);
4311 /* Enable Message Signaled Interrupts (MSI), if requested */
4312 if (msi && pci_enable_msi(pdev) == 0)
4313 hpriv->hp_flags |= MV_HP_FLAG_MSI;
4315 mv_dump_pci_cfg(pdev, 0x68);
4316 mv_print_info(host);
4318 pci_set_master(pdev);
4319 pci_try_set_mwi(pdev);
4320 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
4321 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
4325 static int mv_pci_device_resume(struct pci_dev *pdev)
4327 struct ata_host *host = dev_get_drvdata(&pdev->dev);
4330 rc = ata_pci_device_do_resume(pdev);
4334 /* initialize adapter */
4335 rc = mv_init_host(host);
4339 ata_host_resume(host);
4346 static int mv_platform_probe(struct platform_device *pdev);
4347 static int __devexit mv_platform_remove(struct platform_device *pdev);
4349 static int __init mv_init(void)
4353 rc = pci_register_driver(&mv_pci_driver);
4357 rc = platform_driver_register(&mv_platform_driver);
4361 pci_unregister_driver(&mv_pci_driver);
4366 static void __exit mv_exit(void)
4369 pci_unregister_driver(&mv_pci_driver);
4371 platform_driver_unregister(&mv_platform_driver);
4374 MODULE_AUTHOR("Brett Russ");
4375 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
4376 MODULE_LICENSE("GPL");
4377 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
4378 MODULE_VERSION(DRV_VERSION);
4379 MODULE_ALIAS("platform:" DRV_NAME);
4381 module_init(mv_init);
4382 module_exit(mv_exit);