/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Especially with PCI Message Signalled Interrupts (MSI), the overhead
  reduced by interrupt mitigation is often not worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.01"

enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
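
	/* Worked example of the sizes above: MV_CRQB_Q_SZ = 32 * 32 = 1KB,
	 * MV_CRPB_Q_SZ = 8 * 32 = 256B, and MV_SG_TBL_SZ = 16 * 176 = 2816B,
	 * so MV_PORT_PRIV_DMA_SZ = 1024 + 256 + 2816 = 4096B -- the 4KB
	 * called out in the comment above.
	 */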

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,
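
	/* Example: on an 8-port (dual-HC) chip, port 6 lives on
	 * HC (6 >> MV_PORT_HC_SHIFT) == 1, as hard port
	 * (6 & MV_PORT_MASK) == 2.
	 */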

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
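
/* Illustrative note on the two masks above: the request queue base is 1KB
 * aligned and the response queue base is 256B aligned, so the low bits of
 * the hardware IN/OUT pointer registers are free to carry the ring index,
 * composed as (crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) |
 * (req_idx << EDMA_REQ_Q_PTR_SHIFT) in mv_set_edma_ptrs() below.
 */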

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);

static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= 1,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */
module_param(msi, int, 0444);

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
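
/* Example: writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS) does not return
 * until the chip has actually seen the clear, because the dummy readl()
 * forces any posted PCI write to complete first.
 */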

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
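
/* Example: for port 5, mv_port_base() computes
 * base + 0x20000 (SATAHC0) + 1 * 0x10000 (HC 1) + 0x2000 (arbiter)
 * + 1 * 0x2000 (hard port 1) == base + 0x34000.
 */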

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
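
/* Example of the composed pointer value: with a 1KB-aligned crqb_dma of
 * 0x1f000000 and req_idx == 2, the request IN pointer register is written
 * as 0x1f000000 | (2 << EDMA_REQ_Q_PTR_SHIFT) == 0x1f000040.
 */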

/**
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command about to be issued
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			__mv_stop_dma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}
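
/* Typical flow: mv_qc_issue() calls mv_start_dma(ap, port_mmio, pp,
 * qc->tf.protocol) just before bumping the request producer index, so the
 * engine is (re)configured for NCQ vs. non-NCQ operation on demand.
 */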

/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}

static int mv_stop_dma(struct ata_port *ap)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ap->host->lock, flags);
	rc = __mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	return rc;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
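
/* Example: SCR_STATUS maps to 0x300, SCR_ERROR to 0x304 and SCR_CONTROL
 * to 0x308, while SCR_ACTIVE lives apart at 0x350; any other register
 * yields the 0xffffffffU sentinel that mv_scr_read()/mv_scr_write()
 * translate into -EINVAL.
 */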

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
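
/* Example: on a Gen II chip with want_ncq == 0 the register value works
 * out to EDMA_CFG_Q_DEPTH | EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN
 * == 0x1f | (1 << 11) | (1 << 13) == 0x281f.
 */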

/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	unsigned long flags;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}

/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
}

/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
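
/* Example of the 64KB split above: a 96KB segment at DMA address
 * 0x1000f000 is emitted as three ePRDs -- 4KB at 0x1000f000, 64KB at
 * 0x10010000 (a full 64KB entry encodes its length field as 0) and 28KB
 * at 0x10020000 -- so no single ePRD crosses a 64KB boundary.
 */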

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
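
/* Resulting halfword layout: bits 7:0 carry the register value, bits 10:8
 * the shadow register address, bit 12 the CRQB_CMD_CS flag and bit 15 the
 * last-command marker, e.g.:
 *
 *	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);
 */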

/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		__mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
		>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
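
/* Example: after the first command is issued, req_idx == 1 and the value
 * written to EDMA_REQ_Q_IN_PTR_OFS is (crqb_dma & 0xfffffc00) | (1 << 5);
 * advancing the IN pointer is what starts the EDMA fetching the CRQB.
 */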

/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected queued command, if any
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which is done right before
 * the COMRESET in mv_phy_reset().  The SERR case requires a
 * clear of pending errors in the SATA SERROR register.  Finally,
 * if the port disabled DMA, update our cached copy to match.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_HARDRESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp	= ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;
		else {
			/* Gen II/IIE: get active ATA command via tag, to enable
			 * support for queueing.  this works transparently for
			 * queued and non-queued modes.
			 */
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
		}

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
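
/* Example of the cause-bit layout walked above: port 5 uses shift
 * 5 * 2 + 1 == 11 (the extra 1 skips HC0's coalescing bit 8), so its
 * error bit is PORT0_ERR << 11 and its done bit is PORT0_DONE << 11.
 */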

static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat, irq_mask;

	spin_lock(&host->lock);
	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
	irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
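
/* Example: port 2 (hard port 2 on HC 0) has its PHY block at
 * hc_mmio + (2 + 1) * 0x100 == hc_mmio + 0x300.
 */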

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 *
 * This routine only applies to 6xxx parts.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2199 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2200 * @ap: ATA channel to manipulate
2202 * Part of this is taken from __sata_phy_reset and modified to
2203 * not sleep since this routine gets called from interrupt level.
2206 * Inherited from caller. This is coded to safe to call at
2207 * interrupt level, i.e. it does not sleep.
2209 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2210 unsigned long deadline)
2212 struct mv_port_priv *pp = ap->private_data;
2213 struct mv_host_priv *hpriv = ap->host->private_data;
2214 void __iomem *port_mmio = mv_ap_base(ap);
2218 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2222 u32 sstatus, serror, scontrol;
2224 mv_scr_read(ap, SCR_STATUS, &sstatus);
2225 mv_scr_read(ap, SCR_ERROR, &serror);
2226 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2227 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2228 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2232 /* Issue COMRESET via SControl */
2234 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2237 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
2241 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2242 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2246 } while (time_before(jiffies, deadline));
2248 /* work around errata */
2249 if (IS_GEN_II(hpriv) &&
2250 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2252 goto comreset_retry;
2256 u32 sstatus, serror, scontrol;
2258 mv_scr_read(ap, SCR_STATUS, &sstatus);
2259 mv_scr_read(ap, SCR_ERROR, &serror);
2260 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2261 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2262 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2266 if (ata_link_offline(&ap->link)) {
2267 *class = ATA_DEV_NONE;
	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	do {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;

		msleep(500);

		if (time_after(jiffies, deadline))
			break;
	} while (1);
	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
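
/*
 * The three reset hooks below feed libata's EH reset machinery:
 * mv_prereset() decides whether a full hardreset is required,
 * mv_hardreset() performs the channel reset plus COMRESET implemented
 * above, and mv_postreset() clears SError and restores device control.
 */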
static int mv_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &link->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_link_online(link))
		rc = ata_wait_ready(ap, deadline);
	else
		rc = -ENODEV;

	return rc;
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
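
/* ata_do_eh() runs the handlers in prereset -> softreset/hardreset ->
 * postreset order.  The stock ata_std_softreset is usable here because
 * mv_port_init() below maps the chip's shadow taskfile registers into
 * ap->ioaddr, giving each port an SFF-style register interface.
 */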
static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
static void mv_eh_freeze(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	/* each port owns an (err, done) bit pair in the main IRQ mask;
	 * ports behind the second host controller sit one bit higher
	 */
	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
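
/* Thaw is the mirror image of freeze: ack any latched EDMA and HC
 * interrupt causes for the port first, then re-enable the same
 * err/done bit pair in the main IRQ mask.
 */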
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
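
/*
 * mv_chip_id() classifies the controller from the board index and PCI
 * revision: it selects the matching mv_hw_ops vector, records which
 * errata workarounds apply, and caches the PCI vs. PCIe interrupt
 * register offsets for later use.
 */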
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;
	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;
	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* FALLTHRU: the 7042 is programmed like a 6042 (Gen IIE) */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}
	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs  = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
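
/* The 7042 is the PCIe flavour of the Gen-IIE silicon; its interrupt
 * cause/mask registers sit at different offsets than on the PCI parts,
 * so mv_chip_id() caches the correct set in hpriv once, rather than
 * re-deciding on every interrupt.
 */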
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);
	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}
	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}
	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}
	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
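
/* Note the bracketing in mv_init_host(): the top-level mask is zeroed
 * before any channel is touched, latched HC and host causes are acked,
 * and only then is interrupt generation unmasked -- so events latched
 * during the resets cannot fire as spurious interrupts.
 */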
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI sub-class to tell whether the chip presents
	 * itself as a plain SCSI or a RAID-class device
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
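
/* Everything in mv_init_one() is acquired through managed helpers
 * (devm_kzalloc, pcim_enable_device, pcim_iomap_regions), which is why
 * the error paths simply return: the devres core releases it all if
 * probe fails or the device is later unbound.
 */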
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);