2 * sata_mv.c - Marvell SATA support
4 * Copyright 2005: EMC Corporation, all rights reserved.
5 * Copyright 2005 Red Hat, Inc. All rights reserved.
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
32 2) Improve/fix IRQ and error handling sequences.
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36 4) Think about TCQ support here, and for libata in general
37 with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42 6) Add port multiplier support (intermediate)
44 8) Develop a low-power-consumption strategy, and implement it.
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is not
53 worth the latency cost.
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
65 #include <linux/kernel.h>
66 #include <linux/module.h>
67 #include <linux/pci.h>
68 #include <linux/init.h>
69 #include <linux/blkdev.h>
70 #include <linux/delay.h>
71 #include <linux/interrupt.h>
72 #include <linux/dmapool.h>
73 #include <linux/dma-mapping.h>
74 #include <linux/device.h>
75 #include <linux/platform_device.h>
76 #include <linux/ata_platform.h>
77 #include <scsi/scsi_host.h>
78 #include <scsi/scsi_cmnd.h>
79 #include <scsi/scsi_device.h>
80 #include <linux/libata.h>
82 #define DRV_NAME "sata_mv"
83 #define DRV_VERSION "1.20"
86 /* BAR's are enumerated in terms of pci_resource_start() terms */
87 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
88 MV_IO_BAR = 2, /* offset 0x18: IO space */
89 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
91 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
92 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
95 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
96 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
97 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
98 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
99 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
100 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
102 MV_SATAHC0_REG_BASE = 0x20000,
103 MV_FLASH_CTL = 0x1046c,
104 MV_GPIO_PORT_CTL = 0x104f0,
105 MV_RESET_CFG = 0x180d8,
107 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
108 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
109 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
110 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
113 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
115 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
116 * CRPB needs alignment on a 256B boundary. Size == 256B
117 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
119 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
120 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
122 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
125 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
126 MV_PORT_HC_SHIFT = 2,
127 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
131 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
132 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
133 /* SoC integrated controllers, no PCI interface */
134 MV_FLAG_SOC = (1 << 28),
136 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
137 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
138 ATA_FLAG_PIO_POLLING,
139 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
141 CRQB_FLAG_READ = (1 << 0),
143 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
144 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
145 CRQB_CMD_ADDR_SHIFT = 8,
146 CRQB_CMD_CS = (0x2 << 11),
147 CRQB_CMD_LAST = (1 << 15),
149 CRPB_FLAG_STATUS_SHIFT = 8,
150 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
151 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
153 EPRD_FLAG_END_OF_TBL = (1 << 31),
155 /* PCI interface registers */
157 PCI_COMMAND_OFS = 0xc00,
159 PCI_MAIN_CMD_STS_OFS = 0xd30,
160 STOP_PCI_MASTER = (1 << 2),
161 PCI_MASTER_EMPTY = (1 << 3),
162 GLOB_SFT_RST = (1 << 4),
165 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
166 MV_PCI_DISC_TIMER = 0xd04,
167 MV_PCI_MSI_TRIGGER = 0xc38,
168 MV_PCI_SERR_MASK = 0xc28,
169 MV_PCI_XBAR_TMOUT = 0x1d04,
170 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
171 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
172 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
173 MV_PCI_ERR_COMMAND = 0x1d50,
175 PCI_IRQ_CAUSE_OFS = 0x1d58,
176 PCI_IRQ_MASK_OFS = 0x1d5c,
177 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
179 PCIE_IRQ_CAUSE_OFS = 0x1900,
180 PCIE_IRQ_MASK_OFS = 0x1910,
181 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
183 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
184 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
185 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
186 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
187 PORT0_ERR = (1 << 0), /* shift by port # */
188 PORT0_DONE = (1 << 1), /* shift by port # */
189 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
190 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
192 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
193 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
194 PORTS_0_3_COAL_DONE = (1 << 8),
195 PORTS_4_7_COAL_DONE = (1 << 17),
196 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
197 GPIO_INT = (1 << 22),
198 SELF_INT = (1 << 23),
199 TWSI_INT = (1 << 24),
200 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
201 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
202 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
203 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
204 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
206 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
208 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
210 /* SATAHC registers */
213 HC_IRQ_CAUSE_OFS = 0x14,
214 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
215 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
216 DEV_IRQ = (1 << 8), /* shift by port # */
218 /* Shadow block registers */
220 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
223 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
224 SATA_ACTIVE_OFS = 0x350,
225 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
232 SATA_INTERFACE_CTL = 0x050,
234 MV_M2_PREAMP_MASK = 0x7e0,
238 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
239 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
240 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
241 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
242 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
244 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
245 EDMA_ERR_IRQ_MASK_OFS = 0xc,
246 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
247 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
248 EDMA_ERR_DEV = (1 << 2), /* device error */
249 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
250 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
251 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
252 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
253 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
254 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
255 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
256 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
257 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
258 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
259 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
261 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
262 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
263 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
264 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
265 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
267 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
269 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
270 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
271 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
272 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
273 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
274 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
276 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
278 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
279 EDMA_ERR_OVERRUN_5 = (1 << 5),
280 EDMA_ERR_UNDERRUN_5 = (1 << 6),
282 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
283 EDMA_ERR_LNK_CTRL_RX_1 |
284 EDMA_ERR_LNK_CTRL_RX_3 |
285 EDMA_ERR_LNK_CTRL_TX,
287 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
297 EDMA_ERR_LNK_CTRL_RX_2 |
298 EDMA_ERR_LNK_DATA_RX |
299 EDMA_ERR_LNK_DATA_TX |
300 EDMA_ERR_TRANS_PROTO,
301 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
306 EDMA_ERR_UNDERRUN_5 |
307 EDMA_ERR_SELF_DIS_5 |
313 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
314 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
316 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
317 EDMA_REQ_Q_PTR_SHIFT = 5,
319 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
320 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
321 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
322 EDMA_RSP_Q_PTR_SHIFT = 3,
324 EDMA_CMD_OFS = 0x28, /* EDMA command register */
325 EDMA_EN = (1 << 0), /* enable EDMA */
326 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
327 ATA_RST = (1 << 2), /* reset trans/link/phy */
329 EDMA_IORDY_TMOUT = 0x34,
332 /* Host private flags (hp_flags) */
333 MV_HP_FLAG_MSI = (1 << 0),
334 MV_HP_ERRATA_50XXB0 = (1 << 1),
335 MV_HP_ERRATA_50XXB2 = (1 << 2),
336 MV_HP_ERRATA_60X1B2 = (1 << 3),
337 MV_HP_ERRATA_60X1C0 = (1 << 4),
338 MV_HP_ERRATA_XX42A0 = (1 << 5),
339 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
340 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
341 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
342 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
344 /* Port private flags (pp_flags) */
345 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
346 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
347 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
350 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
351 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
352 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
353 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
356 /* DMA boundary 0xffff is required by the s/g splitting
357 * we need on /length/ in mv_fill-sg().
359 MV_DMA_BOUNDARY = 0xffffU,
361 /* mask of register bits containing lower 32 bits
362 * of EDMA request queue DMA address
364 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
366 /* ditto, for response queue */
367 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
381 /* Command ReQuest Block: 32B */
397 /* Command ResPonse Block: 8B */
404 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
412 struct mv_port_priv {
413 struct mv_crqb *crqb;
415 struct mv_crpb *crpb;
417 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
418 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
420 unsigned int req_idx;
421 unsigned int resp_idx;
426 struct mv_port_signal {
431 struct mv_host_priv {
433 struct mv_port_signal signal[8];
434 const struct mv_hw_ops *ops;
437 void __iomem *main_cause_reg_addr;
438 void __iomem *main_mask_reg_addr;
443 * These consistent DMA memory pools give us guaranteed
444 * alignment for hardware-accessed data structures,
445 * and less memory waste in accomplishing the alignment.
447 struct dma_pool *crqb_pool;
448 struct dma_pool *crpb_pool;
449 struct dma_pool *sg_tbl_pool;
453 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
455 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
456 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
458 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
460 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
461 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
464 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
465 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
466 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
467 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
468 static int mv_port_start(struct ata_port *ap);
469 static void mv_port_stop(struct ata_port *ap);
470 static void mv_qc_prep(struct ata_queued_cmd *qc);
471 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
472 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
473 static int mv_prereset(struct ata_link *link, unsigned long deadline);
474 static int mv_hardreset(struct ata_link *link, unsigned int *class,
475 unsigned long deadline);
476 static void mv_postreset(struct ata_link *link, unsigned int *classes);
477 static void mv_eh_freeze(struct ata_port *ap);
478 static void mv_eh_thaw(struct ata_port *ap);
479 static void mv6_dev_config(struct ata_device *dev);
481 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
483 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
484 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
486 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
488 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
489 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
491 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
493 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
494 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
496 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
498 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
499 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
501 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
503 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
504 void __iomem *mmio, unsigned int n_hc);
505 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
507 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
508 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
509 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
510 unsigned int port_no);
511 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
512 void __iomem *port_mmio, int want_ncq);
513 static int __mv_stop_dma(struct ata_port *ap);
515 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
516 * because we have to allow room for worst case splitting of
517 * PRDs for 64K boundaries in mv_fill_sg().
519 static struct scsi_host_template mv5_sht = {
520 ATA_BASE_SHT(DRV_NAME),
521 .sg_tablesize = MV_MAX_SG_CT / 2,
522 .dma_boundary = MV_DMA_BOUNDARY,
525 static struct scsi_host_template mv6_sht = {
526 ATA_NCQ_SHT(DRV_NAME),
527 .can_queue = MV_MAX_Q_DEPTH - 1,
528 .sg_tablesize = MV_MAX_SG_CT / 2,
529 .dma_boundary = MV_DMA_BOUNDARY,
532 static struct ata_port_operations mv5_ops = {
533 .inherits = &ata_sff_port_ops,
535 .qc_prep = mv_qc_prep,
536 .qc_issue = mv_qc_issue,
538 .freeze = mv_eh_freeze,
540 .prereset = mv_prereset,
541 .hardreset = mv_hardreset,
542 .postreset = mv_postreset,
543 .error_handler = ata_std_error_handler, /* avoid SFF EH */
544 .post_internal_cmd = ATA_OP_NULL,
546 .scr_read = mv5_scr_read,
547 .scr_write = mv5_scr_write,
549 .port_start = mv_port_start,
550 .port_stop = mv_port_stop,
553 static struct ata_port_operations mv6_ops = {
554 .inherits = &mv5_ops,
555 .qc_defer = ata_std_qc_defer,
556 .dev_config = mv6_dev_config,
557 .scr_read = mv_scr_read,
558 .scr_write = mv_scr_write,
561 static struct ata_port_operations mv_iie_ops = {
562 .inherits = &mv6_ops,
563 .dev_config = ATA_OP_NULL,
564 .qc_prep = mv_qc_prep_iie,
567 static const struct ata_port_info mv_port_info[] = {
569 .flags = MV_COMMON_FLAGS,
570 .pio_mask = 0x1f, /* pio0-4 */
571 .udma_mask = ATA_UDMA6,
572 .port_ops = &mv5_ops,
575 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
576 .pio_mask = 0x1f, /* pio0-4 */
577 .udma_mask = ATA_UDMA6,
578 .port_ops = &mv5_ops,
581 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
582 .pio_mask = 0x1f, /* pio0-4 */
583 .udma_mask = ATA_UDMA6,
584 .port_ops = &mv5_ops,
587 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
589 .pio_mask = 0x1f, /* pio0-4 */
590 .udma_mask = ATA_UDMA6,
591 .port_ops = &mv6_ops,
594 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
595 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
596 .pio_mask = 0x1f, /* pio0-4 */
597 .udma_mask = ATA_UDMA6,
598 .port_ops = &mv6_ops,
601 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
603 .pio_mask = 0x1f, /* pio0-4 */
604 .udma_mask = ATA_UDMA6,
605 .port_ops = &mv_iie_ops,
608 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
610 .pio_mask = 0x1f, /* pio0-4 */
611 .udma_mask = ATA_UDMA6,
612 .port_ops = &mv_iie_ops,
615 .flags = MV_COMMON_FLAGS | MV_FLAG_SOC,
616 .pio_mask = 0x1f, /* pio0-4 */
617 .udma_mask = ATA_UDMA6,
618 .port_ops = &mv_iie_ops,
622 static const struct pci_device_id mv_pci_tbl[] = {
623 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
624 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
625 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
626 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
627 /* RocketRAID 1740/174x have different identifiers */
628 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
629 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
631 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
632 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
633 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
634 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
635 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
637 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
640 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
642 /* Marvell 7042 support */
643 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
645 /* Highpoint RocketRAID PCIe series */
646 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
647 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
649 { } /* terminate list */
652 static const struct mv_hw_ops mv5xxx_ops = {
653 .phy_errata = mv5_phy_errata,
654 .enable_leds = mv5_enable_leds,
655 .read_preamp = mv5_read_preamp,
656 .reset_hc = mv5_reset_hc,
657 .reset_flash = mv5_reset_flash,
658 .reset_bus = mv5_reset_bus,
661 static const struct mv_hw_ops mv6xxx_ops = {
662 .phy_errata = mv6_phy_errata,
663 .enable_leds = mv6_enable_leds,
664 .read_preamp = mv6_read_preamp,
665 .reset_hc = mv6_reset_hc,
666 .reset_flash = mv6_reset_flash,
667 .reset_bus = mv_reset_pci_bus,
670 static const struct mv_hw_ops mv_soc_ops = {
671 .phy_errata = mv6_phy_errata,
672 .enable_leds = mv_soc_enable_leds,
673 .read_preamp = mv_soc_read_preamp,
674 .reset_hc = mv_soc_reset_hc,
675 .reset_flash = mv_soc_reset_flash,
676 .reset_bus = mv_soc_reset_bus,
683 static inline void writelfl(unsigned long data, void __iomem *addr)
686 (void) readl(addr); /* flush to avoid PCI posted write */
689 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
691 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
694 static inline unsigned int mv_hc_from_port(unsigned int port)
696 return port >> MV_PORT_HC_SHIFT;
699 static inline unsigned int mv_hardport_from_port(unsigned int port)
701 return port & MV_PORT_MASK;
704 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
707 return mv_hc_base(base, mv_hc_from_port(port));
710 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
712 return mv_hc_base_from_port(base, port) +
713 MV_SATAHC_ARBTR_REG_SZ +
714 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
717 static inline void __iomem *mv_host_base(struct ata_host *host)
719 struct mv_host_priv *hpriv = host->private_data;
723 static inline void __iomem *mv_ap_base(struct ata_port *ap)
725 return mv_port_base(mv_host_base(ap->host), ap->port_no);
728 static inline int mv_get_hc_count(unsigned long port_flags)
730 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
733 static void mv_set_edma_ptrs(void __iomem *port_mmio,
734 struct mv_host_priv *hpriv,
735 struct mv_port_priv *pp)
740 * initialize request queue
742 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
744 WARN_ON(pp->crqb_dma & 0x3ff);
745 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
746 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
747 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
749 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
750 writelfl((pp->crqb_dma & 0xffffffff) | index,
751 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
753 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
756 * initialize response queue
758 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
760 WARN_ON(pp->crpb_dma & 0xff);
761 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
763 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
764 writelfl((pp->crpb_dma & 0xffffffff) | index,
765 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
767 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
769 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
770 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
774 * mv_start_dma - Enable eDMA engine
775 * @base: port base address
776 * @pp: port private data
778 * Verify the local cache of the eDMA state is accurate with a
782 * Inherited from caller.
784 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
785 struct mv_port_priv *pp, u8 protocol)
787 int want_ncq = (protocol == ATA_PROT_NCQ);
789 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
790 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
791 if (want_ncq != using_ncq)
794 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
795 struct mv_host_priv *hpriv = ap->host->private_data;
796 int hard_port = mv_hardport_from_port(ap->port_no);
797 void __iomem *hc_mmio = mv_hc_base_from_port(
798 mv_host_base(ap->host), hard_port);
799 u32 hc_irq_cause, ipending;
801 /* clear EDMA event indicators, if any */
802 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
804 /* clear EDMA interrupt indicator, if any */
805 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
806 ipending = (DEV_IRQ << hard_port) |
807 (CRPB_DMA_DONE << hard_port);
808 if (hc_irq_cause & ipending) {
809 writelfl(hc_irq_cause & ~ipending,
810 hc_mmio + HC_IRQ_CAUSE_OFS);
813 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
815 /* clear FIS IRQ Cause */
816 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
818 mv_set_edma_ptrs(port_mmio, hpriv, pp);
820 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
821 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
823 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
827 * __mv_stop_dma - Disable eDMA engine
828 * @ap: ATA channel to manipulate
830 * Verify the local cache of the eDMA state is accurate with a
834 * Inherited from caller.
836 static int __mv_stop_dma(struct ata_port *ap)
838 void __iomem *port_mmio = mv_ap_base(ap);
839 struct mv_port_priv *pp = ap->private_data;
843 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
844 /* Disable EDMA if active. The disable bit auto clears.
846 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
847 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
849 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
852 /* now properly wait for the eDMA to stop */
853 for (i = 1000; i > 0; i--) {
854 reg = readl(port_mmio + EDMA_CMD_OFS);
855 if (!(reg & EDMA_EN))
862 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
869 static int mv_stop_dma(struct ata_port *ap)
874 spin_lock_irqsave(&ap->host->lock, flags);
875 rc = __mv_stop_dma(ap);
876 spin_unlock_irqrestore(&ap->host->lock, flags);
882 static void mv_dump_mem(void __iomem *start, unsigned bytes)
885 for (b = 0; b < bytes; ) {
886 DPRINTK("%p: ", start + b);
887 for (w = 0; b < bytes && w < 4; w++) {
888 printk("%08x ", readl(start + b));
/* Debug helper: hex-dump @bytes of @pdev's PCI config space,
 * four dwords per line.  Compiled out unless ATA_DEBUG is set.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
912 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
913 struct pci_dev *pdev)
916 void __iomem *hc_base = mv_hc_base(mmio_base,
917 port >> MV_PORT_HC_SHIFT);
918 void __iomem *port_base;
919 int start_port, num_ports, p, start_hc, num_hcs, hc;
922 start_hc = start_port = 0;
923 num_ports = 8; /* shld be benign for 4 port devs */
926 start_hc = port >> MV_PORT_HC_SHIFT;
928 num_ports = num_hcs = 1;
930 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
931 num_ports > 1 ? num_ports - 1 : start_port);
934 DPRINTK("PCI config space regs:\n");
935 mv_dump_pci_cfg(pdev, 0x68);
937 DPRINTK("PCI regs:\n");
938 mv_dump_mem(mmio_base+0xc00, 0x3c);
939 mv_dump_mem(mmio_base+0xd00, 0x34);
940 mv_dump_mem(mmio_base+0xf00, 0x4);
941 mv_dump_mem(mmio_base+0x1d00, 0x6c);
942 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
943 hc_base = mv_hc_base(mmio_base, hc);
944 DPRINTK("HC regs (HC %i):\n", hc);
945 mv_dump_mem(hc_base, 0x1c);
947 for (p = start_port; p < start_port + num_ports; p++) {
948 port_base = mv_port_base(mmio_base, p);
949 DPRINTK("EDMA regs (port %i):\n", p);
950 mv_dump_mem(port_base, 0x54);
951 DPRINTK("SATA regs (port %i):\n", p);
952 mv_dump_mem(port_base+0x300, 0x60);
957 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
965 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
968 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
977 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
979 unsigned int ofs = mv_scr_offset(sc_reg_in);
981 if (ofs != 0xffffffffU) {
982 *val = readl(mv_ap_base(ap) + ofs);
988 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
990 unsigned int ofs = mv_scr_offset(sc_reg_in);
992 if (ofs != 0xffffffffU) {
993 writelfl(val, mv_ap_base(ap) + ofs);
999 static void mv6_dev_config(struct ata_device *adev)
1002 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1003 * See mv_qc_prep() for more info.
1005 if (adev->flags & ATA_DFLAG_NCQ)
1006 if (adev->max_sectors > ATA_MAX_SECTORS)
1007 adev->max_sectors = ATA_MAX_SECTORS;
1010 static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1011 void __iomem *port_mmio, int want_ncq)
1015 /* set up non-NCQ EDMA configuration */
1016 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1018 if (IS_GEN_I(hpriv))
1019 cfg |= (1 << 8); /* enab config burst size mask */
1021 else if (IS_GEN_II(hpriv))
1022 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1024 else if (IS_GEN_IIE(hpriv)) {
1025 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1026 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1027 cfg |= (1 << 18); /* enab early completion */
1028 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
1032 cfg |= EDMA_CFG_NCQ;
1033 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1035 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1037 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1040 static void mv_port_free_dma_mem(struct ata_port *ap)
1042 struct mv_host_priv *hpriv = ap->host->private_data;
1043 struct mv_port_priv *pp = ap->private_data;
1047 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1051 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1055 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1056 * For later hardware, we have one unique sg_tbl per NCQ tag.
1058 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1059 if (pp->sg_tbl[tag]) {
1060 if (tag == 0 || !IS_GEN_I(hpriv))
1061 dma_pool_free(hpriv->sg_tbl_pool,
1063 pp->sg_tbl_dma[tag]);
1064 pp->sg_tbl[tag] = NULL;
1070 * mv_port_start - Port specific init/start routine.
1071 * @ap: ATA channel to manipulate
1073 * Allocate and point to DMA memory, init port private memory,
1077 * Inherited from caller.
1079 static int mv_port_start(struct ata_port *ap)
1081 struct device *dev = ap->host->dev;
1082 struct mv_host_priv *hpriv = ap->host->private_data;
1083 struct mv_port_priv *pp;
1084 void __iomem *port_mmio = mv_ap_base(ap);
1085 unsigned long flags;
1088 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1091 ap->private_data = pp;
1093 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1096 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1098 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1100 goto out_port_free_dma_mem;
1101 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1104 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1105 * For later hardware, we need one unique sg_tbl per NCQ tag.
1107 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1108 if (tag == 0 || !IS_GEN_I(hpriv)) {
1109 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1110 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1111 if (!pp->sg_tbl[tag])
1112 goto out_port_free_dma_mem;
1114 pp->sg_tbl[tag] = pp->sg_tbl[0];
1115 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1119 spin_lock_irqsave(&ap->host->lock, flags);
1121 mv_edma_cfg(pp, hpriv, port_mmio, 0);
1122 mv_set_edma_ptrs(port_mmio, hpriv, pp);
1124 spin_unlock_irqrestore(&ap->host->lock, flags);
1126 /* Don't turn on EDMA here...do it before DMA commands only. Else
1127 * we'll be unable to send non-data, PIO, etc due to restricted access
1132 out_port_free_dma_mem:
1133 mv_port_free_dma_mem(ap);
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
1153 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1154 * @qc: queued command whose SG list to source from
1156 * Populate the SG list and mark the last entry.
1159 * Inherited from caller.
1161 static void mv_fill_sg(struct ata_queued_cmd *qc)
1163 struct mv_port_priv *pp = qc->ap->private_data;
1164 struct scatterlist *sg;
1165 struct mv_sg *mv_sg, *last_sg = NULL;
1168 mv_sg = pp->sg_tbl[qc->tag];
1169 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1170 dma_addr_t addr = sg_dma_address(sg);
1171 u32 sg_len = sg_dma_len(sg);
1174 u32 offset = addr & 0xffff;
1177 if ((offset + sg_len > 0x10000))
1178 len = 0x10000 - offset;
1180 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1181 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1182 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
1192 if (likely(last_sg))
1193 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
1196 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1198 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1199 (last ? CRQB_CMD_LAST : 0);
1200 *cmdw = cpu_to_le16(tmp);
1204 * mv_qc_prep - Host specific command preparation.
1205 * @qc: queued command to prepare
1207 * This routine simply redirects to the general purpose routine
1208 * if command is not DMA. Else, it handles prep of the CRQB
1209 * (command request block), does some sanity checking, and calls
1210 * the SG load routine.
1213 * Inherited from caller.
1215 static void mv_qc_prep(struct ata_queued_cmd *qc)
1217 struct ata_port *ap = qc->ap;
1218 struct mv_port_priv *pp = ap->private_data;
1220 struct ata_taskfile *tf;
/* EDMA prep applies only to DMA/NCQ protocols; anything else takes
 * the generic path (early return elided in this extract).
 */
1224 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1225 (qc->tf.protocol != ATA_PROT_NCQ))
1228 /* Fill in command request block
1230 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1231 flags |= CRQB_FLAG_READ;
1232 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1233 flags |= qc->tag << CRQB_TAG_SHIFT;
1235 /* get current queue index from software */
1236 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
/* Point this CRQB slot at the per-tag ePRD (SG) table; 64-bit DMA
 * address split into two 32-bit halves (double shift is safe for
 * 32-bit dma_addr_t).
 */
1238 pp->crqb[in_index].sg_addr =
1239 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1240 pp->crqb[in_index].sg_addr_hi =
1241 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1242 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1244 cw = &pp->crqb[in_index].ata_cmd[0];
1247 /* Sadly, the CRQB cannot accomodate all registers--there are
1248 * only 11 bytes...so we must pick and choose required
1249 * registers based on the command. So, we drop feature and
1250 * hob_feature for [RW] DMA commands, but they are needed for
1251 * NCQ. NCQ will drop hob_nsect.
1253 switch (tf->command) {
1255 case ATA_CMD_READ_EXT:
1257 case ATA_CMD_WRITE_EXT:
1258 case ATA_CMD_WRITE_FUA_EXT:
1259 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1261 case ATA_CMD_FPDMA_READ:
1262 case ATA_CMD_FPDMA_WRITE:
1263 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1264 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1267 /* The only other commands EDMA supports in non-queued and
1268 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1269 * of which are defined/used by Linux. If we get here, this
1270 * driver needs work.
1272 * FIXME: modify libata to give qc_prep a return value and
1273 * return error here.
1275 BUG_ON(tf->command);
/* Common tail: LBA48 high-order bytes precede the low-order bytes,
 * and the command register is written last (CRQB_CMD_LAST).
 */
1278 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1279 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1280 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1281 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1282 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1283 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1284 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1285 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1286 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
/* Commands without a DMA mapping have no SG table to build
 * (the mv_fill_sg() call appears elided in this extract).
 */
1288 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1294 * mv_qc_prep_iie - Host specific command preparation.
1295 * @qc: queued command to prepare
1297 * This routine simply redirects to the general purpose routine
1298 * if command is not DMA. Else, it handles prep of the CRQB
1299 * (command request block), does some sanity checking, and calls
1300 * the SG load routine.
1303 * Inherited from caller.
1305 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1307 struct ata_port *ap = qc->ap;
1308 struct mv_port_priv *pp = ap->private_data;
1309 struct mv_crqb_iie *crqb;
1310 struct ata_taskfile *tf;
/* Gen IIE prep, like mv_qc_prep(), applies only to DMA/NCQ. */
1314 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1315 (qc->tf.protocol != ATA_PROT_NCQ))
1318 /* Fill in Gen IIE command request block
1320 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1321 flags |= CRQB_FLAG_READ;
1323 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1324 flags |= qc->tag << CRQB_TAG_SHIFT;
/* IIE parts also carry the tag in the host-queue field. */
1325 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1327 /* get current queue index from software */
1328 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
/* The IIE CRQB layout overlays the generic one, hence the cast. */
1330 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1331 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1332 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1333 crqb->flags = cpu_to_le32(flags);
/* Taskfile is packed into four 32-bit words instead of 16-bit
 * register/value pairs (some field lines elided in this extract).
 */
1336 crqb->ata_cmd[0] = cpu_to_le32(
1337 (tf->command << 16) |
1340 crqb->ata_cmd[1] = cpu_to_le32(
1346 crqb->ata_cmd[2] = cpu_to_le32(
1347 (tf->hob_lbal << 0) |
1348 (tf->hob_lbam << 8) |
1349 (tf->hob_lbah << 16) |
1350 (tf->hob_feature << 24)
1352 crqb->ata_cmd[3] = cpu_to_le32(
1354 (tf->hob_nsect << 8)
/* No DMA mapping -> no SG table to build. */
1357 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1363 * mv_qc_issue - Initiate a command to the host
1364 * @qc: queued command to start
1366 * This routine simply redirects to the general purpose routine
1367 * if command is not DMA. Else, it sanity checks our local
1368 * caches of the request producer/consumer indices then enables
1369 * DMA and bumps the request producer index.
1372 * Inherited from caller.
1374 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1376 struct ata_port *ap = qc->ap;
1377 void __iomem *port_mmio = mv_ap_base(ap);
1378 struct mv_port_priv *pp = ap->private_data;
1381 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1382 (qc->tf.protocol != ATA_PROT_NCQ)) {
1383 /* We're about to send a non-EDMA capable command to the
1384 * port. Turn off EDMA so there won't be problems accessing
1385 * shadow block, etc registers.
/* (mv_stop_dma() call elided in this extract.) Hand the command to
 * the generic libata issue path.
 */
1388 return ata_qc_issue_prot(qc);
1391 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
/* req_idx increment appears elided here; in_index encodes the new
 * producer slot in the register's pointer field.
 */
1395 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1397 /* and write the request in pointer to kick the EDMA to life */
1398 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1399 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1405 * mv_err_intr - Handle error interrupts on the port
1406 * @ap: ATA channel to manipulate
/* NOTE(review): kernel-doc was stale -- the function now takes @qc, not
 * @reset_allowed.
 */
1407 * @qc: active command, if any (its err_mask is updated when non-NULL)
1409 * In most cases, just clear the interrupt and move on. However,
1410 * some cases require an eDMA reset, which is done right before
1411 * the COMRESET in mv_phy_reset(). The SERR case requires a
1412 * clear of pending errors in the SATA SERROR register. Finally,
1413 * if the port disabled DMA, update our cached copy to match.
1416 * Inherited from caller.
1418 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1420 void __iomem *port_mmio = mv_ap_base(ap);
1421 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1422 struct mv_port_priv *pp = ap->private_data;
1423 struct mv_host_priv *hpriv = ap->host->private_data;
1424 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1425 unsigned int action = 0, err_mask = 0;
1426 struct ata_eh_info *ehi = &ap->link.eh_info;
1428 ata_ehi_clear_desc(ehi);
1430 if (!edma_enabled) {
1431 /* just a guess: do we need to do this? should we
1432 * expand this, and do it in all cases?
/* Read-then-write-back clears the SATA SERROR bits. */
1434 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1435 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1438 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1440 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1443 * all generations share these EDMA error cause bits
1446 if (edma_err_cause & EDMA_ERR_DEV)
1447 err_mask |= AC_ERR_DEV;
1448 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1449 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1450 EDMA_ERR_INTRL_PAR)) {
1451 err_mask |= AC_ERR_ATA_BUS;
1452 action |= ATA_EH_RESET;
1453 ata_ehi_push_desc(ehi, "parity error");
/* Hotplug events (connect/disconnect) always force a reset. */
1455 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1456 ata_ehi_hotplugged(ehi);
1457 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1458 "dev disconnect" : "dev connect");
1459 action |= ATA_EH_RESET;
/* Gen I (50xx) uses different freeze/self-disable bits than II/IIE. */
1462 if (IS_GEN_I(hpriv)) {
1463 eh_freeze_mask = EDMA_EH_FREEZE_5;
1465 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1466 pp = ap->private_data;
1467 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1468 ata_ehi_push_desc(ehi, "EDMA self-disable");
1471 eh_freeze_mask = EDMA_EH_FREEZE;
1473 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1474 pp = ap->private_data;
1475 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1476 ata_ehi_push_desc(ehi, "EDMA self-disable");
1479 if (edma_err_cause & EDMA_ERR_SERR) {
1480 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1481 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1482 err_mask = AC_ERR_ATA_BUS;
1483 action |= ATA_EH_RESET;
1487 /* Clear EDMA now that SERR cleanup done */
1488 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
/* Fallback when no specific cause bit produced an err_mask
 * (surrounding condition appears elided in this extract).
 */
1491 err_mask = AC_ERR_OTHER;
1492 action |= ATA_EH_RESET;
1495 ehi->serror |= serr;
1496 ehi->action |= action;
/* Attribute the error to the active command if there is one,
 * otherwise record it on the link's eh_info.
 */
1499 qc->err_mask |= err_mask;
1501 ehi->err_mask |= err_mask;
1503 if (edma_err_cause & eh_freeze_mask)
1504 ata_port_freeze(ap);
/* Handle a PIO-mode completion interrupt: read device status and
 * complete the active command unless it is polled or still busy.
 */
1509 static void mv_intr_pio(struct ata_port *ap)
1511 struct ata_queued_cmd *qc;
1514 /* ignore spurious intr if drive still BUSY */
1515 ata_status = readb(ap->ioaddr.status_addr);
1516 if (unlikely(ata_status & ATA_BUSY))
1519 /* get active ATA command */
1520 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1521 if (unlikely(!qc)) /* no active tag */
1523 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1526 /* and finally, complete the ATA command */
1527 qc->err_mask |= ac_err_mask(ata_status);
1528 ata_qc_complete(qc);
/* Drain the EDMA response (CRPB) queue: complete every command whose
 * response the hardware has produced since our last-read pointer.
 */
1531 static void mv_intr_edma(struct ata_port *ap)
1533 void __iomem *port_mmio = mv_ap_base(ap);
1534 struct mv_host_priv *hpriv = ap->host->private_data;
1535 struct mv_port_priv *pp = ap->private_data;
1536 struct ata_queued_cmd *qc;
1537 u32 out_index, in_index;
1538 bool work_done = false;
1540 /* get h/w response queue pointer */
1541 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1542 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1548 /* get s/w response queue last-read pointer, and compare */
1549 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
/* Queue empty when producer == consumer (loop structure around this
 * appears elided in the extract).
 */
1550 if (in_index == out_index)
1553 /* 50xx: get active ATA command */
1554 if (IS_GEN_I(hpriv))
1555 tag = ap->link.active_tag;
1557 /* Gen II/IIE: get active ATA command via tag, to enable
1558 * support for queueing. this works transparently for
1559 * queued and non-queued modes.
1562 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1564 qc = ata_qc_from_tag(ap, tag);
1566 /* For non-NCQ mode, the lower 8 bits of status
1567 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1568 * which should be zero if all went well.
1570 status = le16_to_cpu(pp->crpb[out_index].flags);
1571 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1572 mv_err_intr(ap, qc);
1576 /* and finally, complete the ATA command */
/* The high byte of the CRPB flags carries the device status. */
1579 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1580 ata_qc_complete(qc);
1583 /* advance software response queue pointer, to
1584 * indicate (after the loop completes) to hardware
1585 * that we have consumed a response queue entry.
/* Tell the hardware how far we consumed (out pointer write-back). */
1592 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1593 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1594 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1598 * mv_host_intr - Handle all interrupts on the given host controller
1599 * @host: host specific structure
1600 * @relevant: port error bits relevant to this host controller
1601 * @hc: which host controller we're to look at
1603 * Read then write clear the HC interrupt status then walk each
1604 * port connected to the HC and see if it needs servicing. Port
1605 * success ints are reported in the HC interrupt status reg, the
1606 * port error ints are reported in the higher level main
1607 * interrupt status register and thus are passed in via the
1608 * 'relevant' argument.
1611 * Inherited from caller.
1613 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1615 struct mv_host_priv *hpriv = host->private_data;
1616 void __iomem *mmio = hpriv->base;
1617 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1619 int port, port0, last_port;
/* HC 1 starts at port MV_PORTS_PER_HC (port0 = 0 branch elided). */
1624 port0 = MV_PORTS_PER_HC;
1627 last_port = port0 + MV_PORTS_PER_HC;
1629 last_port = port0 + hpriv->n_ports;
1630 /* we'll need the HC success int register in most cases */
1631 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
/* Write-1-to-clear via the inverted cause mask. */
1635 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1637 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1638 hc, relevant, hc_irq_cause);
1640 for (port = port0; port < last_port; port++) {
1641 struct ata_port *ap = host->ports[port];
1642 struct mv_port_priv *pp;
1643 int have_err_bits, hard_port, shift;
1645 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1648 pp = ap->private_data;
1650 shift = port << 1; /* (port * 2) */
1651 if (port >= MV_PORTS_PER_HC) {
1652 shift++; /* skip bit 8 in the HC Main IRQ reg */
1654 have_err_bits = ((PORT0_ERR << shift) & relevant);
1656 if (unlikely(have_err_bits)) {
1657 struct ata_queued_cmd *qc;
1659 qc = ata_qc_from_tag(ap, ap->link.active_tag);
/* Polled commands are not ours to fail here. */
1660 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1663 mv_err_intr(ap, qc);
1667 hard_port = mv_hardport_from_port(port); /* range 0..3 */
/* EDMA mode: service the response queue; otherwise a plain device
 * interrupt means a PIO completion (handler calls elided here).
 */
1669 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1670 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1673 if ((DEV_IRQ << hard_port) & hc_irq_cause)
/* Handle a host-level PCI error: log the cause, clear it, then fail
 * the active command (or record the error) on every online port and
 * freeze them for EH.
 */
1680 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1682 struct mv_host_priv *hpriv = host->private_data;
1683 struct ata_port *ap;
1684 struct ata_queued_cmd *qc;
1685 struct ata_eh_info *ehi;
1686 unsigned int i, err_mask, printed = 0;
1689 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1691 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1694 DPRINTK("All regs @ PCI error\n");
1695 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
/* Acknowledge/clear the PCI error cause bits. */
1697 writelfl(0, mmio + hpriv->irq_cause_ofs);
1699 for (i = 0; i < host->n_ports; i++) {
1700 ap = host->ports[i];
1701 if (!ata_link_offline(&ap->link)) {
1702 ehi = &ap->link.eh_info;
1703 ata_ehi_clear_desc(ehi);
1705 ata_ehi_push_desc(ehi,
1706 "PCI err cause 0x%08x", err_cause);
1707 err_mask = AC_ERR_HOST_BUS;
1708 ehi->action = ATA_EH_RESET;
1709 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1711 qc->err_mask |= err_mask;
1713 ehi->err_mask |= err_mask;
1715 ata_port_freeze(ap);
1721 * mv_interrupt - Main interrupt event handler
1723 * @dev_instance: private data; in this case the host structure
1725 * Read the read only register to determine if any host
1726 * controllers have pending interrupts. If so, call lower level
1727 * routine to handle. Also check for PCI errors which are only
1731 * This routine holds the host lock while processing pending
1734 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1736 struct ata_host *host = dev_instance;
1737 struct mv_host_priv *hpriv = host->private_data;
1738 unsigned int hc, handled = 0, n_hcs;
1739 void __iomem *mmio = hpriv->base;
1740 u32 irq_stat, irq_mask;
1742 spin_lock(&host->lock);
1744 irq_stat = readl(hpriv->main_cause_reg_addr);
1745 irq_mask = readl(hpriv->main_mask_reg_addr);
1747 /* check the cases where we either have nothing pending or have read
1748 * a bogus register value which can indicate HW removal or PCI fault
/* All-ones read is the classic "device gone" signature. */
1750 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1753 n_hcs = mv_get_hc_count(host->ports[0]->flags);
1755 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1756 mv_pci_error(host, mmio);
1758 goto out_unlock; /* skip all other HC irq handling */
/* Dispatch each host controller whose bits are pending. */
1761 for (hc = 0; hc < n_hcs; hc++) {
1762 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1764 mv_host_intr(host, relevant, hc);
1770 spin_unlock(&host->lock);
1772 return IRQ_RETVAL(handled);
/* 50xx: per-port PHY registers live in 0x100-byte slots following the
 * owning host controller's register base.
 */
1775 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1777 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1778 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1780 return hc_mmio + ofs;
/* Map an SCR register index to its byte offset in the 50xx PHY block;
 * callers treat 0xffffffffU as "unsupported register" (see
 * mv5_scr_read/mv5_scr_write). Switch cases appear elided here.
 */
1783 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1787 switch (sc_reg_in) {
1791 ofs = sc_reg_in * sizeof(u32);
/* 50xx SCR read: translate the register index to a PHY-block offset
 * and read it; unsupported registers (offset 0xffffffffU) are skipped.
 */
1800 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1802 struct mv_host_priv *hpriv = ap->host->private_data;
1803 void __iomem *mmio = hpriv->base;
1804 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1805 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1807 if (ofs != 0xffffffffU) {
1808 *val = readl(addr + ofs);
/* 50xx SCR write: mirror of mv5_scr_read(), using a flushing write. */
1814 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1816 struct mv_host_priv *hpriv = ap->host->private_data;
1817 void __iomem *mmio = hpriv->base;
1818 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1819 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1821 if (ofs != 0xffffffffU) {
1822 writelfl(val, addr + ofs);
/* 50xx bus reset; early 5080 (rev 0) needs an extra expansion-ROM BAR
 * control tweak before the common PCI bus reset (modification of tmp
 * appears elided in this extract).
 */
1828 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1830 struct pci_dev *pdev = to_pci_dev(host->dev);
1833 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1836 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1838 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1841 mv_reset_pci_bus(host, mmio);
/* Restore the flash controller to its reset-default value. */
1844 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1846 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
/* 50xx: capture BIOS-programmed PHY pre-emphasis and amplitude values
 * so phy_errata can restore them after reset.
 */
1849 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1852 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1855 tmp = readl(phy_mmio + MV5_PHY_MODE);
1857 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1858 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
/* 50xx LED setup via GPIO port control and the expansion-ROM BAR
 * control register (tmp modification elided in this extract).
 */
1861 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1865 writel(0, mmio + MV_GPIO_PORT_CTL);
1867 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1869 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1871 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
/* Apply 50xx PHY errata workarounds, then restore the saved
 * pre-emphasis/amplitude signal values for this port.
 */
1874 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1877 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1878 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
/* 50XXB0 parts additionally need the APM/squelch fix. */
1880 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1883 tmp = readl(phy_mmio + MV5_LT_MODE);
1885 writel(tmp, phy_mmio + MV5_LT_MODE);
1887 tmp = readl(phy_mmio + MV5_PHY_CTL);
1890 writel(tmp, phy_mmio + MV5_PHY_CTL);
/* Clear the masked signal bits, then merge in the values captured by
 * mv5_read_preamp() (mask application elided in this extract).
 */
1893 tmp = readl(phy_mmio + MV5_PHY_MODE);
1895 tmp |= hpriv->signal[port].pre;
1896 tmp |= hpriv->signal[port].amps;
1897 writel(tmp, phy_mmio + MV5_PHY_MODE);
1902 #define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one 50xx port: disable EDMA, reset the channel, then zero all
 * EDMA queue/irq registers and restore default config values.
 */
1903 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1906 void __iomem *port_mmio = mv_port_base(mmio, port);
1908 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
1910 mv_channel_reset(hpriv, mmio, port);
1912 ZERO(0x028); /* command */
1913 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1914 ZERO(0x004); /* timer */
1915 ZERO(0x008); /* irq err cause */
1916 ZERO(0x00c); /* irq err mask */
1917 ZERO(0x010); /* rq bah */
1918 ZERO(0x014); /* rq inp */
1919 ZERO(0x018); /* rq outp */
1920 ZERO(0x01c); /* respq bah */
1921 ZERO(0x024); /* respq outp */
1922 ZERO(0x020); /* respq inp */
1923 ZERO(0x02c); /* test control */
1924 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
1928 #define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller block (register zeroing lines appear
 * elided in this extract; only the 0x20 read-modify-write is visible).
 */
1929 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1932 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1940 tmp = readl(hc_mmio + 0x20);
1943 writel(tmp, hc_mmio + 0x20);
/* Full 50xx reset: every port of every host controller, then each host
 * controller block itself.
 */
1947 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1950 unsigned int hc, port;
1952 for (hc = 0; hc < n_hc; hc++) {
1953 for (port = 0; port < MV_PORTS_PER_HC; port++)
1954 mv5_reset_hc_port(hpriv, mmio,
1955 (hc * MV_PORTS_PER_HC) + port);
1957 mv5_reset_one_hc(hpriv, mmio, hc);
1964 #define ZERO(reg) writel(0, mmio + (reg))
/* Reset PCI-facing state: restore PCI mode, zero the error/mask/status
 * registers and reprogram the crossbar timeout.
 */
1965 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
1967 struct mv_host_priv *hpriv = host->private_data;
1970 tmp = readl(mmio + MV_PCI_MODE);
1972 writel(tmp, mmio + MV_PCI_MODE);
1974 ZERO(MV_PCI_DISC_TIMER);
1975 ZERO(MV_PCI_MSI_TRIGGER);
1976 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
1977 ZERO(HC_MAIN_IRQ_MASK_OFS);
1978 ZERO(MV_PCI_SERR_MASK);
1979 ZERO(hpriv->irq_cause_ofs);
1980 ZERO(hpriv->irq_mask_ofs);
1981 ZERO(MV_PCI_ERR_LOW_ADDRESS);
1982 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
1983 ZERO(MV_PCI_ERR_ATTRIBUTE);
1984 ZERO(MV_PCI_ERR_COMMAND);
/* 60xx flash reset: do the 50xx sequence, then additionally set GPIO
 * port-control bits 5 and 6.
 */
1988 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1992 mv5_reset_flash(hpriv, mmio);
1994 tmp = readl(mmio + MV_GPIO_PORT_CTL);
1996 tmp |= (1 << 5) | (1 << 6);
1997 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2001 * mv6_reset_hc - Perform the 6xxx global soft reset
2002 * @mmio: base address of the HBA
2004 * This routine only applies to 6xxx parts.
2007 * Inherited from caller.
2009 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2012 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2016 /* Following procedure defined in PCI "main command and status
/* Step 1: stop the PCI master and wait (bounded poll) for its
 * buffers to drain.
 */
2020 writel(t | STOP_PCI_MASTER, reg);
2022 for (i = 0; i < 1000; i++) {
2025 if (PCI_MASTER_EMPTY & t)
2028 if (!(PCI_MASTER_EMPTY & t)) {
2029 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
/* Step 2: assert global soft reset and poll for it to latch. */
2037 writel(t | GLOB_SFT_RST, reg);
2040 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2042 if (!(GLOB_SFT_RST & t)) {
2043 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2048 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2051 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2054 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2056 if (GLOB_SFT_RST & t) {
2057 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
/* 60xx: capture PHY signal settings. If the reset-config strap (bit 0)
 * is clear, use fixed defaults; otherwise read them from PHY_MODE2.
 */
2064 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2067 void __iomem *port_mmio;
2070 tmp = readl(mmio + MV_RESET_CFG);
2071 if ((tmp & (1 << 0)) == 0) {
2072 hpriv->signal[idx].amps = 0x7 << 8;
2073 hpriv->signal[idx].pre = 0x1 << 5;
2077 port_mmio = mv_port_base(mmio, idx);
2078 tmp = readl(port_mmio + PHY_MODE2);
2080 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2081 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
/* 60xx LED enable: single GPIO port-control write. */
2084 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2086 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
/* Apply 60xx/IIE PHY errata fixes (PHY_MODE2/3/4), then restore the
 * saved pre-emphasis/amplitude values for this port.
 */
2089 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2092 void __iomem *port_mmio = mv_port_base(mmio, port);
2094 u32 hp_flags = hpriv->hp_flags;
/* B2/C0 revisions need both the mode-2 and mode-4 fixes. */
2096 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2098 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2101 if (fix_phy_mode2) {
2102 m2 = readl(port_mmio + PHY_MODE2);
2105 writel(m2, port_mmio + PHY_MODE2);
2109 m2 = readl(port_mmio + PHY_MODE2);
2110 m2 &= ~((1 << 16) | (1 << 31));
2111 writel(m2, port_mmio + PHY_MODE2);
2116 /* who knows what this magic does */
2117 tmp = readl(port_mmio + PHY_MODE3);
2120 writel(tmp, port_mmio + PHY_MODE3);
2122 if (fix_phy_mode4) {
/* B2 parts: register 0x310 must be preserved around the PHY_MODE4
 * update (it shares address space per the errata).
 */
2125 m4 = readl(port_mmio + PHY_MODE4);
2127 if (hp_flags & MV_HP_ERRATA_60X1B2)
2128 tmp = readl(port_mmio + 0x310);
2130 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2132 writel(m4, port_mmio + PHY_MODE4);
2134 if (hp_flags & MV_HP_ERRATA_60X1B2)
2135 writel(tmp, port_mmio + 0x310);
2138 /* Revert values of pre-emphasis and signal amps to the saved ones */
2139 m2 = readl(port_mmio + PHY_MODE2);
2141 m2 &= ~MV_M2_PREAMP_MASK;
2142 m2 |= hpriv->signal[port].amps;
2143 m2 |= hpriv->signal[port].pre;
2146 /* according to mvSata 3.6.1, some IIE values are fixed */
2147 if (IS_GEN_IIE(hpriv)) {
2152 writel(m2, port_mmio + PHY_MODE2)
2155 /* TODO: use the generic LED interface to configure the SATA Presence */
2156 /* & Acitivy LEDs on the board */
/* SoC variant: intentionally a no-op (see TODO above). */
2157 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
/* SoC: capture PHY signal settings directly from PHY_MODE2 (no
 * reset-config strap check, unlike mv6_read_preamp).
 */
2163 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2166 void __iomem *port_mmio;
2169 port_mmio = mv_port_base(mmio, idx);
2170 tmp = readl(port_mmio + PHY_MODE2);
2172 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2173 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2177 #define ZERO(reg) writel(0, port_mmio + (reg))
/* SoC port reset: same sequence as mv5_reset_hc_port but with the SoC
 * EDMA_CFG default (0x101f instead of 0x11f).
 */
2178 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2179 void __iomem *mmio, unsigned int port)
2181 void __iomem *port_mmio = mv_port_base(mmio, port);
2183 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
2185 mv_channel_reset(hpriv, mmio, port);
2187 ZERO(0x028); /* command */
2188 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2189 ZERO(0x004); /* timer */
2190 ZERO(0x008); /* irq err cause */
2191 ZERO(0x00c); /* irq err mask */
2192 ZERO(0x010); /* rq bah */
2193 ZERO(0x014); /* rq inp */
2194 ZERO(0x018); /* rq outp */
2195 ZERO(0x01c); /* respq bah */
2196 ZERO(0x024); /* respq outp */
2197 ZERO(0x020); /* respq inp */
2198 ZERO(0x02c); /* test control */
2199 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
2204 #define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset the SoC's single host controller block (register zeroing
 * lines appear elided in this extract).
 */
2205 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2208 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
/* Full SoC reset: all ports, then the (single) host controller.
 * Note: n_hc is unused here -- the SoC has exactly one HC.
 */
2218 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2219 void __iomem *mmio, unsigned int n_hc)
2223 for (port = 0; port < hpriv->n_ports; port++)
2224 mv_soc_reset_hc_port(hpriv, mmio, port);
2226 mv_soc_reset_one_hc(hpriv, mmio);
/* SoC variant: no flash controller to reset -- body is empty. */
2231 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
/* SoC variant: no PCI bus to reset -- body is empty. */
2237 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
/* Assert (then clear) the per-port ATA_RST bit in the EDMA command
 * register, apply Gen II interface-control fixups, and run the
 * generation-specific PHY errata.
 */
2242 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
2243 unsigned int port_no)
2245 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2247 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2249 if (IS_GEN_II(hpriv)) {
2250 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2251 ifctl |= (1 << 7); /* enable gen2i speed */
2252 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2253 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2256 udelay(25); /* allow reset propagation */
2258 /* Spec never mentions clearing the bit. Marvell's driver does
2259 * clear the bit, however.
2261 writelfl(0, port_mmio + EDMA_CMD_OFS);
2263 hpriv->ops->phy_errata(hpriv, mmio, port_no);
/* Gen I delay (body elided in this extract). */
2265 if (IS_GEN_I(hpriv))
2270 * mv_phy_reset - Perform eDMA reset followed by COMRESET
2271 * @ap: ATA channel to manipulate
2273 * Part of this is taken from __sata_phy_reset and modified to
2274 * not sleep since this routine gets called from interrupt level.
2277 * Inherited from caller. This is coded to safe to call at
2278 * interrupt level, i.e. it does not sleep.
2280 static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
2281 unsigned long deadline)
2283 struct mv_port_priv *pp = ap->private_data;
2284 struct mv_host_priv *hpriv = ap->host->private_data;
2285 void __iomem *port_mmio = mv_ap_base(ap);
2289 VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
2293 u32 sstatus, serror, scontrol;
2295 mv_scr_read(ap, SCR_STATUS, &sstatus);
2296 mv_scr_read(ap, SCR_ERROR, &serror);
2297 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2298 DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
2299 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2303 /* Issue COMRESET via SControl */
/* DET=1 starts COMRESET; DET=0 releases it (retry label elided). */
2305 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
2308 sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
/* Poll SStatus DET until the link settles (3 = up, 0 = no device). */
2312 sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
2313 if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
2317 } while (time_before(jiffies, deadline));
2319 /* work around errata */
/* Gen II parts can report bogus SStatus values; retry COMRESET
 * unless one of the known-good values is seen.
 */
2320 if (IS_GEN_II(hpriv) &&
2321 (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
2323 goto comreset_retry;
2327 u32 sstatus, serror, scontrol;
2329 mv_scr_read(ap, SCR_STATUS, &sstatus);
2330 mv_scr_read(ap, SCR_ERROR, &serror);
2331 mv_scr_read(ap, SCR_CONTROL, &scontrol);
2332 DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
2333 "SCtrl 0x%08x\n", sstatus, serror, scontrol);
2337 if (ata_link_offline(&ap->link)) {
2338 *class = ATA_DEV_NONE;
2342 /* even after SStatus reflects that device is ready,
2343 * it seems to take a while for link to be fully
2344 * established (and thus Status no longer 0x80/0x7F),
2345 * so we poll a bit for that, here.
2349 u8 drv_stat = ata_check_status(ap);
2350 if ((drv_stat != 0x80) && (drv_stat != 0x7f))
2355 if (time_after(jiffies, deadline))
2359 /* FIXME: if we passed the deadline, the following
2360 * code probably produces an invalid result
2363 /* finally, read device signature from TF registers */
2364 *class = ata_dev_try_classify(ap->link.device, 1, NULL);
/* Clear any EDMA error latched during the reset sequence. */
2366 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2368 WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
/* EH prereset hook: record that this port has been reset at least
 * once (flag consulted elsewhere; further logic elided in extract).
 */
2373 static int mv_prereset(struct ata_link *link, unsigned long deadline)
2375 struct ata_port *ap = link->ap;
2376 struct mv_port_priv *pp = ap->private_data;
2380 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
2381 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
/* EH hardreset hook: channel (eDMA) reset followed by COMRESET and
 * device classification via mv_phy_reset().
 */
2386 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2387 unsigned long deadline)
2389 struct ata_port *ap = link->ap;
2390 struct mv_host_priv *hpriv = ap->host->private_data;
2391 void __iomem *mmio = hpriv->base;
2395 mv_channel_reset(hpriv, mmio, ap->port_no);
2397 mv_phy_reset(ap, class, deadline);
/* EH postreset hook: report link status, clear SError, and program the
 * device-control register if a device was found.
 */
2402 static void mv_postreset(struct ata_link *link, unsigned int *classes)
2404 struct ata_port *ap = link->ap;
2407 /* print link status */
2408 sata_print_link_status(link);
/* Read-then-write-back clears the latched SError bits. */
2411 sata_scr_read(link, SCR_ERROR, &serr);
2412 sata_scr_write_flush(link, SCR_ERROR, serr);
2414 /* bail out if no device is present */
2415 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2416 DPRINTK("EXIT, no device\n");
2420 /* set up device control */
2421 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
/* EH freeze: mask this port's err/done bits in the main IRQ mask so no
 * further interrupts are delivered while EH runs.
 */
2424 static void mv_eh_freeze(struct ata_port *ap)
2426 struct mv_host_priv *hpriv = ap->host->private_data;
2427 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2431 /* FIXME: handle coalescing completion events properly */
/* Two mask bits per port in the main IRQ mask register; HC1 shift
 * adjustment appears elided in this extract.
 */
2433 shift = ap->port_no * 2;
2437 mask = 0x3 << shift;
2439 /* disable assertion of portN err, done events */
2440 tmp = readl(hpriv->main_mask_reg_addr);
2441 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
/* EH thaw: clear any latched port/HC interrupt state, then re-enable
 * this port's err/done bits in the main IRQ mask.
 */
2444 static void mv_eh_thaw(struct ata_port *ap)
2446 struct mv_host_priv *hpriv = ap->host->private_data;
2447 void __iomem *mmio = hpriv->base;
2448 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2449 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2450 void __iomem *port_mmio = mv_ap_base(ap);
2451 u32 tmp, mask, hc_irq_cause;
2452 unsigned int shift, hc_port_no = ap->port_no;
2454 /* FIXME: handle coalescing completion events properly */
/* Mirror of mv_eh_freeze()'s shift computation (HC1 adjustment of
 * shift/hc_port_no appears elided in this extract).
 */
2456 shift = ap->port_no * 2;
2462 mask = 0x3 << shift;
2464 /* clear EDMA errors on this port */
2465 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2467 /* clear pending irq events */
2468 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2469 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2470 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2471 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2473 /* enable assertion of portN err, done events */
2474 tmp = readl(hpriv->main_mask_reg_addr);
2475 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2479 * mv_port_init - Perform some early initialization on a single port.
2480 * @port: libata data structure storing shadow register addresses
2481 * @port_mmio: base address of the port
2483 * Initialize shadow register mmio addresses, clear outstanding
2484 * interrupts on the port, and unmask interrupts for the future
2485 * start of the port.
2488 * Inherited from caller.
2490 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2492 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2495 /* PIO related setup
/* Taskfile shadow registers are u32-spaced within the shadow block. */
2497 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2499 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2500 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2501 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2502 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2503 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2504 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2506 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2507 /* special case: control/altstatus doesn't have ATA_REG_ address */
2508 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
/* This hardware has no legacy cmd/bmdma/scr blocks at these slots. */
2511 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2513 /* Clear any currently outstanding port interrupt conditions */
2514 serr_ofs = mv_scr_offset(SCR_ERROR);
2515 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2516 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2518 /* unmask all non-transient EDMA error interrupts */
2519 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2521 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2522 readl(port_mmio + EDMA_CFG_OFS),
2523 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2524 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
/*
 * mv_chip_id - classify the controller variant for @board_idx.
 *
 * Selects the per-generation hardware-ops table (hpriv->ops), accumulates
 * MV_HP_* generation and errata flags from the PCI revision ID, and picks
 * the PCI vs PCIe interrupt cause/mask register offsets.  Returns 0 on
 * success (error paths for unknown board indices are outside this excerpt).
 */
2527 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2529 struct pci_dev *pdev = to_pci_dev(host->dev);
2530 struct mv_host_priv *hpriv = host->private_data;
2531 u32 hp_flags = hpriv->hp_flags;
2533 switch (board_idx) {
/* Gen-I (50xx-class) chips, first variant: */
2535 hpriv->ops = &mv5xxx_ops;
2536 hp_flags |= MV_HP_GEN_I;
2538 switch (pdev->revision) {
2540 hp_flags |= MV_HP_ERRATA_50XXB0;
2543 hp_flags |= MV_HP_ERRATA_50XXB2;
/* Unknown revision: conservatively apply the newest known workarounds. */
2546 dev_printk(KERN_WARNING, &pdev->dev,
2547 "Applying 50XXB2 workarounds to unknown rev\n");
2548 hp_flags |= MV_HP_ERRATA_50XXB2;
/* Gen-I, second variant (same ops table, same errata scheme): */
2555 hpriv->ops = &mv5xxx_ops;
2556 hp_flags |= MV_HP_GEN_I;
2558 switch (pdev->revision) {
2560 hp_flags |= MV_HP_ERRATA_50XXB0;
2563 hp_flags |= MV_HP_ERRATA_50XXB2;
2566 dev_printk(KERN_WARNING, &pdev->dev,
2567 "Applying B2 workarounds to unknown rev\n");
2568 hp_flags |= MV_HP_ERRATA_50XXB2;
/* Gen-II (60xx-class) chips: */
2575 hpriv->ops = &mv6xxx_ops;
2576 hp_flags |= MV_HP_GEN_II;
2578 switch (pdev->revision) {
2580 hp_flags |= MV_HP_ERRATA_60X1B2;
2583 hp_flags |= MV_HP_ERRATA_60X1C0;
2586 dev_printk(KERN_WARNING, &pdev->dev,
2587 "Applying B2 workarounds to unknown rev\n");
2588 hp_flags |= MV_HP_ERRATA_60X1B2;
/* PCIe-attached variant; MV_HP_PCIE selects the PCIe IRQ regs below. */
2594 hp_flags |= MV_HP_PCIE;
2595 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2596 (pdev->device == 0x2300 || pdev->device == 0x2310))
2599 * Highpoint RocketRAID PCIe 23xx series cards:
2601 * Unconfigured drives are treated as "Legacy"
2602 * by the BIOS, and it overwrites sector 8 with
2603 * a "Lgcy" metadata block prior to Linux boot.
2605 * Configured drives (RAID or JBOD) leave sector 8
2606 * alone, but instead overwrite a high numbered
2607 * sector for the RAID metadata. This sector can
2608 * be determined exactly, by truncating the physical
2609 * drive capacity to a nice even GB value.
2611 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2613 * Warn the user, lest they think we're just buggy.
2615 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2616 " BIOS CORRUPTS DATA on all attached drives,"
2617 " regardless of if/how they are configured."
2619 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2620 " use sectors 8-9 on \"Legacy\" drives,"
2621 " and avoid the final two gigabytes on"
2622 " all RocketRAID BIOS initialized drives.\n");
/* Gen-IIE chips: */
2625 hpriv->ops = &mv6xxx_ops;
2626 hp_flags |= MV_HP_GEN_IIE;
2628 switch (pdev->revision) {
2630 hp_flags |= MV_HP_ERRATA_XX42A0;
2633 hp_flags |= MV_HP_ERRATA_60X1C0;
2636 dev_printk(KERN_WARNING, &pdev->dev,
2637 "Applying 60X1C0 workarounds to unknown rev\n");
2638 hp_flags |= MV_HP_ERRATA_60X1C0;
/* System-on-chip (non-PCI) variant: */
2643 hpriv->ops = &mv_soc_ops;
2644 hp_flags |= MV_HP_ERRATA_60X1C0;
/* Unrecognized board index — driver table and this switch are out of sync. */
2648 dev_printk(KERN_ERR, host->dev,
2649 "BUG: invalid board index %u\n", board_idx);
/* Commit the accumulated flags, then choose the interrupt cause/mask
 * register offsets based on whether this is a PCIe-attached chip. */
2653 hpriv->hp_flags = hp_flags;
2654 if (hp_flags & MV_HP_PCIE) {
2655 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2656 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2657 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2659 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2660 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2661 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2668 * mv_init_host - Perform some early initialization of the host.
2669 * @host: ATA host to initialize
2670 * @board_idx: controller index
2672 * If possible, do an early global reset of the host. Then do
2673 * our port init and clear/unmask all/relevant host interrupts.
2676 * Inherited from caller.
2678 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2680 int rc = 0, n_hc, port, hc;
2681 struct mv_host_priv *hpriv = host->private_data;
2682 void __iomem *mmio = hpriv->base;
/* Identify chip generation/errata first; everything below depends on it. */
2684 rc = mv_chip_id(host, board_idx);
/* The main IRQ cause/mask registers live at different offsets on the
 * PCI-attached chips vs the SoC integration. */
2688 if (HAS_PCI(host)) {
2689 hpriv->main_cause_reg_addr = hpriv->base +
2690 HC_MAIN_IRQ_CAUSE_OFS;
2691 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2693 hpriv->main_cause_reg_addr = hpriv->base +
2694 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2695 hpriv->main_mask_reg_addr = hpriv->base +
2696 HC_SOC_MAIN_IRQ_MASK_OFS;
2698 /* global interrupt mask */
2699 writel(0, hpriv->main_mask_reg_addr);
2701 n_hc = mv_get_hc_count(host->ports[0]->flags);
/* Snapshot per-port PHY preamp settings before resetting the HCs. */
2703 for (port = 0; port < host->n_ports; port++)
2704 hpriv->ops->read_preamp(hpriv, port, mmio);
2706 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2710 hpriv->ops->reset_flash(hpriv, mmio);
2711 hpriv->ops->reset_bus(host, mmio);
2712 hpriv->ops->enable_leds(hpriv, mmio);
/* Per-port PHY setup: Gen-II needs an interface-control fixup, and every
 * generation gets its chip-specific PHY errata applied. */
2714 for (port = 0; port < host->n_ports; port++) {
2715 if (IS_GEN_II(hpriv)) {
2716 void __iomem *port_mmio = mv_port_base(mmio, port);
2718 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
2719 ifctl |= (1 << 7); /* enable gen2i speed */
2720 ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
2721 writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
2724 hpriv->ops->phy_errata(hpriv, mmio, port);
/* Initialize each libata port's shadow-register addresses and, for PCI
 * hosts, publish the BAR/offset description for dmesg. */
2727 for (port = 0; port < host->n_ports; port++) {
2728 struct ata_port *ap = host->ports[port];
2729 void __iomem *port_mmio = mv_port_base(mmio, port);
2731 mv_port_init(&ap->ioaddr, port_mmio);
2734 if (HAS_PCI(host)) {
2735 unsigned int offset = port_mmio - mmio;
2736 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2737 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
/* Clear stale interrupt state on every host controller block. */
2742 for (hc = 0; hc < n_hc; hc++) {
2743 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2745 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2746 "(before clear)=0x%08x\n", hc,
2747 readl(hc_mmio + HC_CFG_OFS),
2748 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2750 /* Clear any currently outstanding hc interrupt conditions */
2751 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
/* Finally, re-enable interrupts: clear+unmask the host-level regs, then
 * open the main mask (generation-specific set of "interesting" bits). */
2754 if (HAS_PCI(host)) {
2755 /* Clear any currently outstanding host interrupt conditions */
2756 writelfl(0, mmio + hpriv->irq_cause_ofs);
2758 /* and unmask interrupt generation for host regs */
2759 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2760 if (IS_GEN_I(hpriv))
2761 writelfl(~HC_MAIN_MASKED_IRQS_5,
2762 hpriv->main_mask_reg_addr);
2764 writelfl(~HC_MAIN_MASKED_IRQS,
2765 hpriv->main_mask_reg_addr);
2767 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2768 "PCI int cause/mask=0x%08x/0x%08x\n",
2769 readl(hpriv->main_cause_reg_addr),
2770 readl(hpriv->main_mask_reg_addr),
2771 readl(mmio + hpriv->irq_cause_ofs),
2772 readl(mmio + hpriv->irq_mask_ofs));
/* SoC path: no PCI-level cause/mask registers, just the main mask. */
2774 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2775 hpriv->main_mask_reg_addr);
2776 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2777 readl(hpriv->main_cause_reg_addr),
2778 readl(hpriv->main_mask_reg_addr));
/*
 * mv_create_dma_pools - allocate the three DMA pools used per host:
 * command request queue entries (CRQB), command response queue entries
 * (CRPB), and scatter/gather tables.  The dmam_* variants are
 * device-managed, so the pools are destroyed automatically on detach.
 * Each allocation is NULL-checked (failure returns outside this excerpt).
 */
2784 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2786 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2788 if (!hpriv->crqb_pool)
2791 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2793 if (!hpriv->crpb_pool)
2796 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2798 if (!hpriv->sg_tbl_pool)
2805 * mv_platform_probe - handle a positive probe of an soc Marvell
2807 * @pdev: platform device found
2810 * Inherited from caller.
2812 static int mv_platform_probe(struct platform_device *pdev)
2814 static int printed_version;
2815 const struct mv_sata_platform_data *mv_platform_data;
2816 const struct ata_port_info *ppi[] =
2817 { &mv_port_info[chip_soc], NULL };
2818 struct ata_host *host;
2819 struct mv_host_priv *hpriv;
2820 struct resource *res;
/* Print the driver version once, on the first probe only. */
2823 if (!printed_version++)
2824 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2827 * Simple resource validation ..
2829 if (unlikely(pdev->num_resources != 2)) {
2830 dev_err(&pdev->dev, "invalid number of resources\n");
2835 * Get the register base first
2837 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* Port count comes from board-supplied platform data. */
2842 mv_platform_data = pdev->dev.platform_data;
2843 n_ports = mv_platform_data->n_ports;
2845 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2846 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2848 if (!host || !hpriv)
2850 host->private_data = hpriv;
2851 hpriv->n_ports = n_ports;
2854 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2855 res->end - res->start + 1);
/* Bias the base downward so the shared port/hc offset arithmetic (which
 * presumably includes MV_SATAHC0_REG_BASE) resolves correctly on SoC —
 * NOTE(review): confirm against mv_port_base()/mv_hc_base(). */
2856 hpriv->base -= MV_SATAHC0_REG_BASE;
2858 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2862 /* initialize adapter */
2863 rc = mv_init_host(host, chip_soc);
2867 dev_printk(KERN_INFO, &pdev->dev,
2868 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
/* Register the IRQ handler and bring the host live. */
2871 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2872 IRQF_SHARED, &mv6_sht);
2877 * mv_platform_remove - unplug a platform interface
2878 * @pdev: platform device
2880 * A platform bus SATA device has been unplugged. Perform the needed
2881 * cleanup. Also called on module unload for any active devices.
2883 static int __devexit mv_platform_remove(struct platform_device *pdev)
2885 struct device *dev = &pdev->dev;
2886 struct ata_host *host = dev_get_drvdata(dev);
/* ata_host_detach() quiesces and frees the ports; managed (devm_/dmam_)
 * resources from probe are released by the driver core afterwards. */
2888 ata_host_detach(host);
/* Platform-bus glue for the SoC-integrated variant of this controller. */
2892 static struct platform_driver mv_platform_driver = {
2893 .probe = mv_platform_probe,
2894 .remove = __devexit_p(mv_platform_remove),
2897 .owner = THIS_MODULE,
/* Forward declaration: mv_pci_init_one is defined after mv_print_info. */
2903 static int mv_pci_init_one(struct pci_dev *pdev,
2904 const struct pci_device_id *ent);
/* PCI-bus glue; removal is handled entirely by the generic libata helper. */
2907 static struct pci_driver mv_pci_driver = {
2909 .id_table = mv_pci_tbl,
2910 .probe = mv_pci_init_one,
2911 .remove = ata_pci_remove_one,
2917 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2920 /* move to PCI layer or libata core? */
/*
 * pci_go_64 - configure DMA masks: prefer full 64-bit streaming DMA,
 * degrading the consistent (coherent) mask to 32-bit if needed; if even
 * 64-bit streaming is unavailable, fall back to 32-bit for both.
 * Failures are logged; error returns are outside this excerpt.
 */
2921 static int pci_go_64(struct pci_dev *pdev)
2925 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2926 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
/* 64-bit consistent mask refused: retry consistent DMA at 32-bit. */
2928 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2930 dev_printk(KERN_ERR, &pdev->dev,
2931 "64-bit DMA enable failed\n");
/* No 64-bit support at all: fall back to 32-bit streaming + consistent. */
2936 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2938 dev_printk(KERN_ERR, &pdev->dev,
2939 "32-bit DMA enable failed\n");
2942 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2944 dev_printk(KERN_ERR, &pdev->dev,
2945 "32-bit consistent DMA enable failed\n");
2954 * mv_print_info - Dump key info to kernel log for perusal.
2955 * @host: ATA host to print info about
2957 * FIXME: complete this.
2960 * Inherited from caller.
2962 static void mv_print_info(struct ata_host *host)
2964 struct pci_dev *pdev = to_pci_dev(host->dev);
2965 struct mv_host_priv *hpriv = host->private_data;
2967 const char *scc_s, *gen;
2969 /* Use this to determine the HW stepping of the chip so we know
2970 * what errata to workaround
/* The PCI class code's programming-interface byte distinguishes the
 * SCSI/RAID/other personalities; scc_s is set from it (assignments for
 * each value fall outside this excerpt). */
2972 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2975 else if (scc == 0x01)
/* Map the MV_HP_GEN_* flag set by mv_chip_id() to a printable name. */
2980 if (IS_GEN_I(hpriv))
2982 else if (IS_GEN_II(hpriv))
2984 else if (IS_GEN_IIE(hpriv))
2989 dev_printk(KERN_INFO, &pdev->dev,
2990 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2991 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2992 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2996 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
2997 * @pdev: PCI device found
2998 * @ent: PCI device ID entry for the matched host
3001 * Inherited from caller.
3003 static int mv_pci_init_one(struct pci_dev *pdev,
3004 const struct pci_device_id *ent)
3006 static int printed_version;
3007 unsigned int board_idx = (unsigned int)ent->driver_data;
3008 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3009 struct ata_host *host;
3010 struct mv_host_priv *hpriv;
/* Print the driver version once, on the first probe only. */
3013 if (!printed_version++)
3014 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
/* Port count is derived from the board's host-controller count. */
3017 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3019 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3020 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3021 if (!host || !hpriv)
3023 host->private_data = hpriv;
3024 hpriv->n_ports = n_ports;
3026 /* acquire resources */
3027 rc = pcim_enable_device(pdev);
3031 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
/* iomap failed: pin the device so the managed enable isn't undone while
 * another driver might still grab it. */
3033 pcim_pin_device(pdev);
3036 host->iomap = pcim_iomap_table(pdev);
3037 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3039 rc = pci_go_64(pdev);
3043 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3047 /* initialize adapter */
3048 rc = mv_init_host(host, board_idx);
3052 /* Enable interrupts */
/* msi module param requests MSI; on pci_enable_msi() failure the driver
 * falls back to legacy INTx (fallback path outside this excerpt). */
3053 if (msi && pci_enable_msi(pdev))
3056 mv_dump_pci_cfg(pdev, 0x68);
3057 mv_print_info(host);
3059 pci_set_master(pdev);
3060 pci_try_set_mwi(pdev);
/* Gen-I chips use the mv5 sht (no NCQ); later generations use mv6's. */
3061 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3062 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
/* Forward declarations for the platform entry points used below. */
3066 static int mv_platform_probe(struct platform_device *pdev);
3067 static int __devexit mv_platform_remove(struct platform_device *pdev);
/*
 * mv_init - module entry: register the PCI driver, then the platform
 * driver; if the platform registration fails, unwind the PCI one.
 */
3069 static int __init mv_init(void)
3073 rc = pci_register_driver(&mv_pci_driver);
3077 rc = platform_driver_register(&mv_platform_driver);
3081 pci_unregister_driver(&mv_pci_driver);
/* mv_exit - module teardown: unregister both bus drivers. */
3086 static void __exit mv_exit(void)
3089 pci_unregister_driver(&mv_pci_driver);
3091 platform_driver_unregister(&mv_platform_driver);
/* Module metadata, parameters, and entry/exit point registration. */
3094 MODULE_AUTHOR("Brett Russ");
3095 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3096 MODULE_LICENSE("GPL");
3097 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3098 MODULE_VERSION(DRV_VERSION);
3099 MODULE_ALIAS("platform:sata_mv");
/* 0444: world-readable in sysfs, not writable at runtime. */
3102 module_param(msi, int, 0444);
3103 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3106 module_init(mv_init);
3107 module_exit(mv_exit);