 * sata_mv.c - Marvell SATA support
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 * --> Errata workaround for NCQ device errors.
 * --> More errata workarounds for PCI-X.
 * --> Complete a full errata audit for all chipsets to identify others.
 * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
 * --> Investigate problems with PCI Message Signaled Interrupts (MSI).
 * --> Cache frequently-accessed registers in mv_port_priv to reduce overhead.
 * --> Develop a low-power-consumption strategy, and implement it.
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *       Especially with PCI Message Signaled Interrupts (MSI), the
 *       overhead reduced by interrupt mitigation is quite often not
 *       worth the latency cost.
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
	/* BARs are enumerated in terms of pci_resource_start() */
	MV_PRIMARY_BAR = 0,	/* offset 0x10: memory space */
	MV_IO_BAR = 2,		/* offset 0x18: IO space */
	MV_MISC_BAR = 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */
	MV_MAJOR_REG_AREA_SZ = 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ = 0x2000,	/* 8KB */
	MV_IRQ_COAL_REG_BASE = 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
	MV_SATAHC0_REG_BASE = 0x20000,
	MV_FLASH_CTL_OFS = 0x1046c,
	MV_GPIO_PORT_CTL_OFS = 0x104f0,
	MV_RESET_CFG_OFS = 0x180d8,
	MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
	MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
	MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT = 2,
	MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
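	/*
	 * Illustrative example (not in the original source): port 6 maps
	 * to hc = 6 >> MV_PORT_HC_SHIFT = 1 and hardport = 6 & MV_PORT_MASK
	 * = 2, i.e. the third port of the second host controller.
	 */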
	MV_FLAG_DUAL_HC = (1 << 30),		/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE = (1 << 29),	/* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC = (1 << 28),
	MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
			  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
			  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
	CRQB_FLAG_READ = (1 << 0),
	CRQB_IOID_SHIFT = 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT = 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT = 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT = 8,
	CRQB_CMD_CS = (0x2 << 11),
	CRQB_CMD_LAST = (1 << 15),
	CRPB_FLAG_STATUS_SHIFT = 8,
	CRPB_IOID_SHIFT_6 = 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7 = 7,	/* CRPB Gen-IIE IO Id shift */
	EPRD_FLAG_END_OF_TBL = (1 << 31),
	/* PCI interface registers */
	PCI_COMMAND_OFS = 0xc00,
	PCI_COMMAND_MRDTRIG = (1 << 7),	/* PCI Master Read Trigger */
	PCI_MAIN_CMD_STS_OFS = 0xd30,
	STOP_PCI_MASTER = (1 << 2),
	PCI_MASTER_EMPTY = (1 << 3),
	GLOB_SFT_RST = (1 << 4),
	MV_PCI_MODE_OFS = 0xd00,
	MV_PCI_MODE_MASK = 0x30,
	MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
	MV_PCI_DISC_TIMER = 0xd04,
	MV_PCI_MSI_TRIGGER = 0xc38,
	MV_PCI_SERR_MASK = 0xc28,
	MV_PCI_XBAR_TMOUT_OFS = 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
	MV_PCI_ERR_ATTRIBUTE = 0x1d48,
	MV_PCI_ERR_COMMAND = 0x1d50,
	PCI_IRQ_CAUSE_OFS = 0x1d58,
	PCI_IRQ_MASK_OFS = 0x1d5c,
	PCI_UNMASK_ALL_IRQS = 0x7fffff,	/* bits 22-0 */
	PCIE_IRQ_CAUSE_OFS = 0x1900,
	PCIE_IRQ_MASK_OFS = 0x1910,
	PCIE_UNMASK_ALL_IRQS = 0x40a,	/* assorted bits */
	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS = 0x20024,
	ERR_IRQ = (1 << 0),	/* shift by port # */
	DONE_IRQ = (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND = 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT = 9,		/* bits 9-17 = HC1's ports */
	TRAN_LO_DONE = (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE = (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE = (1 << 8),
	PORTS_4_7_COAL_DONE = (1 << 17),
	PORTS_0_7_COAL_DONE = (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT = (1 << 22),
	SELF_INT = (1 << 23),
	TWSI_INT = (1 << 24),
	HC_MAIN_RSVD = (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5 = (0x1fff << 19),	/* bits 31-19 */
	HC_MAIN_RSVD_SOC = (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
			       PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
			       PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
	HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
	/* SATAHC registers */
	HC_IRQ_CAUSE_OFS = 0x14,
	DMA_IRQ = (1 << 0),	/* shift by port # */
	HC_COAL_IRQ = (1 << 4),	/* IRQ coalescing */
	DEV_IRQ = (1 << 8),	/* shift by port # */
	/* Shadow block registers */
	SHD_CTL_AST_OFS = 0x20,	/* ofs from SHD_BLK_OFS */
	SATA_STATUS_OFS = 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS = 0x350,
	SATA_FIS_IRQ_CAUSE_OFS = 0x364,
	LTMODE_BIT8 = (1 << 8),	/* unknown, but necessary */
	SATA_IFCTL_OFS = 0x344,
	SATA_TESTCTL_OFS = 0x348,
	SATA_IFSTAT_OFS = 0x34c,
	VENDOR_UNIQUE_FIS_OFS = 0x35c,
	FISCFG_WAIT_DEV_ERR = (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC = (1 << 16),	/* SYNC on DMA activation */
	MV5_LTMODE_OFS = 0x30,
	MV5_PHY_CTL_OFS = 0x0C,
	SATA_INTERFACE_CFG_OFS = 0x050,
	MV_M2_PREAMP_MASK = 0x7e0,
	EDMA_CFG_Q_DEPTH = 0x1f,	/* max device queue depth */
	EDMA_CFG_NCQ = (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT = (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN = (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS = (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS = (1 << 26),	/* FIS-Based Switching */
	EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
	EDMA_ERR_IRQ_MASK_OFS = 0xc,
	EDMA_ERR_D_PAR = (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR = (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV = (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON = (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON = (1 << 4),	/* device connected */
	EDMA_ERR_SERR = (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS = (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5 = (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC = (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7 = (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR = (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR = (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR = (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY = (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX = (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16),	/* transient: FIS rx err */
	EDMA_ERR_LNK_DATA_RX = (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX = (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25),	/* transient: FIS collision */
	EDMA_ERR_LNK_DATA_TX = (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO = (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5 = (1 << 5),
	EDMA_ERR_UNDERRUN_5 = (1 << 6),
	EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
				 EDMA_ERR_LNK_CTRL_RX_1 |
				 EDMA_ERR_LNK_CTRL_RX_3 |
				 EDMA_ERR_LNK_CTRL_TX,
	EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
			 EDMA_ERR_LNK_CTRL_RX_2 |
			 EDMA_ERR_LNK_DATA_RX |
			 EDMA_ERR_LNK_DATA_TX |
			 EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
			   EDMA_ERR_UNDERRUN_5 |
			   EDMA_ERR_SELF_DIS_5 |
	EDMA_REQ_Q_BASE_HI_OFS = 0x10,
	EDMA_REQ_Q_IN_PTR_OFS = 0x14,	/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
	EDMA_REQ_Q_PTR_SHIFT = 5,
	EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS = 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS = 0x24,	/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT = 3,
	EDMA_CMD_OFS = 0x28,	/* EDMA command register */
	EDMA_EN = (1 << 0),	/* enable EDMA */
	EDMA_DS = (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET = (1 << 2),	/* reset eng/trans/link/phy */
	EDMA_STATUS_OFS = 0x30,	/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY = (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE = (1 << 7),	/* GenIIe EDMA enabled/idle */
	EDMA_IORDY_TMOUT_OFS = 0x34,
	EDMA_ARB_CFG_OFS = 0x38,
	EDMA_HALTCOND_OFS = 0x60,	/* GenIIe halt conditions */
	GEN_II_NCQ_MAX_SECTORS = 256,	/* max sects/io on Gen2 w/NCQ */
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI = (1 << 0),
	MV_HP_ERRATA_50XXB0 = (1 << 1),
	MV_HP_ERRATA_50XXB2 = (1 << 2),
	MV_HP_ERRATA_60X1B2 = (1 << 3),
	MV_HP_ERRATA_60X1C0 = (1 << 4),
	MV_HP_ERRATA_XX42A0 = (1 << 5),
	MV_HP_GEN_I = (1 << 6),		/* Generation I: 50xx */
	MV_HP_GEN_II = (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE = (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE = (1 << 9),		/* PCIe bus/regs: 7042 */
	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN = (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN = (1 << 1),	/* is EDMA set up for NCQ? */
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
#define WINDOW_CTRL(i) (0x20030 + ((i) << 4))
#define WINDOW_BASE(i) (0x20034 + ((i) << 4))
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	MV_DMA_BOUNDARY = 0xffffU,
	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
/* Command ReQuest Block: 32B */
/* Command ResPonse Block: 8B */
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_port_priv {
	struct mv_crqb *crqb;
	struct mv_crpb *crpb;
	struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
	unsigned int req_idx;
	unsigned int resp_idx;
struct mv_port_signal {
struct mv_host_priv {
	struct mv_port_signal signal[8];
	const struct mv_hw_ops *ops;
	void __iomem *main_irq_cause_addr;
	void __iomem *main_irq_mask_addr;
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	struct dma_pool *crqb_pool;
	struct dma_pool *crpb_pool;
	struct dma_pool *sg_tbl_pool;
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue = MV_MAX_Q_DEPTH - 1,
	.sg_tablesize = MV_MAX_SG_CT / 2,
	.dma_boundary = MV_DMA_BOUNDARY,
static struct ata_port_operations mv5_ops = {
	.inherits = &ata_sff_port_ops,
	.qc_prep = mv_qc_prep,
	.qc_issue = mv_qc_issue,
	.freeze = mv_eh_freeze,
	.hardreset = mv_hardreset,
	.error_handler = ata_std_error_handler,	/* avoid SFF EH */
	.post_internal_cmd = ATA_OP_NULL,
	.scr_read = mv5_scr_read,
	.scr_write = mv5_scr_write,
	.port_start = mv_port_start,
	.port_stop = mv_port_stop,
static struct ata_port_operations mv6_ops = {
	.inherits = &mv5_ops,
	.qc_defer = sata_pmp_qc_defer_cmd_switch,
	.dev_config = mv6_dev_config,
	.scr_read = mv_scr_read,
	.scr_write = mv_scr_write,
	.pmp_hardreset = mv_pmp_hardreset,
	.pmp_softreset = mv_softreset,
	.softreset = mv_softreset,
	.error_handler = sata_pmp_error_handler,
static struct ata_port_operations mv_iie_ops = {
	.inherits = &mv6_ops,
	.qc_defer = ata_std_qc_defer,	/* FIS-based switching */
	.dev_config = ATA_OP_NULL,
	.qc_prep = mv_qc_prep_iie,
static const struct ata_port_info mv_port_info[] = {
		.flags = MV_COMMON_FLAGS,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
		.flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv5_ops,
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
			 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv6_ops,
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
		.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
			 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
			 ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask = 0x1f,	/* pio0-4 */
		.udma_mask = ATA_UDMA6,
		.port_ops = &mv_iie_ops,
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },
	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },
	{ }			/* terminate list */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata = mv5_phy_errata,
	.enable_leds = mv5_enable_leds,
	.read_preamp = mv5_read_preamp,
	.reset_hc = mv5_reset_hc,
	.reset_flash = mv5_reset_flash,
	.reset_bus = mv5_reset_bus,
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv6_enable_leds,
	.read_preamp = mv6_read_preamp,
	.reset_hc = mv6_reset_hc,
	.reset_flash = mv6_reset_flash,
	.reset_bus = mv_reset_pci_bus,
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata = mv6_phy_errata,
	.enable_leds = mv_soc_enable_leds,
	.read_preamp = mv_soc_read_preamp,
	.reset_hc = mv_soc_reset_hc,
	.reset_flash = mv_soc_reset_flash,
	.reset_bus = mv_soc_reset_bus,
static inline void writelfl(unsigned long data, void __iomem *addr)
	(void) readl(addr);	/* flush to avoid PCI posted write */
static inline unsigned int mv_hc_from_port(unsigned int port)
	return port >> MV_PORT_HC_SHIFT;
static inline unsigned int mv_hardport_from_port(unsigned int port)
	return port & MV_PORT_MASK;
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 * Note that port and hardport may be the same variable in some cases.
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
	shift = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift += hardport * 2;					\
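/*
 * Worked example (illustrative, not from the original source): for
 * port == 5, mv_hc_from_port(5) == 1 and hardport == 1, so
 * shift == 1 * HC_SHIFT + 1 * 2 == 11; ERR_IRQ << shift and
 * DONE_IRQ << shift then select bits 11 and 12 of the main interrupt
 * cause/mask registers, within HC1's bits 9-17.
 */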
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
	return mv_hc_base(base, mv_hc_from_port(port));
static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
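/*
 * Address-math example (illustrative): for port 5 this works out to
 * base + 0x20000 (SATAHC0) + 0x10000 (hc 1) + 0x2000 (arbiter)
 *      + 1 * 0x2000 (hardport 1) == base + 0x34000.
 */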
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
	return hc_mmio + ofs;
static inline void __iomem *mv_host_base(struct ata_host *host)
	struct mv_host_priv *hpriv = host->private_data;
static inline void __iomem *mv_ap_base(struct ata_port *ap)
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
static inline int mv_get_hc_count(unsigned long port_flags)
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
	 * initialize request queue
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	 * initialize response queue
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
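/*
 * Layout note (a sketch implied by the constants above): each CRQB is
 * 32 bytes, so "req_idx << EDMA_REQ_Q_PTR_SHIFT" is the byte offset of
 * the current entry within the 1KB-aligned request queue.  Those offset
 * bits lie entirely below EDMA_REQ_Q_BASE_LO_MASK (0xfffffc00), which
 * is why the queue base address and the index can share one register.
 */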
 * mv_start_dma - Enable eDMA engine
 * @ap: ATA channel to manipulate
 * @port_mmio: port base address
 * @pp: port private data
 * @protocol: taskfile protocol of the command being started
 * Verify the local cache of the eDMA state is accurate with a
 * sanity check.
 * Inherited from caller.
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
	int want_ncq = (protocol == ATA_PROT_NCQ);
	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hardport = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hardport);
		u32 hc_irq_cause, ipending;
		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		mv_edma_cfg(ap, want_ncq);
		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
 * mv_stop_edma_engine - Disable eDMA engine
 * @port_mmio: io base address
 * Inherited from caller.
static int mv_stop_edma_engine(void __iomem *port_mmio)
	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
static int mv_stop_edma(struct ata_port *ap)
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
static void mv_dump_mem(void __iomem *start, unsigned bytes)
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;
	start_hc = start_port = 0;
	num_ports = 8;		/* should be benign for 4-port devices */
	start_hc = port >> MV_PORT_HC_SHIFT;
	num_ports = num_hcs = 1;
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);
	DPRINTK("PCI config space regs:\n");
	mv_dump_pci_cfg(pdev, 0x68);
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
	switch (sc_reg_in) {
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
	unsigned int ofs = mv_scr_offset(sc_reg_in);
	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
	unsigned int ofs = mv_scr_offset(sc_reg_in);
	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
static void mv6_dev_config(struct ata_device *adev)
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
			ata_dev_printk(adev, KERN_INFO,
				"max_sectors limited to %u for NCQ\n",
static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
	u32 old_fiscfg, new_fiscfg, old_ltmode, new_ltmode;
	 * Various bit settings required for operation
	 * in FIS-based switching (fbs) mode on GenIIe:
	old_fiscfg = readl(port_mmio + FISCFG_OFS);
	old_ltmode = readl(port_mmio + LTMODE_OFS);
		new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
		new_ltmode = old_ltmode | LTMODE_BIT8;
	} else {	/* disable fbs */
		new_fiscfg = old_fiscfg & ~FISCFG_SINGLE_SYNC;
		new_ltmode = old_ltmode & ~LTMODE_BIT8;
	if (new_fiscfg != old_fiscfg)
		writelfl(new_fiscfg, port_mmio + FISCFG_OFS);
	if (new_ltmode != old_ltmode)
		writelfl(new_ltmode, port_mmio + LTMODE_OFS);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */
	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
			mv_config_fbs(port_mmio, 0);
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
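/*
 * Illustrative result (assuming the bit values above): on GenIIe with
 * NCQ wanted and a port multiplier attached, cfg ends up as
 * EDMA_CFG_Q_DEPTH | (1 << 23) | (1 << 22) | (1 << 18) | (1 << 17) |
 * EDMA_CFG_EDMA_FBS | EDMA_CFG_NCQ == 0x00c7003f before being written
 * to EDMA_CFG_OFS.
 */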
static void mv_port_free_dma_mem(struct ata_port *ap)
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 * Allocate and point to DMA memory, init port private memory,
 * Inherited from caller.
static int mv_port_start(struct ata_port *ap)
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	ap->private_data = pp;
	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);
	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 * Stop DMA, cleanup port memory.
 * This routine uses the host lock to protect the DMA stop.
static void mv_port_stop(struct ata_port *ap)
	mv_port_free_dma_mem(ap);
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 * Populate the SG list and mark the last entry.
 * Inherited from caller.
static void mv_fill_sg(struct ata_queued_cmd *qc)
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);
			u32 offset = addr & 0xffff;
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;
			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
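/*
 * Split example (illustrative): a segment whose bus-address offset
 * (addr & 0xffff) is 0xf000 with sg_len == 0x2000 would cross a 64K
 * boundary, so the first ePRD carries len == 0x10000 - 0xf000 == 0x1000
 * and the remainder goes into the next ePRD.  This worst-case doubling
 * is why .sg_tablesize above is only MV_MAX_SG_CT / 2.
 */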
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
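/*
 * Packing example (illustrative, assuming ATA_REG_CMD == ATA_REG_STATUS
 * == 7 as in <linux/ata.h>): mv_crqb_pack_cmd(cw, tf->command,
 * ATA_REG_CMD, 1) yields tmp == command | (7 << 8) | (0x2 << 11) |
 * (1 << 15), i.e. 0x9700 | command.
 */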
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 * Inherited from caller.
static void mv_qc_prep(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_taskfile *tf;
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
	/* Fill in command request block
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
	/* get current queue index from software */
	in_index = pp->req_idx;
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
	cw = &pp->crqb[in_index].ata_cmd[0];
	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	switch (tf->command) {
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		BUG_ON(tf->command);
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */
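	/*
	 * Example of the resulting CRQB command words (illustrative): for
	 * ATA_CMD_FPDMA_READ, cw receives hob_feature/feature (the NCQ
	 * sector-count words), then nsect, the three LBA pairs, device,
	 * and finally the command itself; hob_nsect is dropped, which is
	 * one of the Gen-II NCQ limits handled in mv6_dev_config().
	 */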
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 * Inherited from caller.
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
	/* get current queue index from software */
	in_index = pp->req_idx;
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
	crqb->ata_cmd[1] = cpu_to_le32(
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->hob_nsect << 8)
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 * Inherited from caller.
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
	pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
	in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
static void mv_unexpected_intr(struct ata_port *ap)
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	 * We got a device interrupt from something that
	 * was supposed to be using EDMA or polling.
	ata_ehi_clear_desc(ehi);
	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		when = " while EDMA enabled";
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
			when = " while polling";
	ata_ehi_push_desc(ehi, "unexpected device interrupt%s", when);
	ehi->err_mask |= AC_ERR_OTHER;
	ehi->action |= ATA_EH_RESET;
	ata_port_freeze(ap);
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected command (non-NCQ), or NULL
 * Most cases require a full reset of the chip's state machine,
 * which also performs a COMRESET.
 * Also, if the port disabled DMA, update our cached copy to match.
 * Inherited from caller.
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	ata_ehi_clear_desc(ehi);
	 * Read and clear the err_cause bits.  This won't actually
	 * clear for some errors (eg. SError), but we will be doing
	 * a hard reset in those cases regardless, which *will* clear it.
	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);
	 * All generations share these EDMA error cause bits:
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		if (edma_err_cause & EDMA_ERR_SERR) {
			 * Ensure that we read our own SCR, not a pmp link SCR:
			ap->ops->scr_read(ap, SCR_ERROR, &serr);
			 * Don't clear SError here; leave it for libata-eh:
			ata_ehi_push_desc(ehi, "SError=%08x", serr);
			err_mask |= AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	ehi->serror |= serr;
	ehi->action |= action;
		qc->err_mask |= err_mask;
		ehi->err_mask |= err_mask;
	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
static void mv_process_crpb_response(struct ata_port *ap,
		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
		u16 edma_status = le16_to_cpu(response->flags);
		 * edma_status from a response queue entry:
		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
		 *   MSB is saved ATA status from command completion.
			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
				 * Error will be seen/handled by mv_err_intr().
				 * So do nothing at all here.
		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
		qc->err_mask |= ac_err_mask(ata_status);
		ata_qc_complete(qc);
		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
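/*
 * Decode example (illustrative): edma_status == 0x5000 splits into ATA
 * status 0x50 (DRDY | DSC, i.e. success) in the MSB and err_cause 0x00
 * in the LSB, so the qc completes with no error bits set.
 */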
static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	bool work_done = false;
	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
	/* Get the hardware queue position index */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
	/* Process new responses since the last time we looked */
	while (in_index != pp->resp_idx) {
		struct mv_crpb *response = &pp->crpb[pp->resp_idx];
		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
		if (IS_GEN_I(hpriv)) {
			/* 50xx: no NCQ, only one command active at a time */
			tag = ap->link.active_tag;
			/* Gen II/IIE: get command tag from CRPB entry */
			tag = le16_to_cpu(response->id) & 0x1f;
		mv_process_crpb_response(ap, response, tag, ncq_enabled);
	/* Update the software queue position index in hardware */
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @main_irq_cause: Main interrupt cause register for the chip.
 * Inherited from caller.
static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base, *hc_mmio = NULL;
	u32 hc_irq_cause = 0;
	unsigned int handled = 0, port;
	for (port = 0; port < hpriv->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		unsigned int shift, hardport, port_cause;
		 * When we move to the second hc, flag our cached
		 * copies of hc_mmio (and hc_irq_cause) as invalid again.
		if (port == MV_PORTS_PER_HC)
		 * Do nothing if port is not interrupting or is disabled:
		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
		if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
		 * Each hc within the host has its own hc_irq_cause register.
		 * We defer reading it until we know we need it, right now:
		 *
		 * FIXME later: we don't really need to read this register
		 * (some logic changes required below if we go that way),
		 * because it doesn't tell us anything new.  But we do need
		 * to write to it, outside the top of this loop,
		 * to reset the interrupt triggers for next time.
			hc_mmio = mv_hc_base_from_port(mmio, port);
			hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
			writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
		 * Process completed CRPB response(s) before other events.
		pp = ap->private_data;
		if (hc_irq_cause & (DMA_IRQ << hardport)) {
			if (pp->pp_flags & MV_PP_FLAG_EDMA_EN)
				mv_process_crpb_entries(ap, pp);
		 * Handle chip-reported errors, or continue on to handle PIO.
		if (unlikely(port_cause & ERR_IRQ)) {
			mv_err_intr(ap, mv_get_active_qc(ap));
		} else if (hc_irq_cause & (DEV_IRQ << hardport)) {
			if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
				struct ata_queued_cmd *qc = mv_get_active_qc(ap);
					ata_sff_host_intr(ap, qc);
				mv_unexpected_intr(ap);
static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	err_cause = readl(mmio + hpriv->irq_cause_ofs);
	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
	writelfl(0, mmio + hpriv->irq_cause_ofs);
	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			ata_ehi_push_desc(ehi,
				"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
				qc->err_mask |= err_mask;
				ehi->err_mask |= err_mask;
			ata_port_freeze(ap);
	return 1;	/* handled */
 * mv_interrupt - Main interrupt event handler
 * @dev_instance: private data; in this case the host structure
 * Read the read-only cause register to determine if any host
 * controllers have pending interrupts.  If so, call the lower level
 * routine to handle them.  Also check for PCI errors, which are only
 * reported here.
 * This routine holds the host lock while processing pending
 * interrupts.
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int handled = 0;
	u32 main_irq_cause, main_irq_mask;
	spin_lock(&host->lock);
	main_irq_cause = readl(hpriv->main_irq_cause_addr);
	main_irq_mask = readl(hpriv->main_irq_mask_addr);
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	if ((main_irq_cause & main_irq_mask) && (main_irq_cause != 0xffffffffU)) {
		if (unlikely((main_irq_cause & PCI_ERR) && HAS_PCI(host)))
			handled = mv_pci_error(host, hpriv->base);
			handled = mv_host_intr(host, main_irq_cause);
	spin_unlock(&host->lock);
	return IRQ_RETVAL(handled);
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
	switch (sc_reg_in) {
		ofs = sc_reg_in * sizeof(u32);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);
	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);
	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
	struct pci_dev *pdev = to_pci_dev(host->dev);
	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	mv_reset_pci_bus(host, mmio);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
	writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
	writel(0, mmio + MV_GPIO_PORT_CTL_OFS);
	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
	tmp = readl(phy_mmio + MV5_LTMODE_OFS);
	writel(tmp, phy_mmio + MV5_LTMODE_OFS);
	tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
	writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *port_mmio = mv_port_base(mmio, port);
	mv_reset_channel(hpriv, mmio, port);
	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	tmp = readl(hc_mmio + 0x20);
	writel(tmp, hc_mmio + 0x20);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
	unsigned int hc, port;
	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);
		mv5_reset_one_hc(hpriv, mmio, hc);
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
	struct mv_host_priv *hpriv = host->private_data;
	tmp = readl(mmio + MV_PCI_MODE_OFS);
	writel(tmp, mmio + MV_PCI_MODE_OFS);
	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
	ZERO(PCI_HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
	mv5_reset_flash(hpriv, mmio);
	tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @mmio: base address of the HBA
 * This routine only applies to 6xxx parts.
 * Inherited from caller.
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	/* Following procedure defined in PCI "main command and status
	 * register" table.
2081 writel(t | STOP_PCI_MASTER, reg);
2083 for (i = 0; i < 1000; i++) {
2086 if (PCI_MASTER_EMPTY & t)
2089 if (!(PCI_MASTER_EMPTY & t)) {
2090 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2098 writel(t | GLOB_SFT_RST, reg);
2101 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2103 if (!(GLOB_SFT_RST & t)) {
2104 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2109 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2112 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2115 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2117 if (GLOB_SFT_RST & t) {
2118 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG_OFS);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
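/*
 * Note: amps and pre are stored pre-shifted (masked straight out of
 * PHY_MODE2 rather than normalized down to bit 0), so mv6_phy_errata()
 * can OR them back into the register without any further shifting.
 */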
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
}
#undef ZERO
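/*
 * This mirrors mv5_reset_hc_port() above; the notable difference is the
 * EDMA configuration value written here (0x101f, vs 0x11f on GEN_I).
 */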
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				       void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}
#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}

static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio)
{
	return;
}

static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	return;
}
static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
{
	u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);

	ifcfg = (ifcfg & 0xf7f) | 0x9b1000;	/* from chip spec */
	if (want_gen2i)
		ifcfg |= (1 << 7);		/* enable gen2i speed */
	writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
}
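/*
 * Usage note: mv_reset_channel() calls this with want_gen2i=1 to allow
 * 3.0 Gb/s operation, while mv_hardreset() below retries with
 * want_gen2i=0 to force a 1.5 Gb/s link when a device refuses to come
 * up at gen2 speed.
 */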
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	/*
	 * The datasheet warns against setting EDMA_RESET when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the EDMA_RESET operation.
	 */
	mv_stop_edma_engine(port_mmio);
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed: this survives EDMA_RESET */
		mv_setup_ifcfg(port_mmio, 1);
	}
	/*
	 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
static void mv_pmp_select(struct ata_port *ap, int pmp)
{
	if (sata_pmp_supported(ap)) {
		void __iomem *port_mmio = mv_ap_base(ap);
		u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
		int old = reg & 0xf;

		if (old != pmp) {
			reg = (reg & ~0xf) | pmp;
			writelfl(reg, port_mmio + SATA_IFCTL_OFS);
		}
	}
}

static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}

static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifcfg(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
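/*
 * SStatus decode for the loop above: DET is bits 3:0, SPD bits 7:4, and
 * IPM bits 11:8.  0x0 means no device; 0x113/0x123 mean a device with
 * established phy communication at 1.5/3.0 Gb/s; 0x121 means a device
 * was detected but phy communication never came up, which is what
 * triggers the forced fall-back to 1.5 Gb/s.
 */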
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	u32 main_irq_mask;

	/* FIXME: handle coalescing completion events properly */

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* disable assertion of portN err, done events */
	main_irq_mask = readl(hpriv->main_irq_mask_addr);
	main_irq_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
}
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 main_irq_mask, hc_irq_cause;

	/* FIXME: handle coalescing completion events properly */

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	main_irq_mask = readl(hpriv->main_irq_mask_addr);
	main_irq_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
}
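/*
 * freeze/thaw are intentionally symmetric: freeze masks the per-port
 * DONE/ERR bits in the main irq mask, and thaw first clears anything
 * that accumulated while frozen (EDMA error cause, HC irq cause) before
 * unmasking, so no stale event fires the moment the port is thawed.
 */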
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
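/*
 * Shadow register layout, for reference: the taskfile registers occupy
 * consecutive 32-bit slots, so with ATA_REG_DATA == 0 through
 * ATA_REG_STATUS == 7, data lands at shd_base + 0x00, error/feature at
 * +0x04, nsect at +0x08, and so on up to status/command at +0x1c.
 */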
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* drop through */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	if (HAS_PCI(host)) {
		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
	}

	/* global interrupt mask: 0 == mask everything */
	writel(0, hpriv->main_irq_mask_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_irq_mask_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_irq_mask_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_irq_cause_addr),
			readl(hpriv->main_irq_mask_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_irq_mask_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_irq_cause_addr),
			readl(hpriv->main_irq_mask_addr));
	}
done:
	return rc;
}
static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
{
	hpriv->crqb_pool   = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
							     MV_CRQB_Q_SZ, 0);
	if (!hpriv->crqb_pool)
		return -ENOMEM;

	hpriv->crpb_pool   = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
							     MV_CRPB_Q_SZ, 0);
	if (!hpriv->crpb_pool)
		return -ENOMEM;

	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
							     MV_SG_TBL_SZ, 0);
	if (!hpriv->sg_tbl_pool)
		return -ENOMEM;

	return 0;
}
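/*
 * Note: each pool above is created with align == element size, so every
 * CRQB, CRPB, and SG table allocation comes back naturally aligned to
 * its own length, which keeps the queue base addresses aligned the way
 * the EDMA base-address registers expect.
 */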
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
				 struct mbus_dram_target_info *dram)
{
	int i;

	for (i = 0; i < 4; i++) {
		writel(0, hpriv->base + WINDOW_CTRL(i));
		writel(0, hpriv->base + WINDOW_BASE(i));
	}

	for (i = 0; i < dram->num_cs; i++) {
		struct mbus_dram_window *cs = dram->cs + i;

		writel(((cs->size - 1) & 0xffff0000) |
			(cs->mbus_attr << 8) |
			(dram->mbus_dram_target_id << 4) | 1,
			hpriv->base + WINDOW_CTRL(i));
		writel(cs->base, hpriv->base + WINDOW_BASE(i));
	}
}
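/*
 * Window control encoding used above, for reference: bits 31:16 hold
 * (size - 1) with 64KB granularity, bits 15:8 the target attributes,
 * bits 7:4 the DRAM target id, and bit 0 enables the window.  As an
 * illustration (values assumed, not from a real board): a 256MB chip
 * select with attr 0x0e and target id 0 encodes to 0x0fff0e01.
 */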
/**
 *      mv_platform_probe - handle a positive probe of an soc Marvell
 *      host
 *      @pdev: platform device found
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_platform_probe(struct platform_device *pdev)
{
	static int printed_version;
	const struct mv_sata_platform_data *mv_platform_data;
	const struct ata_port_info *ppi[] =
	    { &mv_port_info[chip_soc], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	struct resource *res;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/*
	 * Simple resource validation ..
	 */
	if (unlikely(pdev->num_resources != 2)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	/*
	 * Get the register base first
	 */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -EINVAL;

	/* allocate host */
	mv_platform_data = pdev->dev.platform_data;
	n_ports = mv_platform_data->n_ports;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);

	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	host->iomap = NULL;
	hpriv->base = devm_ioremap(&pdev->dev, res->start,
				   res->end - res->start + 1);
	hpriv->base -= MV_SATAHC0_REG_BASE;

	/*
	 * (Re-)program MBUS remapping windows if we are asked to.
	 */
	if (mv_platform_data->dram != NULL)
		mv_conf_mbus_windows(hpriv, mv_platform_data->dram);

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, chip_soc);
	if (rc)
		return rc;

	dev_printk(KERN_INFO, &pdev->dev,
		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
		   host->n_ports);

	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
				 IRQF_SHARED, &mv6_sht);
}
/**
 *      mv_platform_remove - unplug a platform interface
 *      @pdev: platform device
 *
 *      A platform bus SATA device has been unplugged. Perform the needed
 *      cleanup. Also called on module unload for any active devices.
 */
static int __devexit mv_platform_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
	return 0;
}

static struct platform_driver mv_platform_driver = {
	.probe		= mv_platform_probe,
	.remove		= __devexit_p(mv_platform_remove),
	.driver		= {
		.name	= DRV_NAME,
		.owner	= THIS_MODULE,
	},
};
#ifdef CONFIG_PCI
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};

/* module options */
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */
/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI class code: these chips present themselves as
	 * either SCSI or RAID class devices
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
#endif
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);

static int __init mv_init(void)
{
	int rc = -ENODEV;

#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif

	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif

	return rc;
}

static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);

#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);