/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
 * sata_mv TODO list:
 *
 * --> Errata workaround for NCQ device errors.
 *
 * --> More errata workarounds for PCI-X.
 *
 * --> Complete a full errata audit for all chipsets to identify others.
 *
 * --> Develop a low-power-consumption strategy, and implement it.
 *
 * --> [Experiment, low priority] Investigate interrupt coalescing.
 *       Quite often, especially with PCI Message Signaled Interrupts (MSI),
 *       the overhead reduced by interrupt mitigation is not worth the
 *       added latency cost.
 *
 * --> [Experiment, Marvell value added] Is it possible to use target
 *       mode to cross-connect two Linux boxes with Marvell cards?  If so,
 *       creating LibATA target mode support would be very interesting.
 *
 *       Target mode, for those without docs, is the ability to directly
 *       connect two SATA ports.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/mbus.h>
#include <linux/bitops.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.26"
enum {
	/* BARs are enumerated in terms of pci_resource_start() */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL_OFS	= 0x1046c,
	MV_GPIO_PORT_CTL_OFS	= 0x104f0,
	MV_RESET_CFG_OFS	= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
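	/*
	 * Worked out from the definitions above, with MV_MAX_Q_DEPTH = 32:
	 * the CRQB ring is 32 * 32B = 1KB and the CRPB ring is
	 * 32 * 8B = 256B, which exactly matches the 1KB/256B alignment
	 * rules stated in the comment above.  Each SG table holds
	 * 256 ePRDs of 16B each, i.e. 4KB.
	 */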
	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1),   /* 3 */
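	/*
	 * Example of the arithmetic above: for port 6 on a dual-HC chip,
	 * hc = 6 >> MV_PORT_HC_SHIFT = 1 and hardport = 6 & MV_PORT_MASK = 2.
	 */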
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING,

	MV_GEN_I_FLAGS		= MV_COMMON_FLAGS | ATA_FLAG_NO_ATAPI,

	MV_GEN_II_FLAGS		= MV_COMMON_FLAGS | MV_FLAG_IRQ_COALESCE |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,

	MV_GEN_IIE_FLAGS	= MV_GEN_II_FLAGS | ATA_FLAG_AN,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,
	PCI_COMMAND_MRDTRIG	= (1 << 7),	/* PCI Master Read Trigger */

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE_OFS		= 0xd00,
	MV_PCI_MODE_MASK	= 0x30,

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT_OFS	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */
	/* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
	PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
	PCI_HC_MAIN_IRQ_MASK_OFS  = 0x1d64,
	SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	SOC_HC_MAIN_IRQ_MASK_OFS  = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6), /* bits 31-9, 7-6 */
	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */

	SATA_IFCTL_OFS		= 0x344,
	SATA_TESTCTL_OFS	= 0x348,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FISCFG_OFS		= 0x360,
	FISCFG_WAIT_DEV_ERR	= (1 << 8),	/* wait for host on DevErr */
	FISCFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_LTMODE_OFS		= 0x30,
	MV5_PHY_CTL_OFS		= 0x0C,
	SATA_INTERFACE_CFG_OFS	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */
	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transprt layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	EDMA_RESET		= (1 << 2),	/* reset eng/trans/link/phy */

	EDMA_STATUS_OFS		= 0x30,		/* EDMA engine status */
	EDMA_STATUS_CACHE_EMPTY	= (1 << 6),	/* GenIIe command cache empty */
	EDMA_STATUS_IDLE	= (1 << 7),	/* GenIIe EDMA enabled/idle */

	EDMA_IORDY_TMOUT_OFS	= 0x34,
	EDMA_ARB_CFG_OFS	= 0x38,

	EDMA_HALTCOND_OFS	= 0x60,		/* GenIIe halt conditions */
	EDMA_UNKNOWN_RSVD_OFS	= 0x6C,		/* GenIIe unknown/reserved */

	BMDMA_CMD_OFS		= 0x224,	/* bmdma command register */
	BMDMA_STATUS_OFS	= 0x228,	/* bmdma status register */
	BMDMA_PRD_LOW_OFS	= 0x22c,	/* bmdma PRD addr 31:0 */
	BMDMA_PRD_HIGH_OFS	= 0x230,	/* bmdma PRD addr 63:32 */
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_FBS_EN	= (1 << 2),	/* is EDMA set up for FBS? */
	MV_PP_FLAG_DELAYED_EH	= (1 << 3),	/* delayed dev err handling */
	MV_PP_FLAG_FAKE_ATA_BUSY = (1 << 4),	/* ignore initial ATA_DRDY */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)

#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
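/*
 * Layout implied by the masks and shifts above: the EDMA request queue
 * in/out pointer registers pack the ring base address (bits 31:10,
 * EDMA_REQ_Q_BASE_LO_MASK) together with the 5-bit ring index at
 * bits 9:5 (index << EDMA_REQ_Q_PTR_SHIFT).  The response queue does
 * the same, with base bits 31:8 and the index at bits 7:3.
 * mv_set_edma_ptrs() below writes both fields in a single register.
 */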
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};
/*
 * We keep a local cache of a few frequently accessed port
 * registers here, to avoid having to read them (very slow)
 * when switching between EDMA and non-EDMA modes.
 */
struct mv_cached_regs {
	u32			fiscfg;
	u32			ltmode;
	u32			haltcond;
	u32			unknown_rsvd;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
	struct mv_cached_regs	cached;
	unsigned int		delayed_eh_pmp_map;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};
struct mv_host_priv {
	u32			hp_flags;
	u32			main_irq_mask;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	void __iomem		*base;
	void __iomem		*main_irq_cause_addr;
	void __iomem		*main_irq_mask_addr;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
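/*
 * These pools are created at host-init time (not part of this excerpt).
 * A minimal sketch, assuming the devres-managed dma_pool API -- note how
 * the element size doubles as the alignment argument, which is what
 * guarantees the 1KB/256B/16B alignment rules stated earlier:
 *
 *	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev,
 *					    MV_CRQB_Q_SZ, MV_CRQB_Q_SZ, 0);
 *	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev,
 *					    MV_CRPB_Q_SZ, MV_CRPB_Q_SZ, 0);
 *	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev,
 *					      MV_SG_TBL_SZ, MV_SG_TBL_SZ, 0);
 */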
struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static int mv_qc_defer(struct ata_queued_cmd *qc);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);

static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_pmp_error_handler(struct ata_port *ap);
static void mv_process_crpb_entries(struct ata_port *ap,
				    struct mv_port_priv *pp);

static void mv_sff_irq_clear(struct ata_port *ap);
static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
static void mv_bmdma_setup(struct ata_queued_cmd *qc);
static void mv_bmdma_start(struct ata_queued_cmd *qc);
static void mv_bmdma_stop(struct ata_queued_cmd *qc);
static u8 mv_bmdma_status(struct ata_port *ap);
static u8 mv_sff_check_status(struct ata_port *ap);
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
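/*
 * A note on .can_queue above: MV_MAX_Q_DEPTH - 1 (31) rather than 32,
 * most likely because the EDMA request ring's producer/consumer
 * pointers compare equal both when the ring is empty and when it is
 * completely full; keeping one slot unused avoids that ambiguity.
 */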
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_defer		= mv_qc_defer,
	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= mv_pmp_error_handler,

	.sff_check_status	= mv_sff_check_status,
	.sff_irq_clear		= mv_sff_irq_clear,
	.check_atapi_dma	= mv_check_atapi_dma,
	.bmdma_setup		= mv_bmdma_setup,
	.bmdma_start		= mv_bmdma_start,
	.bmdma_stop		= mv_bmdma_stop,
	.bmdma_status		= mv_bmdma_status,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_GEN_I_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_GEN_I_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_GEN_II_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_GEN_II_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_GEN_IIE_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1720/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1720), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1740), chip_6042 },
	{ PCI_VDEVICE(TTI, 0x1742), chip_6042 },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with main_irq_cause / main_irq_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift    = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift   += hardport * 2;				\
}
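/*
 * Worked example of the macro above: for port 5, hc = 1 and hardport = 1,
 * so shift = 1 * HC_SHIFT + 1 * 2 = 11; that port's ERR_IRQ and DONE_IRQ
 * bits then live at bits 11 and 12 of the main cause/mask registers.
 */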
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return  mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
/**
 *	mv_save_cached_regs - (re-)initialize cached port registers
 *	@ap: the port whose registers we are caching
 *
 *	Initialize the local cache of port registers,
 *	so that reading them over and over again can
 *	be avoided on the hotter paths of this driver.
 *	This saves a few microseconds each time we switch
 *	to/from EDMA mode to perform (e.g.) a drive cache flush.
 */
static void mv_save_cached_regs(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	pp->cached.fiscfg = readl(port_mmio + FISCFG_OFS);
	pp->cached.ltmode = readl(port_mmio + LTMODE_OFS);
	pp->cached.haltcond = readl(port_mmio + EDMA_HALTCOND_OFS);
	pp->cached.unknown_rsvd = readl(port_mmio + EDMA_UNKNOWN_RSVD_OFS);
}
/**
 *	mv_write_cached_reg - write to a cached port register
 *	@addr: hardware address of the register
 *	@old: pointer to cached value of the register
 *	@new: new value for the register
 *
 *	Write a new value to a cached register,
 *	but only if the value is different from before.
 */
static inline void mv_write_cached_reg(void __iomem *addr, u32 *old, u32 new)
{
	if (new != *old) {
		*old = new;
		writel(new, addr);
	}
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
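/*
 * A note on the "(x >> 16) >> 16" idiom above (used in several other
 * places in this driver too): it extracts the upper 32 bits of a
 * dma_addr_t without the ">> 32" that would be undefined behaviour
 * (and a compiler warning) on configurations where dma_addr_t is
 * only 32 bits wide.
 */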
static void mv_set_main_irq_mask(struct ata_host *host,
				 u32 disable_bits, u32 enable_bits)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 old_mask, new_mask;

	old_mask = hpriv->main_irq_mask;
	new_mask = (old_mask & ~disable_bits) | enable_bits;
	if (new_mask != old_mask) {
		hpriv->main_irq_mask = new_mask;
		writelfl(new_mask, hpriv->main_irq_mask_addr);
	}
}

static void mv_enable_port_irqs(struct ata_port *ap,
				unsigned int port_bits)
{
	unsigned int shift, hardport, port = ap->port_no;
	u32 disable_bits, enable_bits;

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
	enable_bits  = port_bits << shift;
	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
}
static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
					  void __iomem *port_mmio,
					  unsigned int port_irqs)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	int hardport = mv_hardport_from_port(ap->port_no);
	void __iomem *hc_mmio = mv_hc_base_from_port(
				mv_host_base(ap->host), ap->port_no);
	u32 hc_irq_cause;

	/* clear EDMA event indicators, if any */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* clear FIS IRQ Cause */
	if (IS_GEN_IIE(hpriv))
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

	mv_enable_port_irqs(ap, port_irqs);
}
/**
 *	mv_start_edma - Enable eDMA engine
 *	@base: port base address
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	sanity check.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
			  struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;

		mv_edma_cfg(ap, want_ncq, 1);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
}
static void mv_wait_for_edma_empty_idle(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	const u32 empty_idle = (EDMA_STATUS_CACHE_EMPTY | EDMA_STATUS_IDLE);
	const int per_loop = 5, timeout = (15 * 1000 / per_loop);
	int i;

	/*
	 * Wait for the EDMA engine to finish transactions in progress.
	 * No idea what a good "timeout" value might be, but measurements
	 * indicate that it often requires hundreds of microseconds
	 * with two drives in-use.  So we use the 15msec value above
	 * as a rough guess at what even more drives might require.
	 */
	for (i = 0; i < timeout; ++i) {
		u32 edma_stat = readl(port_mmio + EDMA_STATUS_OFS);
		if ((edma_stat & empty_idle) == empty_idle)
			break;
		udelay(per_loop);
	}
	/* ata_port_printk(ap, KERN_INFO, "%s: %u+ usecs\n", __func__, i); */
}
/**
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@port_mmio: io base address
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	int err = 0;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	mv_wait_for_edma_empty_idle(ap);
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}
	mv_edma_cfg(ap, 0, 0);
	return err;
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(link->ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}
static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
	 *
	 * Gen-II does not support NCQ over a port multiplier
	 *  (no FIS-based switching).
	 */
	if (adev->flags & ATA_DFLAG_NCQ) {
		if (sata_pmp_attached(adev->link->ap)) {
			adev->flags &= ~ATA_DFLAG_NCQ;
			ata_dev_printk(adev, KERN_INFO,
				"NCQ disabled for command-based switching\n");
		}
	}
}
static int mv_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;
	struct ata_port *ap = link->ap;
	struct mv_port_priv *pp = ap->private_data;

	/*
	 * Don't allow new commands if we're in a delayed EH state
	 * for NCQ and/or FIS-based switching.
	 */
	if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
		return ATA_DEFER_PORT;
	/*
	 * If the port is completely idle, then allow the new qc.
	 */
	if (ap->nr_active_links == 0)
		return 0;

	/*
	 * The port is operating in host queuing mode (EDMA) with NCQ
	 * enabled, allow multiple NCQ commands.  EDMA also allows
	 * queueing multiple DMA commands but libata core currently
	 * doesn't allow it.
	 */
	if ((pp->pp_flags & MV_PP_FLAG_EDMA_EN) &&
	    (pp->pp_flags & MV_PP_FLAG_NCQ_EN) && ata_is_ncq(qc->tf.protocol))
		return 0;

	return ATA_DEFER_PORT;
}
static void mv_config_fbs(struct ata_port *ap, int want_ncq, int want_fbs)
{
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *port_mmio;

	u32 fiscfg,   *old_fiscfg   = &pp->cached.fiscfg;
	u32 ltmode,   *old_ltmode   = &pp->cached.ltmode;
	u32 haltcond, *old_haltcond = &pp->cached.haltcond;

	ltmode   = *old_ltmode & ~LTMODE_BIT8;
	haltcond = *old_haltcond | EDMA_ERR_DEV;

	if (want_fbs) {
		fiscfg = *old_fiscfg | FISCFG_SINGLE_SYNC;
		ltmode = *old_ltmode | LTMODE_BIT8;
		if (want_ncq)
			haltcond &= ~EDMA_ERR_DEV;
		else
			fiscfg |= FISCFG_WAIT_DEV_ERR;
	} else {
		fiscfg = *old_fiscfg & ~(FISCFG_SINGLE_SYNC | FISCFG_WAIT_DEV_ERR);
	}

	port_mmio = mv_ap_base(ap);
	mv_write_cached_reg(port_mmio + FISCFG_OFS, old_fiscfg, fiscfg);
	mv_write_cached_reg(port_mmio + LTMODE_OFS, old_ltmode, ltmode);
	mv_write_cached_reg(port_mmio + EDMA_HALTCOND_OFS, old_haltcond, haltcond);
}
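/*
 * Background, for readers without the datasheet: FIS-based switching
 * (FBS) lets the EDMA keep commands in flight to more than one device
 * behind a port multiplier at once, instead of command-based switching
 * where only a single PMP target may be busy at a time.  Since all
 * three registers above go through the register cache, a mode change
 * only touches the hardware when a value actually differs.
 */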
static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 old, new;

	/* workaround for 88SX60x1 FEr SATA#25 (part 1) */
	old = readl(hpriv->base + MV_GPIO_PORT_CTL_OFS);
	if (want_ncq)
		new = old | (1 << 22);
	else
		new = old & ~(1 << 22);
	if (new != old)
		writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
}

/**
 *	mv_bmdma_enable_iie - set a magic bit on GEN_IIE to allow bmdma
 *	@ap: Port being initialized
 *
 *	There are two DMA modes on these chips:  basic DMA, and EDMA.
 *
 *	Bit-0 of the "EDMA RESERVED" register enables/disables use
 *	of basic DMA on the GEN_IIE versions of the chips.
 *
 *	This bit survives EDMA resets, and must be set for basic DMA
 *	to function, and should be cleared when EDMA is active.
 */
static void mv_bmdma_enable_iie(struct ata_port *ap, int enable_bmdma)
{
	struct mv_port_priv *pp = ap->private_data;
	u32 new, *old = &pp->cached.unknown_rsvd;

	if (enable_bmdma)
		new = *old | 1;
	else
		new = *old & ~1;
	mv_write_cached_reg(mv_ap_base(ap) + EDMA_UNKNOWN_RSVD_OFS, old, new);
}
static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
	pp->pp_flags &=
	  ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv)) {
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		mv_60x1_errata_sata25(ap, want_ncq);

	} else if (IS_GEN_IIE(hpriv)) {
		int want_fbs = sata_pmp_attached(ap);
		/*
		 * Possible future enhancement:
		 *
		 * The chip can use FBS with non-NCQ, if we allow it,
		 * But first we need to have the error handling in place
		 * for this mode (datasheet section 7.3.15.4.2.3).
		 * So disallow non-NCQ FBS for now.
		 */
		want_fbs &= want_ncq;

		mv_config_fbs(ap, want_ncq, want_fbs);

		if (want_fbs) {
			pp->pp_flags |= MV_PP_FLAG_FBS_EN;
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
		}

		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		if (want_edma) {
			cfg |= (1 << 22); /* enab 4-entry host queue cache */
			if (!IS_SOC(hpriv))
				cfg |= (1 << 18); /* enab early completion */
		}
		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
		mv_bmdma_enable_iie(ap, !want_edma);
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
	if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
		ap->flags |= ATA_FLAG_AN;
	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	mv_save_cached_regs(ap);
	mv_edma_cfg(ap, 0, 0);
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_enable_port_irqs(ap, 0);
	mv_port_free_dma_mem(ap);
}
/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if (offset + len > 0x10000)
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);
			mv_sg->reserved = 0;

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
	mb(); /* ensure data structure is visible to the chipset */
}
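/*
 * Worked example of the 64KB splitting above: a 0x18000-byte segment
 * at DMA address 0x1f000 becomes three ePRDs:
 *	0x1f000 + 0x1000  (up to the 0x20000 boundary),
 *	0x20000 + 0x10000 (a full 64KB -- encoded as a zero length field
 *			   by "len & 0xffff", the usual PRD convention),
 *	0x30000 + 0x7000.
 */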
static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}

/**
 *	mv_sff_irq_clear - Clear hardware interrupt after DMA.
 *	@ap: Port associated with this ATA transaction.
 *
 *	We need this only for ATAPI bmdma transactions,
 *	as otherwise we experience spurious interrupts
 *	after libata-sff handles the bmdma interrupts.
 */
static void mv_sff_irq_clear(struct ata_port *ap)
{
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
}
/**
 *	mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
 *	@qc: queued command to check for chipset/DMA compatibility.
 *
 *	The bmdma engines cannot handle speculative data sizes
 *	(bytecount under/over flow).  So only allow DMA for
 *	data transfer commands with known data sizes.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;

	if (scmd) {
		switch (scmd->cmnd[0]) {
		case READ_6:
		case READ_10:
		case READ_12:
		case WRITE_6:
		case WRITE_10:
		case WRITE_12:
		case GPCMD_READ_CD:
		case GPCMD_SEND_DVD_STRUCTURE:
		case GPCMD_SEND_CUE_SHEET:
			return 0; /* DMA is safe */
		}
	}
	return -EOPNOTSUPP; /* use PIO instead */
}
/**
 *	mv_bmdma_setup - Set up BMDMA transaction
 *	@qc: queued command to prepare DMA for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	mv_fill_sg(qc);

	/* clear all DMA cmd bits */
	writel(0, port_mmio + BMDMA_CMD_OFS);

	/* load PRD table addr. */
	writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
		port_mmio + BMDMA_PRD_HIGH_OFS);
	writelfl(pp->sg_tbl_dma[qc->tag],
		port_mmio + BMDMA_PRD_LOW_OFS);

	/* issue r/w command */
	ap->ops->sff_exec_command(ap, &qc->tf);
}

/**
 *	mv_bmdma_start - Start a BMDMA transaction
 *	@qc: queued command to start DMA on.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_start(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;

	/* start host DMA transaction */
	writelfl(cmd, port_mmio + BMDMA_CMD_OFS);
}
/**
 *	mv_bmdma_stop - Stop BMDMA transfer
 *	@qc: queued command to stop DMA on.
 *
 *	Clears the ATA_DMA_START flag in the bmdma control register
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 cmd;

	/* clear start/stop bit */
	cmd = readl(port_mmio + BMDMA_CMD_OFS);
	cmd &= ~ATA_DMA_START;
	writelfl(cmd, port_mmio + BMDMA_CMD_OFS);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}

/**
 *	mv_bmdma_status - Read BMDMA status
 *	@ap: port for which to retrieve DMA status.
 *
 *	Read and return equivalent of the sff BMDMA status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 mv_bmdma_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 reg, status;

	/*
	 * Other bits are valid only if ATA_DMA_ACTIVE==0,
	 * and the ATA_DMA_INTR bit doesn't exist.
	 */
	reg = readl(port_mmio + BMDMA_STATUS_OFS);
	if (reg & ATA_DMA_ACTIVE)
		status = ATA_DMA_ACTIVE;
	else
		status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
	return status;
}
/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect, which is not needed there
	 * (nsect is used only for the tag; feat/hob_feat hold true nsect).
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_sff_check_status - fetch device status, if valid
 *	@ap: ATA port to fetch status from
 *
 *	When using command issue via mv_qc_issue_fis(),
 *	the initial ATA_BUSY state does not show up in the
 *	ATA status (shadow) register.  This can confuse libata!
 *
 *	So we have a hook here to fake ATA_BUSY for that situation,
 *	until the first time a BUSY, DRQ, or ERR bit is seen.
 *
 *	The rest of the time, it simply returns the ATA status register.
 */
static u8 mv_sff_check_status(struct ata_port *ap)
{
	u8 stat = ioread8(ap->ioaddr.status_addr);
	struct mv_port_priv *pp = ap->private_data;

	if (pp->pp_flags & MV_PP_FLAG_FAKE_ATA_BUSY) {
		if (stat & (ATA_BUSY | ATA_DRQ | ATA_ERR))
			pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY;
		else
			stat = ATA_BUSY;
	}
	return stat;
}
/**
 *	mv_send_fis - Send a FIS, using the "Vendor-Unique FIS" register
 *	@fis: fis to be sent
 *	@nwords: number of 32-bit words in the fis
 */
static unsigned int mv_send_fis(struct ata_port *ap, u32 *fis, int nwords)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 ifctl, old_ifctl, ifstat;
	int i, timeout = 200, final_word = nwords - 1;

	/* Initiate FIS transmission mode */
	old_ifctl = readl(port_mmio + SATA_IFCTL_OFS);
	ifctl = 0x100 | (old_ifctl & 0xf);
	writelfl(ifctl, port_mmio + SATA_IFCTL_OFS);

	/* Send all words of the FIS except for the final word */
	for (i = 0; i < final_word; ++i)
		writel(fis[i], port_mmio + VENDOR_UNIQUE_FIS_OFS);

	/* Flag end-of-transmission, and then send the final word */
	writelfl(ifctl | 0x200, port_mmio + SATA_IFCTL_OFS);
	writelfl(fis[final_word], port_mmio + VENDOR_UNIQUE_FIS_OFS);

	/*
	 * Wait for FIS transmission to complete.
	 * This typically takes just a single iteration.
	 */
	do {
		ifstat = readl(port_mmio + SATA_IFSTAT_OFS);
	} while (!(ifstat & 0x1000) && --timeout);

	/* Restore original port configuration */
	writelfl(old_ifctl, port_mmio + SATA_IFCTL_OFS);

	/* See if it worked */
	if ((ifstat & 0x3000) != 0x1000) {
		ata_port_printk(ap, KERN_WARNING,
				"%s transmission error, ifstat=%08x\n",
				__func__, ifstat);
		return AC_ERR_OTHER;
	}
	return 0;
}
/**
 *	mv_qc_issue_fis - Issue a command directly as a FIS
 *	@qc: queued command to start
 *
 *	Note that the ATA shadow registers are not updated
 *	after command issue, so the device will appear "READY"
 *	if polled, even while it is BUSY processing the command.
 *
 *	So we use a status hook to fake ATA_BUSY until the drive changes state.
 *
 *	Note: we don't get updated shadow regs on *completion*
 *	of non-data commands.  So avoid sending them via this function,
 *	as they will appear to have completed immediately.
 *
 *	GEN_IIE has special registers that we could get the result tf from,
 *	but earlier chipsets do not.  For now, we ignore those registers.
 */
static unsigned int mv_qc_issue_fis(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_link *link = qc->dev->link;
	u32 fis[5];
	int err = 0;

	ata_tf_to_fis(&qc->tf, link->pmp, 1, (void *)fis);
	err = mv_send_fis(ap, fis, sizeof(fis) / sizeof(fis[0]));
	if (err)
		return err;

	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		/* fall through */
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_FIRST;
		break;
	case ATA_PROT_PIO:
		pp->pp_flags |= MV_PP_FLAG_FAKE_ATA_BUSY;
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			ap->hsm_task_state = HSM_ST_FIRST;
		else
			ap->hsm_task_state = HSM_ST;
		break;
	default:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		ata_pio_queue_task(ap, qc, 0);
	return 0;
}
/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	static int limit_warnings = 10;
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;
	unsigned int port_irqs;

	pp->pp_flags &= ~MV_PP_FLAG_FAKE_ATA_BUSY; /* paranoia */

	switch (qc->tf.protocol) {
	case ATA_PROT_DMA:
	case ATA_PROT_NCQ:
		mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
		pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
		in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;

		/* Write the request in pointer to kick the EDMA to life */
		writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
					port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
		return 0;

	case ATA_PROT_PIO:
		/*
		 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
		 *
		 * Someday, we might implement special polling workarounds
		 * for these, but it all seems rather unnecessary since we
		 * normally use only DMA for commands which transfer more
		 * than a single block of data.
		 *
		 * Much of the time, this could just work regardless.
		 * So for now, just log the incident, and allow the attempt.
		 */
		if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
			--limit_warnings;
			ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
					": attempting PIO w/multiple DRQ: "
					"this may fail due to h/w errata\n");
		}
		/* fall through */
	case ATA_PROT_NODATA:
	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (ap->flags & ATA_FLAG_PIO_POLLING)
			qc->tf.flags |= ATA_TFLAG_POLLING;
		break;
	}

	if (qc->tf.flags & ATA_TFLAG_POLLING)
		port_irqs = ERR_IRQ;	/* mask device interrupt when polling */
	else
		port_irqs = ERR_IRQ | DONE_IRQ;	/* unmask all interrupts */

	/*
	 * We're about to send a non-EDMA capable command to the
	 * port.  Turn off EDMA so there won't be problems accessing
	 * shadow block, etc registers.
	 */
	mv_stop_edma(ap);
	mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
	mv_pmp_select(ap, qc->dev->link->pmp);

	if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		/*
		 * Workaround for 88SX60x1 FEr SATA#25 (part 2).
		 *
		 * After any NCQ error, the READ_LOG_EXT command
		 * from libata-eh *must* use mv_qc_issue_fis().
		 * Otherwise it might fail, due to chip errata.
		 *
		 * Rather than special-case it, we'll just *always*
		 * use this method here for READ_LOG_EXT, making for
		 * easier testing.
		 */
		if (IS_GEN_II(hpriv))
			return mv_qc_issue_fis(qc);
	}
	return ata_sff_qc_issue(qc);
}
2018 static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
2020 struct mv_port_priv *pp = ap->private_data;
2021 struct ata_queued_cmd *qc;
2023 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
2025 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2027 if (qc->tf.flags & ATA_TFLAG_POLLING)
2029 else if (!(qc->flags & ATA_QCFLAG_ACTIVE))
2035 static void mv_pmp_error_handler(struct ata_port *ap)
2037 unsigned int pmp, pmp_map;
2038 struct mv_port_priv *pp = ap->private_data;
2040 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
2042 * Perform NCQ error analysis on failed PMPs
2043 * before we freeze the port entirely.
2045 * The failed PMPs are marked earlier by mv_pmp_eh_prep().
2047 pmp_map = pp->delayed_eh_pmp_map;
2048 pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
2049 for (pmp = 0; pmp_map != 0; pmp++) {
2050 unsigned int this_pmp = (1 << pmp);
2051 if (pmp_map & this_pmp) {
2052 struct ata_link *link = &ap->pmp_link[pmp];
2053 pmp_map &= ~this_pmp;
2054 ata_eh_analyze_ncq_error(link);
2057 ata_port_freeze(ap);
2059 sata_pmp_error_handler(ap);
2062 static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
2064 void __iomem *port_mmio = mv_ap_base(ap);
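/* Bits 31:16 of TESTCTL form a bitmap of PMP links reporting device errors */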
2066 return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
2069 static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
2071 struct ata_eh_info *ehi;
2075 * Initialize EH info for PMPs which saw device errors
2077 ehi = &ap->link.eh_info;
2078 for (pmp = 0; pmp_map != 0; pmp++) {
2079 unsigned int this_pmp = (1 << pmp);
2080 if (pmp_map & this_pmp) {
2081 struct ata_link *link = &ap->pmp_link[pmp];
2083 pmp_map &= ~this_pmp;
2084 ehi = &link->eh_info;
2085 ata_ehi_clear_desc(ehi);
2086 ata_ehi_push_desc(ehi, "dev err");
2087 ehi->err_mask |= AC_ERR_DEV;
2088 ehi->action |= ATA_EH_RESET;
2089 ata_link_abort(link);
2094 static int mv_req_q_empty(struct ata_port *ap)
2096 void __iomem *port_mmio = mv_ap_base(ap);
2097 u32 in_ptr, out_ptr;
2099 in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
2100 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2101 out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
2102 >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2103 return (in_ptr == out_ptr); /* 1 == queue_is_empty */
2106 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
2108 struct mv_port_priv *pp = ap->private_data;
2110 unsigned int old_map, new_map;
2113 * Device error during FBS+NCQ operation:
2115 * Set a port flag to prevent further I/O being enqueued.
2116 * Leave the EDMA running to drain outstanding commands from this port.
2117 * Perform the post-mortem/EH only when all responses are complete.
2118 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
2120 if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
2121 pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
2122 pp->delayed_eh_pmp_map = 0;
2124 old_map = pp->delayed_eh_pmp_map;
2125 new_map = old_map | mv_get_err_pmp_map(ap);
2127 if (old_map != new_map) {
2128 pp->delayed_eh_pmp_map = new_map;
2129 mv_pmp_eh_prep(ap, new_map & ~old_map);
2131 failed_links = hweight16(new_map);
2133 ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
2134 "failed_links=%d nr_active_links=%d\n",
2135 __func__, pp->delayed_eh_pmp_map,
2136 ap->qc_active, failed_links,
2137 ap->nr_active_links);
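/*
* Hold off on freezing/EH until every still-healthy link has drained:
* i.e. all remaining active commands belong to failed PMPs and the
* request queue is empty.
*/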
2139 if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
2140 mv_process_crpb_entries(ap, pp);
2143 ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
2144 return 1; /* handled */
2146 ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
2147 return 1; /* handled */
2150 static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
2153 * Possible future enhancement:
2155 * FBS+non-NCQ operation is not yet implemented.
2156 * See related notes in mv_edma_cfg().
2158 * Device error during FBS+non-NCQ operation:
2160 * We need to snapshot the shadow registers for each failed command.
2161 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
2163 return 0; /* not handled */
2166 static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
2168 struct mv_port_priv *pp = ap->private_data;
2170 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
2171 return 0; /* EDMA was not active: not handled */
2172 if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
2173 return 0; /* FBS was not active: not handled */
2175 if (!(edma_err_cause & EDMA_ERR_DEV))
2176 return 0; /* non DEV error: not handled */
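/* Disregard harmless transient causes before checking the remainder */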
2177 edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
2178 if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
2179 return 0; /* other problems: not handled */
2181 if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
2183 * EDMA should NOT have self-disabled for this case.
2184 * If it did, then something is wrong elsewhere,
2185 * and we cannot handle it here.
2187 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2188 ata_port_printk(ap, KERN_WARNING,
2189 "%s: err_cause=0x%x pp_flags=0x%x\n",
2190 __func__, edma_err_cause, pp->pp_flags);
2191 return 0; /* not handled */
2193 return mv_handle_fbs_ncq_dev_err(ap);
2196 * EDMA should have self-disabled for this case.
2197 * If it did not, then something is wrong elsewhere,
2198 * and we cannot handle it here.
2200 if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
2201 ata_port_printk(ap, KERN_WARNING,
2202 "%s: err_cause=0x%x pp_flags=0x%x\n",
2203 __func__, edma_err_cause, pp->pp_flags);
2204 return 0; /* not handled */
2206 return mv_handle_fbs_non_ncq_dev_err(ap);
2208 return 0; /* not handled */
2211 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
2213 struct ata_eh_info *ehi = &ap->link.eh_info;
2214 char *when = "idle";
2216 ata_ehi_clear_desc(ehi);
2217 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
2219 } else if (edma_was_enabled) {
2220 when = "EDMA enabled";
2222 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
2223 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
2226 ata_ehi_push_desc(ehi, "unexpected device interrupt while %s", when);
2227 ehi->err_mask |= AC_ERR_OTHER;
2228 ehi->action |= ATA_EH_RESET;
2229 ata_port_freeze(ap);
2233 * mv_err_intr - Handle error interrupts on the port
2234 * @ap: ATA channel to manipulate
2236 * Most cases require a full reset of the chip's state machine,
2237 * which also performs a COMRESET.
2238 * Also, if the port disabled DMA, update our cached copy to match.
2241 * Inherited from caller.
2243 static void mv_err_intr(struct ata_port *ap)
2245 void __iomem *port_mmio = mv_ap_base(ap);
2246 u32 edma_err_cause, eh_freeze_mask, serr = 0;
2247 u32 fis_cause = 0;
2248 struct mv_port_priv *pp = ap->private_data;
2249 struct mv_host_priv *hpriv = ap->host->private_data;
2250 unsigned int action = 0, err_mask = 0;
2251 struct ata_eh_info *ehi = &ap->link.eh_info;
2252 struct ata_queued_cmd *qc;
2256 * Read and clear the SError and err_cause bits.
2257 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
2258 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
2260 sata_scr_read(&ap->link, SCR_ERROR, &serr);
2261 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
2263 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2264 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2265 fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
2266 writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
2268 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2270 if (edma_err_cause & EDMA_ERR_DEV) {
2272 * Device errors during FIS-based switching operation
2273 * require special handling.
2275 if (mv_handle_dev_err(ap, edma_err_cause))
2279 qc = mv_get_active_qc(ap);
2280 ata_ehi_clear_desc(ehi);
2281 ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
2282 edma_err_cause, pp->pp_flags);
2284 if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
2285 ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
2286 if (fis_cause & SATA_FIS_IRQ_AN) {
2287 u32 ec = edma_err_cause &
2288 ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
2289 sata_async_notification(ap);
2290 if (!ec)
2291 return; /* Just an AN; no need for the nukes */
2292 ata_ehi_push_desc(ehi, "SDB notify");
2296 * All generations share these EDMA error cause bits:
2298 if (edma_err_cause & EDMA_ERR_DEV) {
2299 err_mask |= AC_ERR_DEV;
2300 action |= ATA_EH_RESET;
2301 ata_ehi_push_desc(ehi, "dev error");
2303 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
2304 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
2305 EDMA_ERR_INTRL_PAR)) {
2306 err_mask |= AC_ERR_ATA_BUS;
2307 action |= ATA_EH_RESET;
2308 ata_ehi_push_desc(ehi, "parity error");
2310 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
2311 ata_ehi_hotplugged(ehi);
2312 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
2313 "dev disconnect" : "dev connect");
2314 action |= ATA_EH_RESET;
2318 * Gen-I has a different SELF_DIS bit,
2319 * different FREEZE bits, and no SERR bit:
2321 if (IS_GEN_I(hpriv)) {
2322 eh_freeze_mask = EDMA_EH_FREEZE_5;
2323 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
2324 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2325 ata_ehi_push_desc(ehi, "EDMA self-disable");
2328 eh_freeze_mask = EDMA_EH_FREEZE;
2329 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
2330 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2331 ata_ehi_push_desc(ehi, "EDMA self-disable");
2333 if (edma_err_cause & EDMA_ERR_SERR) {
2334 ata_ehi_push_desc(ehi, "SError=%08x", serr);
2335 err_mask |= AC_ERR_ATA_BUS;
2336 action |= ATA_EH_RESET;
2340 if (!err_mask) {
2341 err_mask = AC_ERR_OTHER;
2342 action |= ATA_EH_RESET;
2345 ehi->serror |= serr;
2346 ehi->action |= action;
2348 if (qc)
2349 qc->err_mask |= err_mask;
2350 else
2351 ehi->err_mask |= err_mask;
2353 if (err_mask == AC_ERR_DEV) {
2355 * Cannot do ata_port_freeze() here,
2356 * because it would kill PIO access,
2357 * which is needed for further diagnosis.
2361 } else if (edma_err_cause & eh_freeze_mask) {
2363 * Note to self: ata_port_freeze() calls ata_port_abort()
2365 ata_port_freeze(ap);
2372 ata_link_abort(qc->dev->link);
2378 static void mv_process_crpb_response(struct ata_port *ap,
2379 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
2381 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
2384 if (qc) {
2385 u16 edma_status = le16_to_cpu(response->flags);
2387 * edma_status from a response queue entry:
2388 * LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
2389 * MSB is saved ATA status from command completion.
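* For example, edma_status == 0x5000 is ATA status 0x50 (DRDY set,
* ERR clear) in the MSB with a clean LSB: a successful completion.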
2391 if (!ncq_enabled) {
2392 u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
2393 if (err_cause) {
2395 * Error will be seen/handled by mv_err_intr().
2396 * So do nothing at all here.
2398 return;
2401 ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
2402 if (!ac_err_mask(ata_status))
2403 ata_qc_complete(qc);
2404 /* else: leave it for mv_err_intr() */
2406 ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
2407 __func__, tag);
2411 static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
2413 void __iomem *port_mmio = mv_ap_base(ap);
2414 struct mv_host_priv *hpriv = ap->host->private_data;
2415 u32 in_index;
2416 bool work_done = false;
2417 int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
2419 /* Get the hardware queue position index */
2420 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
2421 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
2423 /* Process any new responses received since the last time we looked */
2424 while (in_index != pp->resp_idx) {
2425 unsigned int tag;
2426 struct mv_crpb *response = &pp->crpb[pp->resp_idx];
2428 pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
2430 if (IS_GEN_I(hpriv)) {
2431 /* 50xx: no NCQ, only one command active at a time */
2432 tag = ap->link.active_tag;
2434 /* Gen II/IIE: get command tag from CRPB entry */
2435 tag = le16_to_cpu(response->id) & 0x1f;
2437 mv_process_crpb_response(ap, response, tag, ncq_enabled);
2438 work_done = true;
2441 /* Update the software queue position index in hardware */
2442 if (work_done)
2443 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
2444 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
2445 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
2448 static void mv_port_intr(struct ata_port *ap, u32 port_cause)
2450 struct mv_port_priv *pp;
2451 int edma_was_enabled;
2453 if (!ap || (ap->flags & ATA_FLAG_DISABLED)) {
2454 mv_unexpected_intr(ap, 0);
2458 * Grab a snapshot of the EDMA_EN flag setting,
2459 * so that we have a consistent view for this port,
2460 * even if one of the routines we call changes it.
2462 pp = ap->private_data;
2463 edma_was_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
2465 * Process completed CRPB response(s) before other events.
2467 if (edma_was_enabled && (port_cause & DONE_IRQ)) {
2468 mv_process_crpb_entries(ap, pp);
2469 if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
2470 mv_handle_fbs_ncq_dev_err(ap);
2473 * Handle chip-reported errors, or continue on to handle PIO.
2475 if (unlikely(port_cause & ERR_IRQ)) {
2476 mv_err_intr(ap);
2477 } else if (!edma_was_enabled) {
2478 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
2479 if (qc)
2480 ata_sff_host_intr(ap, qc);
2481 else
2482 mv_unexpected_intr(ap, edma_was_enabled);
2487 * mv_host_intr - Handle all interrupts on the given host controller
2488 * @host: host specific structure
2489 * @main_irq_cause: Main interrupt cause register for the chip.
2492 * Inherited from caller.
2494 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
2496 struct mv_host_priv *hpriv = host->private_data;
2497 void __iomem *mmio = hpriv->base, *hc_mmio;
2498 unsigned int handled = 0, port;
2500 for (port = 0; port < hpriv->n_ports; port++) {
2501 struct ata_port *ap = host->ports[port];
2502 unsigned int p, shift, hardport, port_cause;
2504 MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
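/* hardport = index of this port within its host controller (hc);
* shift = offset of this port's DONE/ERR bits in main_irq_cause
*/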
2506 * Each hc within the host has its own hc_irq_cause register,
2507 * where the interrupting ports' bits get ack'd.
2509 if (hardport == 0) { /* first port on this hc ? */
2510 u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
2511 u32 port_mask, ack_irqs;
2513 * Skip this entire hc if nothing pending for any ports
2515 if (!hc_cause) {
2516 port += MV_PORTS_PER_HC - 1;
2517 continue;
2520 * We don't need/want to read the hc_irq_cause register,
2521 * because doing so hurts performance, and
2522 * main_irq_cause already gives us everything we need.
2524 * But we do have to *write* to the hc_irq_cause to ack
2525 * the ports that we are handling this time through.
2527 * This requires that we create a bitmap for those
2528 * ports which interrupted us, and use that bitmap
2529 * to ack (only) those ports via hc_irq_cause.
2531 ack_irqs = 0;
2532 for (p = 0; p < MV_PORTS_PER_HC; ++p) {
2533 if ((port + p) >= hpriv->n_ports)
2534 break;
2535 port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
2536 if (hc_cause & port_mask)
2537 ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
2539 hc_mmio = mv_hc_base_from_port(mmio, port);
2540 writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
2544 * Handle interrupts signalled for this port:
2546 port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
2547 if (port_cause)
2548 mv_port_intr(ap, port_cause);
2553 static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
2555 struct mv_host_priv *hpriv = host->private_data;
2556 struct ata_port *ap;
2557 struct ata_queued_cmd *qc;
2558 struct ata_eh_info *ehi;
2559 unsigned int i, err_mask, printed = 0;
2562 err_cause = readl(mmio + hpriv->irq_cause_ofs);
2564 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
2567 DPRINTK("All regs @ PCI error\n");
2568 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
2570 writelfl(0, mmio + hpriv->irq_cause_ofs);
2572 for (i = 0; i < host->n_ports; i++) {
2573 ap = host->ports[i];
2574 if (!ata_link_offline(&ap->link)) {
2575 ehi = &ap->link.eh_info;
2576 ata_ehi_clear_desc(ehi);
2577 if (!printed++)
2578 ata_ehi_push_desc(ehi,
2579 "PCI err cause 0x%08x", err_cause);
2580 err_mask = AC_ERR_HOST_BUS;
2581 ehi->action = ATA_EH_RESET;
2582 qc = ata_qc_from_tag(ap, ap->link.active_tag);
2583 if (qc)
2584 qc->err_mask |= err_mask;
2585 else
2586 ehi->err_mask |= err_mask;
2588 ata_port_freeze(ap);
2591 return 1; /* handled */
2595 * mv_interrupt - Main interrupt event handler
2597 * @dev_instance: private data; in this case the host structure
2599 * Read the read only register to determine if any host
2600 * controllers have pending interrupts. If so, call lower level
2601 * routine to handle. Also check for PCI errors which are only
2602 * reported here.
2605 * This routine holds the host lock while processing pending
2606 * interrupts.
2608 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
2610 struct ata_host *host = dev_instance;
2611 struct mv_host_priv *hpriv = host->private_data;
2612 unsigned int handled = 0;
2613 int using_msi = hpriv->hp_flags & MV_HP_FLAG_MSI;
2614 u32 main_irq_cause, pending_irqs;
2616 spin_lock(&host->lock);
2618 /* for MSI: block new interrupts while in here */
2619 if (using_msi)
2620 writel(0, hpriv->main_irq_mask_addr);
2622 main_irq_cause = readl(hpriv->main_irq_cause_addr);
2623 pending_irqs = main_irq_cause & hpriv->main_irq_mask;
2625 * Deal with cases where we either have nothing pending, or have read
2626 * a bogus register value which can indicate HW removal or PCI fault.
2628 if (pending_irqs && main_irq_cause != 0xffffffffU) {
2629 if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
2630 handled = mv_pci_error(host, hpriv->base);
2632 handled = mv_host_intr(host, pending_irqs);
2635 /* for MSI: unmask; interrupt cause bits will retrigger now */
2636 if (using_msi)
2637 writel(hpriv->main_irq_mask, hpriv->main_irq_mask_addr);
2639 spin_unlock(&host->lock);
2641 return IRQ_RETVAL(handled);
2644 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
2648 switch (sc_reg_in) {
2652 ofs = sc_reg_in * sizeof(u32);
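/* SCR_STATUS/SCR_ERROR/SCR_CONTROL map linearly onto the PHY register
* block; anything else maps to the 0xffffffffU sentinel the callers check.
*/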
2661 static int mv5_scr_read(struct ata_link *link, unsigned int sc_reg_in, u32 *val)
2663 struct mv_host_priv *hpriv = link->ap->host->private_data;
2664 void __iomem *mmio = hpriv->base;
2665 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
2666 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2668 if (ofs != 0xffffffffU) {
2669 *val = readl(addr + ofs);
2670 return 0;
2671 } else
2672 return -EINVAL;
2675 static int mv5_scr_write(struct ata_link *link, unsigned int sc_reg_in, u32 val)
2677 struct mv_host_priv *hpriv = link->ap->host->private_data;
2678 void __iomem *mmio = hpriv->base;
2679 void __iomem *addr = mv5_phy_base(mmio, link->ap->port_no);
2680 unsigned int ofs = mv5_scr_offset(sc_reg_in);
2682 if (ofs != 0xffffffffU) {
2683 writelfl(val, addr + ofs);
2684 return 0;
2685 } else
2686 return -EINVAL;
2689 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
2691 struct pci_dev *pdev = to_pci_dev(host->dev);
2694 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
2697 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2699 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
2702 mv_reset_pci_bus(host, mmio);
2705 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2707 writel(0x0fcfffff, mmio + MV_FLASH_CTL_OFS);
2710 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
2713 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
2716 tmp = readl(phy_mmio + MV5_PHY_MODE);
2718 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
2719 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
2722 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2726 writel(0, mmio + MV_GPIO_PORT_CTL_OFS);
2728 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
2730 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
2732 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
2735 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2738 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
2739 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
2741 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
2744 tmp = readl(phy_mmio + MV5_LTMODE_OFS);
2746 writel(tmp, phy_mmio + MV5_LTMODE_OFS);
2748 tmp = readl(phy_mmio + MV5_PHY_CTL_OFS);
2751 writel(tmp, phy_mmio + MV5_PHY_CTL_OFS);
2754 tmp = readl(phy_mmio + MV5_PHY_MODE);
2756 tmp |= hpriv->signal[port].pre;
2757 tmp |= hpriv->signal[port].amps;
2758 writel(tmp, phy_mmio + MV5_PHY_MODE);
2763 #define ZERO(reg) writel(0, port_mmio + (reg))
2764 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
2767 void __iomem *port_mmio = mv_port_base(mmio, port);
2769 mv_reset_channel(hpriv, mmio, port);
2771 ZERO(0x028); /* command */
2772 writel(0x11f, port_mmio + EDMA_CFG_OFS);
2773 ZERO(0x004); /* timer */
2774 ZERO(0x008); /* irq err cause */
2775 ZERO(0x00c); /* irq err mask */
2776 ZERO(0x010); /* rq bah */
2777 ZERO(0x014); /* rq inp */
2778 ZERO(0x018); /* rq outp */
2779 ZERO(0x01c); /* respq bah */
2780 ZERO(0x024); /* respq outp */
2781 ZERO(0x020); /* respq inp */
2782 ZERO(0x02c); /* test control */
2783 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
2787 #define ZERO(reg) writel(0, hc_mmio + (reg))
2788 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2791 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2799 tmp = readl(hc_mmio + 0x20);
2802 writel(tmp, hc_mmio + 0x20);
2806 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2809 unsigned int hc, port;
2811 for (hc = 0; hc < n_hc; hc++) {
2812 for (port = 0; port < MV_PORTS_PER_HC; port++)
2813 mv5_reset_hc_port(hpriv, mmio,
2814 (hc * MV_PORTS_PER_HC) + port);
2816 mv5_reset_one_hc(hpriv, mmio, hc);
2823 #define ZERO(reg) writel(0, mmio + (reg))
2824 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2826 struct mv_host_priv *hpriv = host->private_data;
2829 tmp = readl(mmio + MV_PCI_MODE_OFS);
2831 writel(tmp, mmio + MV_PCI_MODE_OFS);
2833 ZERO(MV_PCI_DISC_TIMER);
2834 ZERO(MV_PCI_MSI_TRIGGER);
2835 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
2836 ZERO(MV_PCI_SERR_MASK);
2837 ZERO(hpriv->irq_cause_ofs);
2838 ZERO(hpriv->irq_mask_ofs);
2839 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2840 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2841 ZERO(MV_PCI_ERR_ATTRIBUTE);
2842 ZERO(MV_PCI_ERR_COMMAND);
2846 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2850 mv5_reset_flash(hpriv, mmio);
2852 tmp = readl(mmio + MV_GPIO_PORT_CTL_OFS);
2854 tmp |= (1 << 5) | (1 << 6);
2855 writel(tmp, mmio + MV_GPIO_PORT_CTL_OFS);
2859 * mv6_reset_hc - Perform the 6xxx global soft reset
2860 * @mmio: base address of the HBA
2862 * This routine only applies to 6xxx parts.
2865 * Inherited from caller.
2867 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2870 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2874 /* Following procedure defined in PCI "main command and status
2878 writel(t | STOP_PCI_MASTER, reg);
2880 for (i = 0; i < 1000; i++) {
2883 if (PCI_MASTER_EMPTY & t)
2886 if (!(PCI_MASTER_EMPTY & t)) {
2887 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2895 writel(t | GLOB_SFT_RST, reg);
2898 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2900 if (!(GLOB_SFT_RST & t)) {
2901 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2906 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2909 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2912 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2914 if (GLOB_SFT_RST & t) {
2915 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2922 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2925 void __iomem *port_mmio;
2928 tmp = readl(mmio + MV_RESET_CFG_OFS);
2929 if ((tmp & (1 << 0)) == 0) {
2930 hpriv->signal[idx].amps = 0x7 << 8;
2931 hpriv->signal[idx].pre = 0x1 << 5;
2935 port_mmio = mv_port_base(mmio, idx);
2936 tmp = readl(port_mmio + PHY_MODE2);
2938 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2939 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2942 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2944 writel(0x00000060, mmio + MV_GPIO_PORT_CTL_OFS);
2947 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2950 void __iomem *port_mmio = mv_port_base(mmio, port);
2952 u32 hp_flags = hpriv->hp_flags;
2954 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2956 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2959 if (fix_phy_mode2) {
2960 m2 = readl(port_mmio + PHY_MODE2);
2963 writel(m2, port_mmio + PHY_MODE2);
2967 m2 = readl(port_mmio + PHY_MODE2);
2968 m2 &= ~((1 << 16) | (1 << 31));
2969 writel(m2, port_mmio + PHY_MODE2);
2975 * Gen-II/IIe PHY_MODE3 errata RM#2:
2976 * Achieves better receiver noise performance than the h/w default:
2978 m3 = readl(port_mmio + PHY_MODE3);
2979 m3 = (m3 & 0x1f) | (0x5555601 << 5);
2981 /* Guideline 88F5182 (GL# SATA-S11) */
2985 if (fix_phy_mode4) {
2986 u32 m4 = readl(port_mmio + PHY_MODE4);
2988 * Enforce reserved-bit restrictions on GenIIe devices only.
2989 * For earlier chipsets, force only the internal config field
2990 * (workaround for errata FEr SATA#10 part 1).
2992 if (IS_GEN_IIE(hpriv))
2993 m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
2995 m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
2996 writel(m4, port_mmio + PHY_MODE4);
2999 * Workaround for 60x1-B2 errata SATA#13:
3000 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
3001 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
3003 writel(m3, port_mmio + PHY_MODE3);
3005 /* Revert values of pre-emphasis and signal amps to the saved ones */
3006 m2 = readl(port_mmio + PHY_MODE2);
3008 m2 &= ~MV_M2_PREAMP_MASK;
3009 m2 |= hpriv->signal[port].amps;
3010 m2 |= hpriv->signal[port].pre;
3013 /* according to mvSata 3.6.1, some IIE values are fixed */
3014 if (IS_GEN_IIE(hpriv)) {
3019 writel(m2, port_mmio + PHY_MODE2);
3022 /* TODO: use the generic LED interface to configure the SATA Presence */
3023 /* & Activity LEDs on the board */
3024 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
3030 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
3033 void __iomem *port_mmio;
3036 port_mmio = mv_port_base(mmio, idx);
3037 tmp = readl(port_mmio + PHY_MODE2);
3039 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
3040 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
3044 #define ZERO(reg) writel(0, port_mmio + (reg))
3045 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
3046 void __iomem *mmio, unsigned int port)
3048 void __iomem *port_mmio = mv_port_base(mmio, port);
3050 mv_reset_channel(hpriv, mmio, port);
3052 ZERO(0x028); /* command */
3053 writel(0x101f, port_mmio + EDMA_CFG_OFS);
3054 ZERO(0x004); /* timer */
3055 ZERO(0x008); /* irq err cause */
3056 ZERO(0x00c); /* irq err mask */
3057 ZERO(0x010); /* rq bah */
3058 ZERO(0x014); /* rq inp */
3059 ZERO(0x018); /* rq outp */
3060 ZERO(0x01c); /* respq bah */
3061 ZERO(0x024); /* respq outp */
3062 ZERO(0x020); /* respq inp */
3063 ZERO(0x02c); /* test control */
3064 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT_OFS);
3069 #define ZERO(reg) writel(0, hc_mmio + (reg))
3070 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
3073 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
3083 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
3084 void __iomem *mmio, unsigned int n_hc)
3088 for (port = 0; port < hpriv->n_ports; port++)
3089 mv_soc_reset_hc_port(hpriv, mmio, port);
3091 mv_soc_reset_one_hc(hpriv, mmio);
3096 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
3102 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
3107 static void mv_setup_ifcfg(void __iomem *port_mmio, int want_gen2i)
3109 u32 ifcfg = readl(port_mmio + SATA_INTERFACE_CFG_OFS);
3111 ifcfg = (ifcfg & 0xf7f) | 0x9b1000; /* from chip spec */
3112 if (want_gen2i)
3113 ifcfg |= (1 << 7); /* enable gen2i speed */
3114 writelfl(ifcfg, port_mmio + SATA_INTERFACE_CFG_OFS);
3117 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
3118 unsigned int port_no)
3120 void __iomem *port_mmio = mv_port_base(mmio, port_no);
3123 * The datasheet warns against setting EDMA_RESET when EDMA is active
3124 * (but doesn't say what the problem might be). So we first try
3125 * to disable the EDMA engine before doing the EDMA_RESET operation.
3127 mv_stop_edma_engine(port_mmio);
3128 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
3130 if (!IS_GEN_I(hpriv)) {
3131 /* Enable 3.0gb/s link speed: this survives EDMA_RESET */
3132 mv_setup_ifcfg(port_mmio, 1);
3135 * Strobing EDMA_RESET here causes a hard reset of the SATA transport,
3136 * link, and physical layers. It resets all SATA interface registers
3137 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
3139 writelfl(EDMA_RESET, port_mmio + EDMA_CMD_OFS);
3140 udelay(25); /* allow reset propagation */
3141 writelfl(0, port_mmio + EDMA_CMD_OFS);
3143 hpriv->ops->phy_errata(hpriv, mmio, port_no);
3145 if (IS_GEN_I(hpriv))
3149 static void mv_pmp_select(struct ata_port *ap, int pmp)
3151 if (sata_pmp_supported(ap)) {
3152 void __iomem *port_mmio = mv_ap_base(ap);
3153 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
3154 int old = reg & 0xf;
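/* The low nibble of SATA_IFCTL selects which PMP device link
* receives subsequently transmitted FISes.
*/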
3156 if (old != pmp) {
3157 reg = (reg & ~0xf) | pmp;
3158 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
3163 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
3164 unsigned long deadline)
3166 mv_pmp_select(link->ap, sata_srst_pmp(link));
3167 return sata_std_hardreset(link, class, deadline);
3170 static int mv_softreset(struct ata_link *link, unsigned int *class,
3171 unsigned long deadline)
3173 mv_pmp_select(link->ap, sata_srst_pmp(link));
3174 return ata_sff_softreset(link, class, deadline);
3177 static int mv_hardreset(struct ata_link *link, unsigned int *class,
3178 unsigned long deadline)
3180 struct ata_port *ap = link->ap;
3181 struct mv_host_priv *hpriv = ap->host->private_data;
3182 struct mv_port_priv *pp = ap->private_data;
3183 void __iomem *mmio = hpriv->base;
3184 int rc, attempts = 0, extra = 0;
3185 u32 sstatus;
3186 bool online;
3188 mv_reset_channel(hpriv, mmio, ap->port_no);
3189 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
3191 ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN | MV_PP_FLAG_FAKE_ATA_BUSY);
3193 /* Workaround for errata FEr SATA#10 (part 2) */
3195 const unsigned long *timing =
3196 sata_ehc_deb_timing(&link->eh_context);
3198 rc = sata_link_hardreset(link, timing, deadline + extra,
3199 &online, NULL);
3200 rc = online ? -EAGAIN : rc;
3203 sata_scr_read(link, SCR_STATUS, &sstatus);
3204 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
3205 /* Force 1.5gb/s link speed and try again */
3206 mv_setup_ifcfg(mv_ap_base(ap), 0);
3207 if (time_after(jiffies + HZ, deadline))
3208 extra = HZ; /* only extend it once, max */
3210 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
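/* Here sstatus is 0x0 (no device) or 0x113/0x123 (PHY up at Gen1/Gen2) */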
3211 mv_save_cached_regs(ap);
3212 mv_edma_cfg(ap, 0, 0);
3217 static void mv_eh_freeze(struct ata_port *ap)
3220 mv_enable_port_irqs(ap, 0);
3223 static void mv_eh_thaw(struct ata_port *ap)
3225 struct mv_host_priv *hpriv = ap->host->private_data;
3226 unsigned int port = ap->port_no;
3227 unsigned int hardport = mv_hardport_from_port(port);
3228 void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
3229 void __iomem *port_mmio = mv_ap_base(ap);
3232 /* clear EDMA errors on this port */
3233 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
3235 /* clear pending irq events */
3236 hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
3237 writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
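/* cause bits clear on writing 0, so other ports' bits are left intact */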
3239 mv_enable_port_irqs(ap, ERR_IRQ);
3243 * mv_port_init - Perform some early initialization on a single port.
3244 * @port: libata data structure storing shadow register addresses
3245 * @port_mmio: base address of the port
3247 * Initialize shadow register mmio addresses, clear outstanding
3248 * interrupts on the port, and unmask interrupts for the future
3249 * start of the port.
3252 * Inherited from caller.
3254 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
3256 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
3259 /* PIO related setup
3261 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
3263 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
3264 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
3265 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
3266 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
3267 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
3268 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
3270 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
3271 /* special case: control/altstatus doesn't have ATA_REG_ address */
3272 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
3275 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
3277 /* Clear any currently outstanding port interrupt conditions */
3278 serr_ofs = mv_scr_offset(SCR_ERROR);
3279 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
3280 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
3282 /* unmask all non-transient EDMA error interrupts */
3283 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
3285 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
3286 readl(port_mmio + EDMA_CFG_OFS),
3287 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
3288 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
3291 static unsigned int mv_in_pcix_mode(struct ata_host *host)
3293 struct mv_host_priv *hpriv = host->private_data;
3294 void __iomem *mmio = hpriv->base;
3297 if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
3298 return 0; /* not PCI-X capable */
3299 reg = readl(mmio + MV_PCI_MODE_OFS);
3300 if ((reg & MV_PCI_MODE_MASK) == 0)
3301 return 0; /* conventional PCI mode */
3302 return 1; /* chip is in PCI-X mode */
3305 static int mv_pci_cut_through_okay(struct ata_host *host)
3307 struct mv_host_priv *hpriv = host->private_data;
3308 void __iomem *mmio = hpriv->base;
3311 if (!mv_in_pcix_mode(host)) {
3312 reg = readl(mmio + PCI_COMMAND_OFS);
3313 if (reg & PCI_COMMAND_MRDTRIG)
3314 return 0; /* not okay */
3316 return 1; /* okay */
3319 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
3321 struct pci_dev *pdev = to_pci_dev(host->dev);
3322 struct mv_host_priv *hpriv = host->private_data;
3323 u32 hp_flags = hpriv->hp_flags;
3325 switch (board_idx) {
3327 hpriv->ops = &mv5xxx_ops;
3328 hp_flags |= MV_HP_GEN_I;
3330 switch (pdev->revision) {
3332 hp_flags |= MV_HP_ERRATA_50XXB0;
3335 hp_flags |= MV_HP_ERRATA_50XXB2;
3338 dev_printk(KERN_WARNING, &pdev->dev,
3339 "Applying 50XXB2 workarounds to unknown rev\n");
3340 hp_flags |= MV_HP_ERRATA_50XXB2;
3347 hpriv->ops = &mv5xxx_ops;
3348 hp_flags |= MV_HP_GEN_I;
3350 switch (pdev->revision) {
3352 hp_flags |= MV_HP_ERRATA_50XXB0;
3355 hp_flags |= MV_HP_ERRATA_50XXB2;
3358 dev_printk(KERN_WARNING, &pdev->dev,
3359 "Applying B2 workarounds to unknown rev\n");
3360 hp_flags |= MV_HP_ERRATA_50XXB2;
3367 hpriv->ops = &mv6xxx_ops;
3368 hp_flags |= MV_HP_GEN_II;
3370 switch (pdev->revision) {
3372 hp_flags |= MV_HP_ERRATA_60X1B2;
3375 hp_flags |= MV_HP_ERRATA_60X1C0;
3378 dev_printk(KERN_WARNING, &pdev->dev,
3379 "Applying B2 workarounds to unknown rev\n");
3380 hp_flags |= MV_HP_ERRATA_60X1B2;
3386 hp_flags |= MV_HP_PCIE | MV_HP_CUT_THROUGH;
3387 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
3388 (pdev->device == 0x2300 || pdev->device == 0x2310))
3391 * Highpoint RocketRAID PCIe 23xx series cards:
3393 * Unconfigured drives are treated as "Legacy"
3394 * by the BIOS, and it overwrites sector 8 with
3395 * a "Lgcy" metadata block prior to Linux boot.
3397 * Configured drives (RAID or JBOD) leave sector 8
3398 * alone, but instead overwrite a high numbered
3399 * sector for the RAID metadata. This sector can
3400 * be determined exactly, by truncating the physical
3401 * drive capacity to a nice even GB value.
3403 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
3405 * Warn the user, lest they think we're just buggy.
3407 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
3408 " BIOS CORRUPTS DATA on all attached drives,"
3409 " regardless of if/how they are configured."
3411 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
3412 " use sectors 8-9 on \"Legacy\" drives,"
3413 " and avoid the final two gigabytes on"
3414 " all RocketRAID BIOS initialized drives.\n");
3418 hpriv->ops = &mv6xxx_ops;
3419 hp_flags |= MV_HP_GEN_IIE;
3420 if (board_idx == chip_6042 && mv_pci_cut_through_okay(host))
3421 hp_flags |= MV_HP_CUT_THROUGH;
3423 switch (pdev->revision) {
3424 case 0x2: /* Rev.B0: the first/only public release */
3425 hp_flags |= MV_HP_ERRATA_60X1C0;
3428 dev_printk(KERN_WARNING, &pdev->dev,
3429 "Applying 60X1C0 workarounds to unknown rev\n");
3430 hp_flags |= MV_HP_ERRATA_60X1C0;
3435 hpriv->ops = &mv_soc_ops;
3436 hp_flags |= MV_HP_FLAG_SOC | MV_HP_GEN_IIE |
3437 MV_HP_ERRATA_60X1C0;
3441 dev_printk(KERN_ERR, host->dev,
3442 "BUG: invalid board index %u\n", board_idx);
3446 hpriv->hp_flags = hp_flags;
3447 if (hp_flags & MV_HP_PCIE) {
3448 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
3449 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
3450 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
3452 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
3453 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
3454 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
3461 * mv_init_host - Perform some early initialization of the host.
3462 * @host: ATA host to initialize
3463 * @board_idx: controller index
3465 * If possible, do an early global reset of the host. Then do
3466 * our port init and clear/unmask all/relevant host interrupts.
3469 * Inherited from caller.
3471 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
3473 int rc = 0, n_hc, port, hc;
3474 struct mv_host_priv *hpriv = host->private_data;
3475 void __iomem *mmio = hpriv->base;
3477 rc = mv_chip_id(host, board_idx);
3481 if (IS_SOC(hpriv)) {
3482 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
3483 hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
3485 hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
3486 hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
3489 /* initialize shadow irq mask with register's value */
3490 hpriv->main_irq_mask = readl(hpriv->main_irq_mask_addr);
3492 /* global interrupt mask: 0 == mask everything */
3493 mv_set_main_irq_mask(host, ~0, 0);
3495 n_hc = mv_get_hc_count(host->ports[0]->flags);
3497 for (port = 0; port < host->n_ports; port++)
3498 hpriv->ops->read_preamp(hpriv, port, mmio);
3500 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
3504 hpriv->ops->reset_flash(hpriv, mmio);
3505 hpriv->ops->reset_bus(host, mmio);
3506 hpriv->ops->enable_leds(hpriv, mmio);
3508 for (port = 0; port < host->n_ports; port++) {
3509 struct ata_port *ap = host->ports[port];
3510 void __iomem *port_mmio = mv_port_base(mmio, port);
3512 mv_port_init(&ap->ioaddr, port_mmio);
3515 if (!IS_SOC(hpriv)) {
3516 unsigned int offset = port_mmio - mmio;
3517 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
3518 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
3523 for (hc = 0; hc < n_hc; hc++) {
3524 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
3526 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
3527 "(before clear)=0x%08x\n", hc,
3528 readl(hc_mmio + HC_CFG_OFS),
3529 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
3531 /* Clear any currently outstanding hc interrupt conditions */
3532 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
3535 /* Clear any currently outstanding host interrupt conditions */
3536 writelfl(0, mmio + hpriv->irq_cause_ofs);
3538 /* and unmask interrupt generation for host regs */
3539 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
3542 * enable only global host interrupts for now.
3543 * The per-port interrupts get done later as ports are set up.
3545 mv_set_main_irq_mask(host, 0, PCI_ERR);
3550 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
3552 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
3554 if (!hpriv->crqb_pool)
3557 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
3559 if (!hpriv->crpb_pool)
3562 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
3564 if (!hpriv->sg_tbl_pool)
3570 static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
3571 struct mbus_dram_target_info *dram)
3575 for (i = 0; i < 4; i++) {
3576 writel(0, hpriv->base + WINDOW_CTRL(i));
3577 writel(0, hpriv->base + WINDOW_BASE(i));
3580 for (i = 0; i < dram->num_cs; i++) {
3581 struct mbus_dram_window *cs = dram->cs + i;
3583 writel(((cs->size - 1) & 0xffff0000) |
3584 (cs->mbus_attr << 8) |
3585 (dram->mbus_dram_target_id << 4) | 1,
3586 hpriv->base + WINDOW_CTRL(i));
3587 writel(cs->base, hpriv->base + WINDOW_BASE(i));
3592 * mv_platform_probe - handle a positive probe of an soc Marvell
3594 * @pdev: platform device found
3597 * Inherited from caller.
3599 static int mv_platform_probe(struct platform_device *pdev)
3601 static int printed_version;
3602 const struct mv_sata_platform_data *mv_platform_data;
3603 const struct ata_port_info *ppi[] =
3604 { &mv_port_info[chip_soc], NULL };
3605 struct ata_host *host;
3606 struct mv_host_priv *hpriv;
3607 struct resource *res;
3610 if (!printed_version++)
3611 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3614 * Simple resource validation.
3616 if (unlikely(pdev->num_resources != 2)) {
3617 dev_err(&pdev->dev, "invalid number of resources\n");
3622 * Get the register base first
3624 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3629 mv_platform_data = pdev->dev.platform_data;
3630 n_ports = mv_platform_data->n_ports;
3632 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3633 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3635 if (!host || !hpriv)
3637 host->private_data = hpriv;
3638 hpriv->n_ports = n_ports;
3641 hpriv->base = devm_ioremap(&pdev->dev, res->start,
3642 res->end - res->start + 1);
3643 hpriv->base -= MV_SATAHC0_REG_BASE;
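/* The SoC register resource points at SATAHC0 itself, so bias the
* base downward to keep the PCI-style offset arithmetic valid here.
*/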
3646 * (Re-)program MBUS remapping windows if we are asked to.
3648 if (mv_platform_data->dram != NULL)
3649 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
3651 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3655 /* initialize adapter */
3656 rc = mv_init_host(host, chip_soc);
3660 dev_printk(KERN_INFO, &pdev->dev,
3661 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
3664 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
3665 IRQF_SHARED, &mv6_sht);
3670 * mv_platform_remove - unplug a platform interface
3671 * @pdev: platform device
3673 * A platform bus SATA device has been unplugged. Perform the needed
3674 * cleanup. Also called on module unload for any active devices.
3676 static int __devexit mv_platform_remove(struct platform_device *pdev)
3678 struct device *dev = &pdev->dev;
3679 struct ata_host *host = dev_get_drvdata(dev);
3681 ata_host_detach(host);
3685 static struct platform_driver mv_platform_driver = {
3686 .probe = mv_platform_probe,
3687 .remove = __devexit_p(mv_platform_remove),
3690 .owner = THIS_MODULE,
3696 static int mv_pci_init_one(struct pci_dev *pdev,
3697 const struct pci_device_id *ent);
3700 static struct pci_driver mv_pci_driver = {
3702 .id_table = mv_pci_tbl,
3703 .probe = mv_pci_init_one,
3704 .remove = ata_pci_remove_one,
3710 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
3713 /* move to PCI layer or libata core? */
3714 static int pci_go_64(struct pci_dev *pdev)
3718 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
3719 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
3721 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3723 dev_printk(KERN_ERR, &pdev->dev,
3724 "64-bit DMA enable failed\n");
3729 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
3731 dev_printk(KERN_ERR, &pdev->dev,
3732 "32-bit DMA enable failed\n");
3735 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
3737 dev_printk(KERN_ERR, &pdev->dev,
3738 "32-bit consistent DMA enable failed\n");
3747 * mv_print_info - Dump key info to kernel log for perusal.
3748 * @host: ATA host to print info about
3750 * FIXME: complete this.
3753 * Inherited from caller.
3755 static void mv_print_info(struct ata_host *host)
3757 struct pci_dev *pdev = to_pci_dev(host->dev);
3758 struct mv_host_priv *hpriv = host->private_data;
3760 const char *scc_s, *gen;
3762 /* Use this to determine the HW stepping of the chip so we know
3763 * which errata to work around
3765 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3768 else if (scc == 0x01)
3773 if (IS_GEN_I(hpriv))
3775 else if (IS_GEN_II(hpriv))
3777 else if (IS_GEN_IIE(hpriv))
3782 dev_printk(KERN_INFO, &pdev->dev,
3783 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3784 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
3785 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3789 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
3790 * @pdev: PCI device found
3791 * @ent: PCI device ID entry for the matched host
3794 * Inherited from caller.
3796 static int mv_pci_init_one(struct pci_dev *pdev,
3797 const struct pci_device_id *ent)
3799 static int printed_version;
3800 unsigned int board_idx = (unsigned int)ent->driver_data;
3801 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
3802 struct ata_host *host;
3803 struct mv_host_priv *hpriv;
3806 if (!printed_version++)
3807 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
3810 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
3812 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3813 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
3814 if (!host || !hpriv)
3816 host->private_data = hpriv;
3817 hpriv->n_ports = n_ports;
3819 /* acquire resources */
3820 rc = pcim_enable_device(pdev);
3824 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
3826 pcim_pin_device(pdev);
3829 host->iomap = pcim_iomap_table(pdev);
3830 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3832 rc = pci_go_64(pdev);
3836 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3840 /* initialize adapter */
3841 rc = mv_init_host(host, board_idx);
3845 /* Enable message-signaled interrupts (MSI), if requested */
3846 if (msi && pci_enable_msi(pdev) == 0)
3847 hpriv->hp_flags |= MV_HP_FLAG_MSI;
3849 mv_dump_pci_cfg(pdev, 0x68);
3850 mv_print_info(host);
3852 pci_set_master(pdev);
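/* Memory-Write-Invalidate is just an optimization; failure is harmless */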
3853 pci_try_set_mwi(pdev);
3854 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3855 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
3859 static int mv_platform_probe(struct platform_device *pdev);
3860 static int __devexit mv_platform_remove(struct platform_device *pdev);
3862 static int __init mv_init(void)
3866 rc = pci_register_driver(&mv_pci_driver);
3870 rc = platform_driver_register(&mv_platform_driver);
3874 pci_unregister_driver(&mv_pci_driver);
3879 static void __exit mv_exit(void)
3882 pci_unregister_driver(&mv_pci_driver);
3884 platform_driver_unregister(&mv_platform_driver);
3887 MODULE_AUTHOR("Brett Russ");
3888 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3889 MODULE_LICENSE("GPL");
3890 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3891 MODULE_VERSION(DRV_VERSION);
3892 MODULE_ALIAS("platform:" DRV_NAME);
3895 module_param(msi, int, 0444);
3896 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3899 module_init(mv_init);
3900 module_exit(mv_exit);