2 * sata_mv.c - Marvell SATA support
4 * Copyright 2008: Marvell Corporation, all rights reserved.
5 * Copyright 2005: EMC Corporation, all rights reserved.
6 * Copyright 2005 Red Hat, Inc. All rights reserved.
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X)
33 2) Improve/fix IRQ and error handling sequences.
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
37 4) Think about TCQ support here, and for libata in general
38 with controllers that support it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
43 6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
45 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
47 8) Develop a low-power-consumption strategy, and implement it.
49 9) [Experiment, low priority] See if ATAPI can be supported using
50 "unknown FIS" or "vendor-specific FIS" support, or something creative
53 10) [Experiment, low priority] Investigate interrupt coalescing.
54 Quite often, especially with PCI Message Signalled Interrupts (MSI),
55 the overhead reduced by interrupt mitigation is not
56 worth the latency cost.
58 11) [Experiment, Marvell value added] Is it possible to use target
59 mode to cross-connect two Linux boxes with Marvell cards? If so,
60 creating LibATA target mode support would be very interesting.
62 Target mode, for those without docs, is the ability to directly
63 connect two SATA controllers.
67 #include <linux/kernel.h>
68 #include <linux/module.h>
69 #include <linux/pci.h>
70 #include <linux/init.h>
71 #include <linux/blkdev.h>
72 #include <linux/delay.h>
73 #include <linux/interrupt.h>
74 #include <linux/dmapool.h>
75 #include <linux/dma-mapping.h>
76 #include <linux/device.h>
77 #include <linux/platform_device.h>
78 #include <linux/ata_platform.h>
79 #include <scsi/scsi_host.h>
80 #include <scsi/scsi_cmnd.h>
81 #include <scsi/scsi_device.h>
82 #include <linux/libata.h>
84 #define DRV_NAME "sata_mv"
85 #define DRV_VERSION "1.20"
88 /* BAR's are enumerated in terms of pci_resource_start() terms */
89 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
90 MV_IO_BAR = 2, /* offset 0x18: IO space */
91 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
93 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
94 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
97 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
98 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
99 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
100 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
101 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
102 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
104 MV_SATAHC0_REG_BASE = 0x20000,
105 MV_FLASH_CTL = 0x1046c,
106 MV_GPIO_PORT_CTL = 0x104f0,
107 MV_RESET_CFG = 0x180d8,
109 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
110 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
111 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
112 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
115 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
117 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
118 * CRPB needs alignment on a 256B boundary. Size == 256B
119 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
121 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
122 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
124 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
127 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
128 MV_PORT_HC_SHIFT = 2,
129 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
133 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
134 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
135 /* SoC integrated controllers, no PCI interface */
136 MV_FLAG_SOC = (1 << 28),
138 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
139 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
140 ATA_FLAG_PIO_POLLING,
141 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
143 CRQB_FLAG_READ = (1 << 0),
145 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
146 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
147 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
148 CRQB_CMD_ADDR_SHIFT = 8,
149 CRQB_CMD_CS = (0x2 << 11),
150 CRQB_CMD_LAST = (1 << 15),
152 CRPB_FLAG_STATUS_SHIFT = 8,
153 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
154 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
156 EPRD_FLAG_END_OF_TBL = (1 << 31),
158 /* PCI interface registers */
160 PCI_COMMAND_OFS = 0xc00,
162 PCI_MAIN_CMD_STS_OFS = 0xd30,
163 STOP_PCI_MASTER = (1 << 2),
164 PCI_MASTER_EMPTY = (1 << 3),
165 GLOB_SFT_RST = (1 << 4),
168 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
169 MV_PCI_DISC_TIMER = 0xd04,
170 MV_PCI_MSI_TRIGGER = 0xc38,
171 MV_PCI_SERR_MASK = 0xc28,
172 MV_PCI_XBAR_TMOUT = 0x1d04,
173 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
174 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
175 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
176 MV_PCI_ERR_COMMAND = 0x1d50,
178 PCI_IRQ_CAUSE_OFS = 0x1d58,
179 PCI_IRQ_MASK_OFS = 0x1d5c,
180 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
182 PCIE_IRQ_CAUSE_OFS = 0x1900,
183 PCIE_IRQ_MASK_OFS = 0x1910,
184 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
186 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
187 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
188 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
189 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
190 PORT0_ERR = (1 << 0), /* shift by port # */
191 PORT0_DONE = (1 << 1), /* shift by port # */
192 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
193 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
195 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
196 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
197 PORTS_0_3_COAL_DONE = (1 << 8),
198 PORTS_4_7_COAL_DONE = (1 << 17),
199 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
200 GPIO_INT = (1 << 22),
201 SELF_INT = (1 << 23),
202 TWSI_INT = (1 << 24),
203 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
204 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
205 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
206 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
207 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
209 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
211 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
213 /* SATAHC registers */
216 HC_IRQ_CAUSE_OFS = 0x14,
217 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
218 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
219 DEV_IRQ = (1 << 8), /* shift by port # */
221 /* Shadow block registers */
223 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
226 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
227 SATA_ACTIVE_OFS = 0x350,
228 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
231 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
236 SATA_IFCTL_OFS = 0x344,
237 SATA_IFSTAT_OFS = 0x34c,
238 VENDOR_UNIQUE_FIS_OFS = 0x35c,
241 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
246 SATA_INTERFACE_CFG = 0x050,
248 MV_M2_PREAMP_MASK = 0x7e0,
252 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
253 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
254 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
255 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
256 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
257 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
258 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
260 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
261 EDMA_ERR_IRQ_MASK_OFS = 0xc,
262 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
263 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
264 EDMA_ERR_DEV = (1 << 2), /* device error */
265 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
266 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
267 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
268 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
269 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
270 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
271 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
272 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
273 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
274 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
275 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
277 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
278 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
279 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
280 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
281 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
283 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
285 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
286 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
287 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
288 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
289 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
290 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
292 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
294 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
295 EDMA_ERR_OVERRUN_5 = (1 << 5),
296 EDMA_ERR_UNDERRUN_5 = (1 << 6),
298 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
299 EDMA_ERR_LNK_CTRL_RX_1 |
300 EDMA_ERR_LNK_CTRL_RX_3 |
301 EDMA_ERR_LNK_CTRL_TX |
302 /* temporary, until we fix hotplug: */
303 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
305 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
315 EDMA_ERR_LNK_CTRL_RX_2 |
316 EDMA_ERR_LNK_DATA_RX |
317 EDMA_ERR_LNK_DATA_TX |
318 EDMA_ERR_TRANS_PROTO,
320 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
325 EDMA_ERR_UNDERRUN_5 |
326 EDMA_ERR_SELF_DIS_5 |
332 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
333 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
335 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
336 EDMA_REQ_Q_PTR_SHIFT = 5,
338 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
339 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
340 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
341 EDMA_RSP_Q_PTR_SHIFT = 3,
343 EDMA_CMD_OFS = 0x28, /* EDMA command register */
344 EDMA_EN = (1 << 0), /* enable EDMA */
345 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
346 ATA_RST = (1 << 2), /* reset trans/link/phy */
348 EDMA_IORDY_TMOUT = 0x34,
351 /* Host private flags (hp_flags) */
352 MV_HP_FLAG_MSI = (1 << 0),
353 MV_HP_ERRATA_50XXB0 = (1 << 1),
354 MV_HP_ERRATA_50XXB2 = (1 << 2),
355 MV_HP_ERRATA_60X1B2 = (1 << 3),
356 MV_HP_ERRATA_60X1C0 = (1 << 4),
357 MV_HP_ERRATA_XX42A0 = (1 << 5),
358 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
359 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
360 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
361 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
363 /* Port private flags (pp_flags) */
364 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
365 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
368 #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
369 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
370 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
371 #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
374 /* DMA boundary 0xffff is required by the s/g splitting
375 * we need on /length/ in mv_fill-sg().
377 MV_DMA_BOUNDARY = 0xffffU,
379 /* mask of register bits containing lower 32 bits
380 * of EDMA request queue DMA address
382 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
384 /* ditto, for response queue */
385 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
399 /* Command ReQuest Block: 32B */
415 /* Command ResPonse Block: 8B */
422 /* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
430 struct mv_port_priv {
431 struct mv_crqb *crqb;
433 struct mv_crpb *crpb;
435 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
436 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
438 unsigned int req_idx;
439 unsigned int resp_idx;
444 struct mv_port_signal {
449 struct mv_host_priv {
451 struct mv_port_signal signal[8];
452 const struct mv_hw_ops *ops;
455 void __iomem *main_cause_reg_addr;
456 void __iomem *main_mask_reg_addr;
461 * These consistent DMA memory pools give us guaranteed
462 * alignment for hardware-accessed data structures,
463 * and less memory waste in accomplishing the alignment.
465 struct dma_pool *crqb_pool;
466 struct dma_pool *crpb_pool;
467 struct dma_pool *sg_tbl_pool;
471 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
473 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
474 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
476 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
478 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
479 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
482 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
483 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
484 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
485 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
486 static int mv_port_start(struct ata_port *ap);
487 static void mv_port_stop(struct ata_port *ap);
488 static void mv_qc_prep(struct ata_queued_cmd *qc);
489 static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
490 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
491 static int mv_hardreset(struct ata_link *link, unsigned int *class,
492 unsigned long deadline);
493 static void mv_eh_freeze(struct ata_port *ap);
494 static void mv_eh_thaw(struct ata_port *ap);
495 static void mv6_dev_config(struct ata_device *dev);
497 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
499 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
500 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
502 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
504 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
505 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
507 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
509 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
510 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
512 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
514 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
515 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
517 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
519 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
520 void __iomem *mmio, unsigned int n_hc);
521 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
523 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
524 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
525 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
526 unsigned int port_no);
527 static int mv_stop_edma(struct ata_port *ap);
528 static int mv_stop_edma_engine(void __iomem *port_mmio);
529 static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
531 static void mv_pmp_select(struct ata_port *ap, int pmp);
532 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
533 unsigned long deadline);
534 static int mv_softreset(struct ata_link *link, unsigned int *class,
535 unsigned long deadline);
537 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
538 * because we have to allow room for worst case splitting of
539 * PRDs for 64K boundaries in mv_fill_sg().
541 static struct scsi_host_template mv5_sht = {
542 ATA_BASE_SHT(DRV_NAME),
543 .sg_tablesize = MV_MAX_SG_CT / 2,
544 .dma_boundary = MV_DMA_BOUNDARY,
547 static struct scsi_host_template mv6_sht = {
548 ATA_NCQ_SHT(DRV_NAME),
549 .can_queue = MV_MAX_Q_DEPTH - 1,
550 .sg_tablesize = MV_MAX_SG_CT / 2,
551 .dma_boundary = MV_DMA_BOUNDARY,
554 static struct ata_port_operations mv5_ops = {
555 .inherits = &ata_sff_port_ops,
557 .qc_prep = mv_qc_prep,
558 .qc_issue = mv_qc_issue,
560 .freeze = mv_eh_freeze,
562 .hardreset = mv_hardreset,
563 .error_handler = ata_std_error_handler, /* avoid SFF EH */
564 .post_internal_cmd = ATA_OP_NULL,
566 .scr_read = mv5_scr_read,
567 .scr_write = mv5_scr_write,
569 .port_start = mv_port_start,
570 .port_stop = mv_port_stop,
573 static struct ata_port_operations mv6_ops = {
574 .inherits = &mv5_ops,
575 .qc_defer = sata_pmp_qc_defer_cmd_switch,
576 .dev_config = mv6_dev_config,
577 .scr_read = mv_scr_read,
578 .scr_write = mv_scr_write,
580 .pmp_hardreset = mv_pmp_hardreset,
581 .pmp_softreset = mv_softreset,
582 .softreset = mv_softreset,
583 .error_handler = sata_pmp_error_handler,
586 static struct ata_port_operations mv_iie_ops = {
587 .inherits = &mv6_ops,
588 .qc_defer = ata_std_qc_defer, /* FIS-based switching */
589 .dev_config = ATA_OP_NULL,
590 .qc_prep = mv_qc_prep_iie,
593 static const struct ata_port_info mv_port_info[] = {
595 .flags = MV_COMMON_FLAGS,
596 .pio_mask = 0x1f, /* pio0-4 */
597 .udma_mask = ATA_UDMA6,
598 .port_ops = &mv5_ops,
601 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
602 .pio_mask = 0x1f, /* pio0-4 */
603 .udma_mask = ATA_UDMA6,
604 .port_ops = &mv5_ops,
607 .flags = MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
608 .pio_mask = 0x1f, /* pio0-4 */
609 .udma_mask = ATA_UDMA6,
610 .port_ops = &mv5_ops,
613 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
614 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
616 .pio_mask = 0x1f, /* pio0-4 */
617 .udma_mask = ATA_UDMA6,
618 .port_ops = &mv6_ops,
621 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
622 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
623 ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
624 .pio_mask = 0x1f, /* pio0-4 */
625 .udma_mask = ATA_UDMA6,
626 .port_ops = &mv6_ops,
629 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
630 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
632 .pio_mask = 0x1f, /* pio0-4 */
633 .udma_mask = ATA_UDMA6,
634 .port_ops = &mv_iie_ops,
637 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
638 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
640 .pio_mask = 0x1f, /* pio0-4 */
641 .udma_mask = ATA_UDMA6,
642 .port_ops = &mv_iie_ops,
645 .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
646 ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
647 ATA_FLAG_NCQ | MV_FLAG_SOC,
648 .pio_mask = 0x1f, /* pio0-4 */
649 .udma_mask = ATA_UDMA6,
650 .port_ops = &mv_iie_ops,
654 static const struct pci_device_id mv_pci_tbl[] = {
655 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
656 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
657 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
658 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
659 /* RocketRAID 1740/174x have different identifiers */
660 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
661 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
663 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
664 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
665 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
666 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
667 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
669 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
672 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
674 /* Marvell 7042 support */
675 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
677 /* Highpoint RocketRAID PCIe series */
678 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
679 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
681 { } /* terminate list */
684 static const struct mv_hw_ops mv5xxx_ops = {
685 .phy_errata = mv5_phy_errata,
686 .enable_leds = mv5_enable_leds,
687 .read_preamp = mv5_read_preamp,
688 .reset_hc = mv5_reset_hc,
689 .reset_flash = mv5_reset_flash,
690 .reset_bus = mv5_reset_bus,
693 static const struct mv_hw_ops mv6xxx_ops = {
694 .phy_errata = mv6_phy_errata,
695 .enable_leds = mv6_enable_leds,
696 .read_preamp = mv6_read_preamp,
697 .reset_hc = mv6_reset_hc,
698 .reset_flash = mv6_reset_flash,
699 .reset_bus = mv_reset_pci_bus,
702 static const struct mv_hw_ops mv_soc_ops = {
703 .phy_errata = mv6_phy_errata,
704 .enable_leds = mv_soc_enable_leds,
705 .read_preamp = mv_soc_read_preamp,
706 .reset_hc = mv_soc_reset_hc,
707 .reset_flash = mv_soc_reset_flash,
708 .reset_bus = mv_soc_reset_bus,
715 static inline void writelfl(unsigned long data, void __iomem *addr)
718 (void) readl(addr); /* flush to avoid PCI posted write */
721 static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
723 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
726 static inline unsigned int mv_hc_from_port(unsigned int port)
728 return port >> MV_PORT_HC_SHIFT;
731 static inline unsigned int mv_hardport_from_port(unsigned int port)
733 return port & MV_PORT_MASK;
736 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
739 return mv_hc_base(base, mv_hc_from_port(port));
742 static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
744 return mv_hc_base_from_port(base, port) +
745 MV_SATAHC_ARBTR_REG_SZ +
746 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
749 static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
751 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
752 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
754 return hc_mmio + ofs;
757 static inline void __iomem *mv_host_base(struct ata_host *host)
759 struct mv_host_priv *hpriv = host->private_data;
763 static inline void __iomem *mv_ap_base(struct ata_port *ap)
765 return mv_port_base(mv_host_base(ap->host), ap->port_no);
768 static inline int mv_get_hc_count(unsigned long port_flags)
770 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
/*
 * mv_set_edma_ptrs - program the EDMA request/response queue base
 * addresses and in/out pointers for one port from the cached software
 * indices in @pp.
 *
 * NOTE(review): this excerpt is missing several original lines (the
 * opening brace, the 'index' declaration, and the 'else' keywords of
 * the errata branches); code tokens below are left byte-identical.
 */
773 static void mv_set_edma_ptrs(void __iomem *port_mmio,
774 struct mv_host_priv *hpriv,
775 struct mv_port_priv *pp)
/* request queue: mask sw index into hw range, shift into pointer field */
780 * initialize request queue
782 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
/* CRQB ring base must be 1KB aligned (low 10 bits clear) */
784 WARN_ON(pp->crqb_dma & 0x3ff);
785 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
786 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
787 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
/* errata XX42A0: OUT pointer register also needs the low base address */
789 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
790 writelfl((pp->crqb_dma & 0xffffffff) | index,
791 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
793 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
/* response queue: CRPB ring base must be 256B aligned (low 8 bits clear) */
796 * initialize response queue
798 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
800 WARN_ON(pp->crpb_dma & 0xff);
801 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
803 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
804 writelfl((pp->crpb_dma & 0xffffffff) | index,
805 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
807 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
809 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
810 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
814 * mv_start_dma - Enable eDMA engine
815 * @base: port base address
816 * @pp: port private data
818 * Verify the local cache of the eDMA state is accurate with a
822 * Inherited from caller.
/*
 * mv_start_dma - enable the eDMA engine for @ap if not already enabled,
 * first clearing stale EDMA/HC interrupt state, configuring EDMA for
 * NCQ or non-NCQ, and programming the queue pointers.
 *
 * NOTE(review): several original lines are missing from this excerpt
 * (opening braces, the body of the NCQ-mode-mismatch branch, and closing
 * braces); code tokens below are left byte-identical.
 */
824 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
825 struct mv_port_priv *pp, u8 protocol)
827 int want_ncq = (protocol == ATA_PROT_NCQ);
/* if EDMA is up but in the wrong NCQ mode, it must be restarted
 * (the action taken on mismatch is on a line missing from this excerpt) */
829 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
830 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
831 if (want_ncq != using_ncq)
834 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
835 struct mv_host_priv *hpriv = ap->host->private_data;
836 int hard_port = mv_hardport_from_port(ap->port_no);
837 void __iomem *hc_mmio = mv_hc_base_from_port(
838 mv_host_base(ap->host), hard_port);
839 u32 hc_irq_cause, ipending;
841 /* clear EDMA event indicators, if any */
842 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
844 /* clear EDMA interrupt indicator, if any */
845 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
846 ipending = (DEV_IRQ << hard_port) |
847 (CRPB_DMA_DONE << hard_port);
848 if (hc_irq_cause & ipending) {
849 writelfl(hc_irq_cause & ~ipending,
850 hc_mmio + HC_IRQ_CAUSE_OFS);
853 mv_edma_cfg(ap, want_ncq);
855 /* clear FIS IRQ Cause */
856 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
858 mv_set_edma_ptrs(port_mmio, hpriv, pp);
/* finally turn the engine on and mirror that in our cached flags */
860 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
861 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
863 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
867 * mv_stop_edma_engine - Disable eDMA engine
868 * @port_mmio: io base address
871 * Inherited from caller.
873 static int mv_stop_edma_engine(void __iomem *port_mmio)
877 /* Disable eDMA. The disable bit auto clears. */
878 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
880 /* Wait for the chip to confirm eDMA is off. */
881 for (i = 10000; i > 0; i--) {
882 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
883 if (!(reg & EDMA_EN))
890 static int mv_stop_edma(struct ata_port *ap)
892 void __iomem *port_mmio = mv_ap_base(ap);
893 struct mv_port_priv *pp = ap->private_data;
895 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
897 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
898 if (mv_stop_edma_engine(port_mmio)) {
899 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
/*
 * mv_dump_mem - debug helper: dump @bytes of MMIO space starting at
 * @start, four 32-bit words per output line.
 *
 * NOTE(review): the variable declarations, loop-increment lines and
 * closing braces are missing from this excerpt; tokens left byte-identical.
 */
906 static void mv_dump_mem(void __iomem *start, unsigned bytes)
909 for (b = 0; b < bytes; ) {
910 DPRINTK("%p: ", start + b);
911 for (w = 0; b < bytes && w < 4; w++) {
912 printk("%08x ", readl(start + b));
/*
 * mv_dump_pci_cfg - debug helper: dump @bytes of PCI config space of
 * @pdev, four dwords per output line, via pci_read_config_dword().
 *
 * NOTE(review): declarations, the printk of 'dw', loop increments and
 * closing braces are missing from this excerpt; tokens left byte-identical.
 */
920 static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
925 for (b = 0; b < bytes; ) {
926 DPRINTK("%02x: ", b);
927 for (w = 0; b < bytes && w < 4; w++) {
928 (void) pci_read_config_dword(pdev, b, &dw);
/*
 * mv_dump_all_regs - debug helper: dump PCI config space, global PCI
 * regs, each host controller's regs, and each port's EDMA/SATA regs.
 * With a specific @port it narrows the dump to that port's HC.
 *
 * NOTE(review): the opening brace, the if/else framing around the
 * "all ports" vs "one port" setup, and the HAS_PCI guard lines are
 * missing from this excerpt; tokens left byte-identical.
 */
936 static void mv_dump_all_regs(void __iomem *mmio_base, int port,
937 struct pci_dev *pdev)
940 void __iomem *hc_base = mv_hc_base(mmio_base,
941 port >> MV_PORT_HC_SHIFT);
942 void __iomem *port_base;
943 int start_port, num_ports, p, start_hc, num_hcs, hc;
/* default: dump everything (8 ports); narrowed below for a single port */
946 start_hc = start_port = 0;
947 num_ports = 8; /* shld be benign for 4 port devs */
950 start_hc = port >> MV_PORT_HC_SHIFT;
952 num_ports = num_hcs = 1;
954 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
955 num_ports > 1 ? num_ports - 1 : start_port);
958 DPRINTK("PCI config space regs:\n");
959 mv_dump_pci_cfg(pdev, 0x68);
961 DPRINTK("PCI regs:\n");
962 mv_dump_mem(mmio_base+0xc00, 0x3c);
963 mv_dump_mem(mmio_base+0xd00, 0x34);
964 mv_dump_mem(mmio_base+0xf00, 0x4);
965 mv_dump_mem(mmio_base+0x1d00, 0x6c);
966 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
967 hc_base = mv_hc_base(mmio_base, hc);
968 DPRINTK("HC regs (HC %i):\n", hc);
969 mv_dump_mem(hc_base, 0x1c);
971 for (p = start_port; p < start_port + num_ports; p++) {
972 port_base = mv_port_base(mmio_base, p);
973 DPRINTK("EDMA regs (port %i):\n", p);
974 mv_dump_mem(port_base, 0x54);
975 DPRINTK("SATA regs (port %i):\n", p);
976 mv_dump_mem(port_base+0x300, 0x60);
/*
 * mv_scr_offset - map an SCR register index to its per-port MMIO offset.
 *
 * NOTE(review): the original switch statement (case labels, the
 * unknown-register sentinel, and the return) is missing from this
 * excerpt; only three surviving lines remain, left byte-identical.
 * Callers (mv_scr_read/write) treat 0xffffffffU as "invalid register".
 */
981 static unsigned int mv_scr_offset(unsigned int sc_reg_in)
/* status/error/control SCRs are contiguous u32s from SATA_STATUS_OFS */
989 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
992 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1001 static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1003 unsigned int ofs = mv_scr_offset(sc_reg_in);
1005 if (ofs != 0xffffffffU) {
1006 *val = readl(mv_ap_base(ap) + ofs);
1012 static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1014 unsigned int ofs = mv_scr_offset(sc_reg_in);
1016 if (ofs != 0xffffffffU) {
1017 writelfl(val, mv_ap_base(ap) + ofs);
1023 static void mv6_dev_config(struct ata_device *adev)
1026 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1028 * Gen-II does not support NCQ over a port multiplier
1029 * (no FIS-based switching).
1031 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1032 * See mv_qc_prep() for more info.
1034 if (adev->flags & ATA_DFLAG_NCQ) {
1035 if (sata_pmp_attached(adev->link->ap))
1036 adev->flags &= ~ATA_DFLAG_NCQ;
1037 else if (adev->max_sectors > ATA_MAX_SECTORS)
1038 adev->max_sectors = ATA_MAX_SECTORS;
1042 static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
1044 u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
1046 * Various bit settings required for operation
1047 * in FIS-based switching (fbs) mode on GenIIe:
1049 old_fcfg = readl(port_mmio + FIS_CFG_OFS);
1050 old_ltmode = readl(port_mmio + LTMODE_OFS);
1052 new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
1053 new_ltmode = old_ltmode | LTMODE_BIT8;
1054 } else { /* disable fbs */
1055 new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
1056 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1058 if (new_fcfg != old_fcfg)
1059 writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
1060 if (new_ltmode != old_ltmode)
1061 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
/*
 * mv_edma_cfg - build and program the EDMA_CFG register for @ap,
 * selecting per-generation options and (on GenIIe) FIS-based switching;
 * also keeps pp->pp_flags' NCQ bit in sync with @want_ncq.
 *
 * NOTE(review): the 'cfg' declaration, several closing braces and the
 * if/else framing around the NCQ flag update are missing from this
 * excerpt; code tokens below are left byte-identical.
 */
1064 static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
1067 struct mv_port_priv *pp = ap->private_data;
1068 struct mv_host_priv *hpriv = ap->host->private_data;
1069 void __iomem *port_mmio = mv_ap_base(ap);
1071 /* set up non-NCQ EDMA configuration */
1072 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
1074 if (IS_GEN_I(hpriv))
1075 cfg |= (1 << 8); /* enab config burst size mask */
1077 else if (IS_GEN_II(hpriv))
1078 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1080 else if (IS_GEN_IIE(hpriv)) {
1081 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1082 cfg |= (1 << 22); /* enab 4-entry host queue cache */
1083 cfg |= (1 << 18); /* enab early completion */
1084 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
/* GenIIe + NCQ behind a port multiplier needs FIS-based switching */
1086 if (want_ncq && sata_pmp_attached(ap)) {
1087 cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
1088 mv_config_fbs(port_mmio, 1);
1090 mv_config_fbs(port_mmio, 0);
/* mirror the requested NCQ mode into cfg and the cached port flags */
1095 cfg |= EDMA_CFG_NCQ;
1096 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1098 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1100 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
/*
 * mv_port_free_dma_mem - return the port's CRQB/CRPB rings and sg
 * tables to their dma_pools and NULL the pointers.
 *
 * NOTE(review): the guards ('if (pp->crqb)' etc.), pointer-NULLing
 * lines, the sg_tbl[tag] argument line and closing braces are missing
 * from this excerpt; code tokens below are left byte-identical.
 */
1103 static void mv_port_free_dma_mem(struct ata_port *ap)
1105 struct mv_host_priv *hpriv = ap->host->private_data;
1106 struct mv_port_priv *pp = ap->private_data;
1110 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1114 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1118 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1119 * For later hardware, we have one unique sg_tbl per NCQ tag.
/* only tag 0 owns real memory on Gen-I; other tags alias it */
1121 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1122 if (pp->sg_tbl[tag]) {
1123 if (tag == 0 || !IS_GEN_I(hpriv))
1124 dma_pool_free(hpriv->sg_tbl_pool,
1126 pp->sg_tbl_dma[tag]);
1127 pp->sg_tbl[tag] = NULL;
1133 * mv_port_start - Port specific init/start routine.
1134 * @ap: ATA channel to manipulate
1136 * Allocate and point to DMA memory, init port private memory,
1140 * Inherited from caller.
/*
 * mv_port_start - allocate per-port private data and DMA rings
 * (CRQB, CRPB, per-tag sg tables) from the host's dma_pools; on any
 * allocation failure, unwinds via mv_port_free_dma_mem().
 *
 * NOTE(review): the '-ENOMEM' early returns after each allocation, the
 * 'else' arm framing of the sg_tbl aliasing, 'return 0;' and closing
 * braces are missing from this excerpt; tokens left byte-identical.
 */
1142 static int mv_port_start(struct ata_port *ap)
1144 struct device *dev = ap->host->dev;
1145 struct mv_host_priv *hpriv = ap->host->private_data;
1146 struct mv_port_priv *pp;
/* devm allocation: freed automatically when the device goes away */
1149 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1152 ap->private_data = pp;
1154 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1157 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
1159 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1161 goto out_port_free_dma_mem;
1162 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
1165 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1166 * For later hardware, we need one unique sg_tbl per NCQ tag.
1168 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1169 if (tag == 0 || !IS_GEN_I(hpriv)) {
1170 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1171 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1172 if (!pp->sg_tbl[tag])
1173 goto out_port_free_dma_mem;
/* Gen-I: every tag aliases the single tag-0 table */
1175 pp->sg_tbl[tag] = pp->sg_tbl[0];
1176 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1181 out_port_free_dma_mem:
1182 mv_port_free_dma_mem(ap);
1187 * mv_port_stop - Port specific cleanup/stop routine.
1188 * @ap: ATA channel to manipulate
1190 * Stop DMA, cleanup port memory.
1193 * This routine uses the host lock to protect the DMA stop.
/* NOTE(review): the locked mv_stop_edma() call implied by the comment
 * above is in lines elided from this excerpt. */
1195 static void mv_port_stop(struct ata_port *ap)
1198 mv_port_free_dma_mem(ap);
1202 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1203 * @qc: queued command whose SG list to source from
1205 * Populate the SG list and mark the last entry.
1208 * Inherited from caller.
1210 static void mv_fill_sg(struct ata_queued_cmd *qc)
1212 struct mv_port_priv *pp = qc->ap->private_data;
1213 struct scatterlist *sg;
1214 struct mv_sg *mv_sg, *last_sg = NULL;
/* One ePRD table per NCQ tag (see mv_port_start). */
1217 mv_sg = pp->sg_tbl[qc->tag];
1218 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1219 dma_addr_t addr = sg_dma_address(sg);
1220 u32 sg_len = sg_dma_len(sg);
/* Hardware ePRD length field is 16 bits: split any segment that
 * would cross a 64KB boundary from its starting offset. */
1223 u32 offset = addr & 0xffff;
1226 if ((offset + sg_len > 0x10000))
1227 len = 0x10000 - offset;
1229 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
/* Two 16-bit shifts instead of >> 32: safe even when dma_addr_t
 * is only 32 bits wide. */
1230 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
1231 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
/* Tag the final entry so the EDMA engine knows where the table ends. */
1241 if (likely(last_sg))
1242 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
/*
 * Pack one ATA register write into a 16-bit CRQB command word:
 * data byte, shadow-register address, "command select" bit, and the
 * LAST marker on the final word of the sequence.
 */
1245 static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
1247 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
1248 (last ? CRQB_CMD_LAST : 0);
1249 *cmdw = cpu_to_le16(tmp);
1253 * mv_qc_prep - Host specific command preparation.
1254 * @qc: queued command to prepare
1256 * This routine simply redirects to the general purpose routine
1257 * if command is not DMA. Else, it handles prep of the CRQB
1258 * (command request block), does some sanity checking, and calls
1259 * the SG load routine.
1262 * Inherited from caller.
/* NOTE(review): excerpt elides lines (declarations, `return`s after the
 * non-DMA guard, and some case labels in the switch are not visible). */
1264 static void mv_qc_prep(struct ata_queued_cmd *qc)
1266 struct ata_port *ap = qc->ap;
1267 struct mv_port_priv *pp = ap->private_data;
1269 struct ata_taskfile *tf;
/* Only DMA/NCQ protocols use the EDMA request queue. */
1273 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1274 (qc->tf.protocol != ATA_PROT_NCQ))
1277 /* Fill in command request block
1279 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1280 flags |= CRQB_FLAG_READ;
1281 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1282 flags |= qc->tag << CRQB_TAG_SHIFT;
/* PM port field: route the command to the right port-multiplier port. */
1283 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1285 /* get current queue index from software */
1286 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
1288 pp->crqb[in_index].sg_addr =
1289 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1290 pp->crqb[in_index].sg_addr_hi =
1291 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1292 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1294 cw = &pp->crqb[in_index].ata_cmd[0];
1297 /* Sadly, the CRQB cannot accomodate all registers--there are
1298 * only 11 bytes...so we must pick and choose required
1299 * registers based on the command. So, we drop feature and
1300 * hob_feature for [RW] DMA commands, but they are needed for
1301 * NCQ. NCQ will drop hob_nsect.
1303 switch (tf->command) {
1305 case ATA_CMD_READ_EXT:
1307 case ATA_CMD_WRITE_EXT:
1308 case ATA_CMD_WRITE_FUA_EXT:
1309 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1311 case ATA_CMD_FPDMA_READ:
1312 case ATA_CMD_FPDMA_WRITE:
/* NCQ encodes the sector count in the FEATURE registers. */
1313 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
1314 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1317 /* The only other commands EDMA supports in non-queued and
1318 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1319 * of which are defined/used by Linux. If we get here, this
1320 * driver needs work.
1322 * FIXME: modify libata to give qc_prep a return value and
1323 * return error here.
1325 BUG_ON(tf->command);
/* Common tail: LBA48 register pairs, device, and finally the command
 * byte with the LAST marker set. */
1328 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1329 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1330 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1331 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1332 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1333 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1334 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1335 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1336 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
/* Skip SG setup for commands with no data payload mapped. */
1338 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1344 * mv_qc_prep_iie - Host specific command preparation.
1345 * @qc: queued command to prepare
1347 * This routine simply redirects to the general purpose routine
1348 * if command is not DMA. Else, it handles prep of the CRQB
1349 * (command request block), does some sanity checking, and calls
1350 * the SG load routine.
1353 * Inherited from caller.
/* Gen IIE variant of mv_qc_prep(): the IIE CRQB carries the taskfile as
 * packed 32-bit words rather than register/value command words.
 * NOTE(review): excerpt elides lines (returns, declarations). */
1355 static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1357 struct ata_port *ap = qc->ap;
1358 struct mv_port_priv *pp = ap->private_data;
1359 struct mv_crqb_iie *crqb;
1360 struct ata_taskfile *tf;
1364 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1365 (qc->tf.protocol != ATA_PROT_NCQ))
1368 /* Fill in Gen IIE command request block */
1369 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1370 flags |= CRQB_FLAG_READ;
1372 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
1373 flags |= qc->tag << CRQB_TAG_SHIFT;
/* IIE also wants the tag in the host-queue field. */
1374 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
1375 flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
1377 /* get current queue index from software */
1378 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
/* Reinterpret the shared CRQB ring slot as the IIE layout. */
1380 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
1381 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1382 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
1383 crqb->flags = cpu_to_le32(flags);
1386 crqb->ata_cmd[0] = cpu_to_le32(
1387 (tf->command << 16) |
1390 crqb->ata_cmd[1] = cpu_to_le32(
1396 crqb->ata_cmd[2] = cpu_to_le32(
1397 (tf->hob_lbal << 0) |
1398 (tf->hob_lbam << 8) |
1399 (tf->hob_lbah << 16) |
1400 (tf->hob_feature << 24)
1402 crqb->ata_cmd[3] = cpu_to_le32(
1404 (tf->hob_nsect << 8)
/* No SG table needed when the qc has no mapped data. */
1407 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1413 * mv_qc_issue - Initiate a command to the host
1414 * @qc: queued command to start
1416 * This routine simply redirects to the general purpose routine
1417 * if command is not DMA. Else, it sanity checks our local
1418 * caches of the request producer/consumer indices then enables
1419 * DMA and bumps the request producer index.
1422 * Inherited from caller.
1424 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1426 struct ata_port *ap = qc->ap;
1427 void __iomem *port_mmio = mv_ap_base(ap);
1428 struct mv_port_priv *pp = ap->private_data;
1431 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1432 (qc->tf.protocol != ATA_PROT_NCQ)) {
1434 * We're about to send a non-EDMA capable command to the
1435 * port. Turn off EDMA so there won't be problems accessing
1436 * shadow block, etc registers.
/* Route the command to the correct PM device, then hand off to the
 * generic SFF issue path. */
1439 mv_pmp_select(ap, qc->dev->link->pmp);
1440 return ata_sff_qc_issue(qc);
1443 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
/* NOTE(review): the req_idx increment is in an elided line; in_index
 * here is the post-increment producer slot. */
1447 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
1449 /* and write the request in pointer to kick the EDMA to life */
1450 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1451 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
1457 * mv_err_intr - Handle error interrupts on the port
1458 * @ap: ATA channel to manipulate
1459 * @reset_allowed: bool: 0 == don't trigger from reset here
1461 * In most cases, just clear the interrupt and move on. However,
1462 * some cases require an eDMA reset, which also performs a COMRESET.
1463 * The SERR case requires a clear of pending errors in the SATA
1464 * SERROR register. Finally, if the port disabled DMA,
1465 * update our cached copy to match.
1468 * Inherited from caller.
/* NOTE(review): the @reset_allowed doc above is stale -- the visible
 * signature takes (ap, qc); excerpt also elides several closing braces
 * and `else` arms below. */
1470 static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1472 void __iomem *port_mmio = mv_ap_base(ap);
1473 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1474 struct mv_port_priv *pp = ap->private_data;
1475 struct mv_host_priv *hpriv = ap->host->private_data;
1476 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1477 unsigned int action = 0, err_mask = 0;
1478 struct ata_eh_info *ehi = &ap->link.eh_info;
1480 ata_ehi_clear_desc(ehi);
1482 if (!edma_enabled) {
1483 /* just a guess: do we need to do this? should we
1484 * expand this, and do it in all cases?
/* Read-then-write-back clears the sticky SError bits. */
1486 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1487 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1490 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1492 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1495 * all generations share these EDMA error cause bits
1498 if (edma_err_cause & EDMA_ERR_DEV)
1499 err_mask |= AC_ERR_DEV;
1500 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
1501 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
1502 EDMA_ERR_INTRL_PAR)) {
1503 err_mask |= AC_ERR_ATA_BUS;
1504 action |= ATA_EH_RESET;
1505 ata_ehi_push_desc(ehi, "parity error");
1507 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1508 ata_ehi_hotplugged(ehi);
1509 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
1510 "dev disconnect" : "dev connect");
1511 action |= ATA_EH_RESET;
/* GEN_I chips use a different freeze mask and self-disable bit. */
1514 if (IS_GEN_I(hpriv)) {
1515 eh_freeze_mask = EDMA_EH_FREEZE_5;
1517 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1518 pp = ap->private_data;
/* Hardware turned EDMA off on its own; sync our cached flag. */
1519 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1520 ata_ehi_push_desc(ehi, "EDMA self-disable");
1523 eh_freeze_mask = EDMA_EH_FREEZE;
1525 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1526 pp = ap->private_data;
1527 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
1528 ata_ehi_push_desc(ehi, "EDMA self-disable");
1531 if (edma_err_cause & EDMA_ERR_SERR) {
1532 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1533 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
1534 err_mask = AC_ERR_ATA_BUS;
1535 action |= ATA_EH_RESET;
1539 /* Clear EDMA now that SERR cleanup done */
1540 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
/* Fallback when no specific cause matched (elided guard above). */
1543 err_mask = AC_ERR_OTHER;
1544 action |= ATA_EH_RESET;
1547 ehi->serror |= serr;
1548 ehi->action |= action;
/* Attribute the error to the active qc when there is one, else to
 * the link-level EH info. */
1551 qc->err_mask |= err_mask;
1553 ehi->err_mask |= err_mask;
1555 if (edma_err_cause & eh_freeze_mask)
1556 ata_port_freeze(ap);
/*
 * Handle a device interrupt for a port running in PIO mode: read the
 * status register (which also acks the IRQ) and complete the active qc.
 */
1561 static void mv_intr_pio(struct ata_port *ap)
1563 struct ata_queued_cmd *qc;
1566 /* ignore spurious intr if drive still BUSY */
1567 ata_status = readb(ap->ioaddr.status_addr);
1568 if (unlikely(ata_status & ATA_BUSY))
1571 /* get active ATA command */
1572 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1573 if (unlikely(!qc)) /* no active tag */
1575 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1578 /* and finally, complete the ATA command */
1579 qc->err_mask |= ac_err_mask(ata_status);
1580 ata_qc_complete(qc);
/*
 * Drain the EDMA response queue: walk CRPB entries from the software
 * out-pointer to the hardware in-pointer, completing each qc, then
 * write the updated out-pointer back to hardware.
 * NOTE(review): the surrounding loop construct and some declarations
 * are in lines elided from this excerpt.
 */
1583 static void mv_intr_edma(struct ata_port *ap)
1585 void __iomem *port_mmio = mv_ap_base(ap);
1586 struct mv_host_priv *hpriv = ap->host->private_data;
1587 struct mv_port_priv *pp = ap->private_data;
1588 struct ata_queued_cmd *qc;
1589 u32 out_index, in_index;
1590 bool work_done = false;
1592 /* get h/w response queue pointer */
1593 in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
1594 >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
1600 /* get s/w response queue last-read pointer, and compare */
1601 out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
/* Queue empty: nothing (more) to consume. */
1602 if (in_index == out_index)
1605 /* 50xx: get active ATA command */
1606 if (IS_GEN_I(hpriv))
1607 tag = ap->link.active_tag;
1609 /* Gen II/IIE: get active ATA command via tag, to enable
1610 * support for queueing. this works transparently for
1611 * queued and non-queued modes.
/* Tag comes from the CRPB id field (5 bits). */
1614 tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
1616 qc = ata_qc_from_tag(ap, tag);
1618 /* For non-NCQ mode, the lower 8 bits of status
1619 * are from EDMA_ERR_IRQ_CAUSE_OFS,
1620 * which should be zero if all went well.
1622 status = le16_to_cpu(pp->crpb[out_index].flags);
1623 if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
1624 mv_err_intr(ap, qc);
1628 /* and finally, complete the ATA command */
/* High byte of the CRPB flags is the ATA status register. */
1631 ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
1632 ata_qc_complete(qc);
1635 /* advance software response queue pointer, to
1636 * indicate (after the loop completes) to hardware
1637 * that we have consumed a response queue entry.
1644 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
1645 (out_index << EDMA_RSP_Q_PTR_SHIFT),
1646 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
1650 * mv_host_intr - Handle all interrupts on the given host controller
1651 * @host: host specific structure
1652 * @relevant: port error bits relevant to this host controller
1653 * @hc: which host controller we're to look at
1655 * Read then write clear the HC interrupt status then walk each
1656 * port connected to the HC and see if it needs servicing. Port
1657 * success ints are reported in the HC interrupt status reg, the
1658 * port error ints are reported in the higher level main
1659 * interrupt status register and thus are passed in via the
1660 * 'relevant' argument.
1663 * Inherited from caller.
/* NOTE(review): excerpt elides lines (the hc==0 branch pairing with the
 * port0 assignment below, and the EDMA/PIO dispatch tails). */
1665 static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
1667 struct mv_host_priv *hpriv = host->private_data;
1668 void __iomem *mmio = hpriv->base;
1669 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1671 int port, port0, last_port;
/* Second HC starts at port MV_PORTS_PER_HC (elided hc test above). */
1676 port0 = MV_PORTS_PER_HC;
1679 last_port = port0 + MV_PORTS_PER_HC;
1681 last_port = port0 + hpriv->n_ports;
1682 /* we'll need the HC success int register in most cases */
1683 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
/* Write-1-to-clear via complement: ack everything we just read. */
1687 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1689 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
1690 hc, relevant, hc_irq_cause);
1692 for (port = port0; port < last_port; port++) {
1693 struct ata_port *ap = host->ports[port];
1694 struct mv_port_priv *pp;
1695 int have_err_bits, hard_port, shift;
1697 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
1700 pp = ap->private_data;
1702 shift = port << 1; /* (port * 2) */
1703 if (port >= MV_PORTS_PER_HC)
1704 shift++; /* skip bit 8 in the HC Main IRQ reg */
1706 have_err_bits = ((PORT0_ERR << shift) & relevant);
1708 if (unlikely(have_err_bits)) {
1709 struct ata_queued_cmd *qc;
1711 qc = ata_qc_from_tag(ap, ap->link.active_tag);
/* Polled commands are owned by the poller, not the IRQ path. */
1712 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1715 mv_err_intr(ap, qc);
1719 hard_port = mv_hardport_from_port(port); /* range 0..3 */
/* EDMA-enabled ports complete via the response queue; otherwise a
 * plain device IRQ means a PIO completion. */
1721 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1722 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1725 if ((DEV_IRQ << hard_port) & hc_irq_cause)
/*
 * Handle a PCI-level error interrupt: log and clear the PCI IRQ cause,
 * then mark every online port's active command with AC_ERR_HOST_BUS
 * and freeze the port so EH resets it.
 */
1732 static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1734 struct mv_host_priv *hpriv = host->private_data;
1735 struct ata_port *ap;
1736 struct ata_queued_cmd *qc;
1737 struct ata_eh_info *ehi;
1738 unsigned int i, err_mask, printed = 0;
1741 err_cause = readl(mmio + hpriv->irq_cause_ofs);
1743 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1746 DPRINTK("All regs @ PCI error\n");
1747 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
/* Ack the PCI error cause bits. */
1749 writelfl(0, mmio + hpriv->irq_cause_ofs);
1751 for (i = 0; i < host->n_ports; i++) {
1752 ap = host->ports[i];
1753 if (!ata_link_offline(&ap->link)) {
1754 ehi = &ap->link.eh_info;
1755 ata_ehi_clear_desc(ehi);
/* `printed` gates the per-port description (guard elided here). */
1757 ata_ehi_push_desc(ehi,
1758 "PCI err cause 0x%08x", err_cause);
1759 err_mask = AC_ERR_HOST_BUS;
1760 ehi->action = ATA_EH_RESET;
1761 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1763 qc->err_mask |= err_mask;
1765 ehi->err_mask |= err_mask;
1767 ata_port_freeze(ap);
1773 * mv_interrupt - Main interrupt event handler
1775 * @dev_instance: private data; in this case the host structure
1777 * Read the read only register to determine if any host
1778 * controllers have pending interrupts. If so, call lower level
1779 * routine to handle. Also check for PCI errors which are only
1783 * This routine holds the host lock while processing pending
1786 static irqreturn_t mv_interrupt(int irq, void *dev_instance)
1788 struct ata_host *host = dev_instance;
1789 struct mv_host_priv *hpriv = host->private_data;
1790 unsigned int hc, handled = 0, n_hcs;
1791 void __iomem *mmio = hpriv->base;
1792 u32 irq_stat, irq_mask;
1794 /* Note to self: &host->lock == &ap->host->lock == ap->lock */
1795 spin_lock(&host->lock);
1797 irq_stat = readl(hpriv->main_cause_reg_addr);
1798 irq_mask = readl(hpriv->main_mask_reg_addr);
1800 /* check the cases where we either have nothing pending or have read
1801 * a bogus register value which can indicate HW removal or PCI fault
1803 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1806 n_hcs = mv_get_hc_count(host->ports[0]->flags);
/* PCI errors preempt per-HC servicing entirely. */
1808 if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
1809 mv_pci_error(host, mmio);
1811 goto out_unlock; /* skip all other HC irq handling */
1814 for (hc = 0; hc < n_hcs; hc++) {
/* Isolate this HC's slice of the main cause register. */
1815 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1817 mv_host_intr(host, relevant, hc);
1823 spin_unlock(&host->lock);
1825 return IRQ_RETVAL(handled);
/*
 * Map a SCR register index to its byte offset in the 50xx PHY block.
 * NOTE(review): the case labels, default (0xffffffffU sentinel used by
 * the callers below) and return are in elided lines.
 */
1828 static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1832 switch (sc_reg_in) {
1836 ofs = sc_reg_in * sizeof(u32);
/*
 * Read a SATA SCR register on 50xx parts; 0xffffffffU from
 * mv5_scr_offset() means "unsupported register" and is skipped
 * (error return in an elided line).
 */
1845 static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
1847 struct mv_host_priv *hpriv = ap->host->private_data;
1848 void __iomem *mmio = hpriv->base;
1849 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1850 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1852 if (ofs != 0xffffffffU) {
1853 *val = readl(addr + ofs);
/*
 * Write a SATA SCR register on 50xx parts; mirror of mv5_scr_read().
 * writelfl flushes the posted write.
 */
1859 static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
1861 struct mv_host_priv *hpriv = ap->host->private_data;
1862 void __iomem *mmio = hpriv->base;
1863 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
1864 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1866 if (ofs != 0xffffffffU) {
1867 writelfl(val, addr + ofs);
/*
 * Bus reset for 50xx chips. Early 5080 rev-0 parts get special
 * treatment of the PCI expansion-ROM BAR control register (the bit
 * manipulation between the read and write-back is elided here).
 */
1873 static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
1875 struct pci_dev *pdev = to_pci_dev(host->dev);
1878 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
1881 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1883 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1886 mv_reset_pci_bus(host, mmio);
/* Restore the flash-control register to its documented default. */
1889 static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1891 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
/*
 * Capture the board's PHY pre-emphasis and amplitude settings for port
 * @idx so mv5_phy_errata() can restore them after reset.
 */
1894 static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
1897 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1900 tmp = readl(phy_mmio + MV5_PHY_MODE);
1902 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1903 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
/*
 * Configure GPIO/ROM-BAR registers to drive the activity LEDs on
 * 50xx boards (the modification between read and write-back of the
 * ROM BAR control register is elided here).
 */
1906 static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
1910 writel(0, mmio + MV_GPIO_PORT_CTL);
1912 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1914 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1916 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
/*
 * Apply 50xx PHY errata workarounds to one port and restore the
 * board-specific pre-emphasis/amplitude values captured earlier by
 * mv5_read_preamp(). The 50XXB0 erratum additionally adjusts the
 * APM/squelch bits (the fix_apm_sq branch body is partly elided).
 */
1919 static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1922 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1923 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1925 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1928 tmp = readl(phy_mmio + MV5_LT_MODE);
1930 writel(tmp, phy_mmio + MV5_LT_MODE);
1932 tmp = readl(phy_mmio + MV5_PHY_CTL);
1935 writel(tmp, phy_mmio + MV5_PHY_CTL);
1938 tmp = readl(phy_mmio + MV5_PHY_MODE);
/* Re-apply the saved signal parameters (mask clear is elided). */
1940 tmp |= hpriv->signal[port].pre;
1941 tmp |= hpriv->signal[port].amps;
1942 writel(tmp, phy_mmio + MV5_PHY_MODE);
/* Shorthand for zeroing a port register; #undef'd after use (elided). */
1947 #define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * Hard-reset one 50xx port channel, then reprogram every EDMA queue
 * and error register back to reset defaults.
 */
1948 static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
1951 void __iomem *port_mmio = mv_port_base(mmio, port);
1954 * The datasheet warns against setting ATA_RST when EDMA is active
1955 * (but doesn't say what the problem might be). So we first try
1956 * to disable the EDMA engine before doing the ATA_RST operation.
1958 mv_reset_channel(hpriv, mmio, port);
1960 ZERO(0x028); /* command */
1961 writel(0x11f, port_mmio + EDMA_CFG_OFS);
1962 ZERO(0x004); /* timer */
1963 ZERO(0x008); /* irq err cause */
1964 ZERO(0x00c); /* irq err mask */
1965 ZERO(0x010); /* rq bah */
1966 ZERO(0x014); /* rq inp */
1967 ZERO(0x018); /* rq outp */
1968 ZERO(0x01c); /* respq bah */
1969 ZERO(0x024); /* respq outp */
1970 ZERO(0x020); /* respq inp */
1971 ZERO(0x02c); /* test control */
1972 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
/* ZERO redefined here to target the HC (not port) register block. */
1976 #define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * Reset one 50xx host-controller block (the ZERO() calls clearing its
 * registers and the bit manipulation at offset 0x20 are elided).
 */
1977 static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1980 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
1988 tmp = readl(hc_mmio + 0x20);
1991 writel(tmp, hc_mmio + 0x20);
/*
 * Full-chip reset for 50xx: reset every port of every host controller,
 * then each HC block itself.
 */
1995 static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1998 unsigned int hc, port;
2000 for (hc = 0; hc < n_hc; hc++) {
2001 for (port = 0; port < MV_PORTS_PER_HC; port++)
2002 mv5_reset_hc_port(hpriv, mmio,
2003 (hc * MV_PORTS_PER_HC) + port);
2005 mv5_reset_one_hc(hpriv, mmio, hc);
/* ZERO redefined once more, now against the chip-global mmio base. */
2012 #define ZERO(reg) writel(0, mmio + (reg))
/*
 * Reset PCI-facing chip state: clear timers, interrupt masks/causes and
 * error-capture registers, and restore the crossbar timeout default.
 */
2013 static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
2015 struct mv_host_priv *hpriv = host->private_data;
2018 tmp = readl(mmio + MV_PCI_MODE);
2020 writel(tmp, mmio + MV_PCI_MODE);
2022 ZERO(MV_PCI_DISC_TIMER);
2023 ZERO(MV_PCI_MSI_TRIGGER);
2024 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2025 ZERO(HC_MAIN_IRQ_MASK_OFS);
2026 ZERO(MV_PCI_SERR_MASK);
2027 ZERO(hpriv->irq_cause_ofs);
2028 ZERO(hpriv->irq_mask_ofs);
2029 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2030 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2031 ZERO(MV_PCI_ERR_ATTRIBUTE);
2032 ZERO(MV_PCI_ERR_COMMAND);
/*
 * 60xx flash reset: reuse the 50xx sequence, then additionally set
 * GPIO bits 5 and 6 in the port-control register.
 */
2036 static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2040 mv5_reset_flash(hpriv, mmio);
2042 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2044 tmp |= (1 << 5) | (1 << 6);
2045 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2049 * mv6_reset_hc - Perform the 6xxx global soft reset
2050 * @mmio: base address of the HBA
2052 * This routine only applies to 6xxx parts.
2055 * Inherited from caller.
/* Sequence: stop the PCI master, wait for its queues to drain, pulse
 * the global soft-reset bit, then clear it and re-enable the master.
 * NOTE(review): loop bodies/delays and the re-read of `t` are partly
 * elided; polling limits are visible as `i` countdowns. */
2057 static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2060 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2064 /* Following procedure defined in PCI "main command and status
2068 writel(t | STOP_PCI_MASTER, reg);
2070 for (i = 0; i < 1000; i++) {
2073 if (PCI_MASTER_EMPTY & t)
2076 if (!(PCI_MASTER_EMPTY & t)) {
2077 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2085 writel(t | GLOB_SFT_RST, reg);
2088 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2090 if (!(GLOB_SFT_RST & t)) {
2091 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2096 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2099 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2102 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2104 if (GLOB_SFT_RST & t) {
2105 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
/*
 * Capture per-port signal amplitude/pre-emphasis for 60xx parts.
 * If the reset-config register says the values are not available
 * (bit 0 clear), fall back to fixed defaults instead of reading
 * PHY_MODE2.
 */
2112 static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
2115 void __iomem *port_mmio;
2118 tmp = readl(mmio + MV_RESET_CFG);
2119 if ((tmp & (1 << 0)) == 0) {
2120 hpriv->signal[idx].amps = 0x7 << 8;
2121 hpriv->signal[idx].pre = 0x1 << 5;
2125 port_mmio = mv_port_base(mmio, idx);
2126 tmp = readl(port_mmio + PHY_MODE2);
2128 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2129 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
/* Enable the activity LEDs on 60xx boards via the GPIO port control. */
2132 static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
2134 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
/*
 * Apply 60xx PHY errata workarounds (60X1B2 / 60X1C0 revisions) to one
 * port, then restore the saved amplitude/pre-emphasis values.
 * NOTE(review): excerpt elides many lines here -- bit manipulations
 * between several read/write pairs and the IIE fixed-value writes are
 * not all visible.
 */
2137 static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
2140 void __iomem *port_mmio = mv_port_base(mmio, port);
2142 u32 hp_flags = hpriv->hp_flags;
2144 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2146 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
2149 if (fix_phy_mode2) {
2150 m2 = readl(port_mmio + PHY_MODE2);
2153 writel(m2, port_mmio + PHY_MODE2);
2157 m2 = readl(port_mmio + PHY_MODE2);
2158 m2 &= ~((1 << 16) | (1 << 31));
2159 writel(m2, port_mmio + PHY_MODE2);
2164 /* who knows what this magic does */
2165 tmp = readl(port_mmio + PHY_MODE3);
2168 writel(tmp, port_mmio + PHY_MODE3);
2170 if (fix_phy_mode4) {
2173 m4 = readl(port_mmio + PHY_MODE4);
/* 60X1B2: PHY_MODE3 must be preserved across the PHY_MODE4 write. */
2175 if (hp_flags & MV_HP_ERRATA_60X1B2)
2176 tmp = readl(port_mmio + PHY_MODE3);
2178 /* workaround for errata FEr SATA#10 (part 1) */
2179 m4 = (m4 & ~(1 << 1)) | (1 << 0);
2181 writel(m4, port_mmio + PHY_MODE4);
2183 if (hp_flags & MV_HP_ERRATA_60X1B2)
2184 writel(tmp, port_mmio + PHY_MODE3);
2187 /* Revert values of pre-emphasis and signal amps to the saved ones */
2188 m2 = readl(port_mmio + PHY_MODE2);
2190 m2 &= ~MV_M2_PREAMP_MASK;
2191 m2 |= hpriv->signal[port].amps;
2192 m2 |= hpriv->signal[port].pre;
2195 /* according to mvSata 3.6.1, some IIE values are fixed */
2196 if (IS_GEN_IIE(hpriv)) {
2201 writel(m2, port_mmio + PHY_MODE2);
2204 /* TODO: use the generic LED interface to configure the SATA Presence */
2205 /* & Acitivy LEDs on the board */
/* SoC variant: intentionally a no-op (see TODO above). */
2206 static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
/*
 * SoC variant of read_preamp: always read amplitude/pre-emphasis from
 * the port's PHY_MODE2 register (no reset-config fallback as on 60xx).
 */
2212 static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2215 void __iomem *port_mmio;
2218 port_mmio = mv_port_base(mmio, idx);
2219 tmp = readl(port_mmio + PHY_MODE2);
2221 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2222 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
/* Per-port ZERO helper for the SoC reset path; #undef'd later (elided). */
2226 #define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * SoC port reset: identical structure to mv5_reset_hc_port() but with
 * the SoC default EDMA_CFG value (0x101f vs 0x11f).
 */
2227 static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
2228 void __iomem *mmio, unsigned int port)
2230 void __iomem *port_mmio = mv_port_base(mmio, port);
2233 * The datasheet warns against setting ATA_RST when EDMA is active
2234 * (but doesn't say what the problem might be). So we first try
2235 * to disable the EDMA engine before doing the ATA_RST operation.
2237 mv_reset_channel(hpriv, mmio, port);
2239 ZERO(0x028); /* command */
2240 writel(0x101f, port_mmio + EDMA_CFG_OFS);
2241 ZERO(0x004); /* timer */
2242 ZERO(0x008); /* irq err cause */
2243 ZERO(0x00c); /* irq err mask */
2244 ZERO(0x010); /* rq bah */
2245 ZERO(0x014); /* rq inp */
2246 ZERO(0x018); /* rq outp */
2247 ZERO(0x01c); /* respq bah */
2248 ZERO(0x024); /* respq outp */
2249 ZERO(0x020); /* respq inp */
2250 ZERO(0x02c); /* test control */
2251 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
/* ZERO re-targeted at the single SoC host-controller block. */
2256 #define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * Reset the SoC's one-and-only HC block (register clears elided).
 */
2257 static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
2260 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
/*
 * Full SoC reset: reset every port, then the single HC block.
 * n_hc is ignored -- SoC chips have exactly one host controller.
 */
2270 static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2271 void __iomem *mmio, unsigned int n_hc)
2275 for (port = 0; port < hpriv->n_ports; port++)
2276 mv_soc_reset_hc_port(hpriv, mmio, port);
2278 mv_soc_reset_one_hc(hpriv, mmio);
/* SoC stubs: no flash-control or PCI-style bus reset exists on SoC
 * variants, so these hooks are intentionally empty (bodies elided). */
2283 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2289 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
/*
 * Program the SATA interface-control register; optionally enable the
 * Gen2i (3.0 Gb/s) speed bit. The 0x9b1000 value comes from the chip
 * spec (see inline comment).
 */
2294 static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2296 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2298 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2300 ifctl |= (1 << 7); /* enable gen2i speed */
2301 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2305 * Caller must ensure that EDMA is not active,
2306 * by first doing mv_stop_edma() where needed.
2308 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2309 unsigned int port_no)
2311 void __iomem *port_mmio = mv_port_base(mmio, port_no);
2313 mv_stop_edma_engine(port_mmio);
2314 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2316 if (!IS_GEN_I(hpriv)) {
2317 /* Enable 3.0gb/s link speed */
2318 mv_setup_ifctl(port_mmio, 1);
2321 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2322 * link, and physical layers. It resets all SATA interface registers
2323 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2325 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2326 udelay(25); /* allow reset propagation */
2327 writelfl(0, port_mmio + EDMA_CMD_OFS);
2329 hpriv->ops->phy_errata(hpriv, mmio, port_no);
/* GEN_I needs an extra settle delay (body of this branch elided). */
2331 if (IS_GEN_I(hpriv))
/*
 * Point the port at PM device @pmp via the low nibble of SATA_IFCTL.
 * Only rewrites the register when the target differs from the current
 * selection (comparison against `old` is in an elided line).
 */
2335 static void mv_pmp_select(struct ata_port *ap, int pmp)
2337 if (sata_pmp_supported(ap)) {
2338 void __iomem *port_mmio = mv_ap_base(ap);
2339 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2340 int old = reg & 0xf;
2343 reg = (reg & ~0xf) | pmp;
2344 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
/* Select the PM port for soft-reset signalling, then run the standard
 * SATA hardreset. */
2349 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
2350 unsigned long deadline)
2352 mv_pmp_select(link->ap, sata_srst_pmp(link));
2353 return sata_std_hardreset(link, class, deadline);
/* Select the PM port, then issue the generic SFF softreset. */
2356 static int mv_softreset(struct ata_link *link, unsigned int *class,
2357 unsigned long deadline)
2359 mv_pmp_select(link->ap, sata_srst_pmp(link));
2360 return ata_sff_softreset(link, class, deadline);
/*
 * Channel hardreset with the FEr SATA#10 workaround: retry the link
 * hardreset, and after 5 failed attempts (SStatus stuck at 0x121)
 * force the interface down to 1.5 Gb/s before trying again.
 * NOTE(review): the do/while opening and the final return are elided.
 */
2363 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2364 unsigned long deadline)
2366 struct ata_port *ap = link->ap;
2367 struct mv_host_priv *hpriv = ap->host->private_data;
2368 struct mv_port_priv *pp = ap->private_data;
2369 void __iomem *mmio = hpriv->base;
2370 int rc, attempts = 0, extra = 0;
2374 mv_reset_channel(hpriv, mmio, ap->port_no);
/* The channel reset disabled EDMA; mirror that in our cached flag. */
2375 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2377 /* Workaround for errata FEr SATA#10 (part 2) */
2379 const unsigned long *timing =
2380 sata_ehc_deb_timing(&link->eh_context);
2382 rc = sata_link_hardreset(link, timing, deadline + extra,
2386 sata_scr_read(link, SCR_STATUS, &sstatus);
2387 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2388 /* Force 1.5gb/s link speed and try again */
2389 mv_setup_ifctl(mv_ap_base(ap), 0);
2390 if (time_after(jiffies + HZ, deadline))
2391 extra = HZ; /* only extend it once, max */
/* Loop until the link settles in a recognized SStatus state. */
2393 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
/*
 * Freeze the port: clear its err/done bits in the chip's main IRQ mask
 * so it stops generating interrupts while EH owns it.
 * NOTE(review): the per-HC shift adjustment between the two shift
 * assignments is elided; `hc` is computed but its use is not visible.
 */
2398 static void mv_eh_freeze(struct ata_port *ap)
2400 struct mv_host_priv *hpriv = ap->host->private_data;
2401 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2405 /* FIXME: handle coalescing completion events properly */
2407 shift = ap->port_no * 2;
2411 mask = 0x3 << shift;
2413 /* disable assertion of portN err, done events */
2414 tmp = readl(hpriv->main_mask_reg_addr);
2415 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
/*
 * Thaw the port: clear any latched EDMA errors and pending HC IRQ
 * events for this port, then re-enable its err/done bits in the main
 * IRQ mask (inverse of mv_eh_freeze).
 */
2418 static void mv_eh_thaw(struct ata_port *ap)
2420 struct mv_host_priv *hpriv = ap->host->private_data;
2421 void __iomem *mmio = hpriv->base;
2422 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2423 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2424 void __iomem *port_mmio = mv_ap_base(ap);
2425 u32 tmp, mask, hc_irq_cause;
2426 unsigned int shift, hc_port_no = ap->port_no;
2428 /* FIXME: handle coalescing completion events properly */
2430 shift = ap->port_no * 2;
2436 mask = 0x3 << shift;
2438 /* clear EDMA errors on this port */
2439 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2441 /* clear pending irq events */
2442 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2443 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
2444 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2445 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2447 /* enable assertion of portN err, done events */
2448 tmp = readl(hpriv->main_mask_reg_addr);
2449 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2453 * mv_port_init - Perform some early initialization on a single port.
2454 * @port: libata data structure storing shadow register addresses
2455 * @port_mmio: base address of the port
2457 * Initialize shadow register mmio addresses, clear outstanding
2458 * interrupts on the port, and unmask interrupts for the future
2459 * start of the port.
2462 * Inherited from caller.
2464 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2466 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2469 /* PIO related setup
/* Shadow registers are 32-bit spaced, hence the sizeof(u32) stride. */
2471 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2473 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2474 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2475 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2476 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2477 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2478 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2480 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2481 /* special case: control/altstatus doesn't have ATA_REG_ address */
2482 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
/* This hardware has no BMDMA/SCR access via these pointers. */
2485 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2487 /* Clear any currently outstanding port interrupt conditions */
2488 serr_ofs = mv_scr_offset(SCR_ERROR);
/* Read-then-write-back clears the sticky SError bits. */
2489 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2490 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2492 /* unmask all non-transient EDMA error interrupts */
2493 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2495 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2496 readl(port_mmio + EDMA_CFG_OFS),
2497 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2498 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
/*
 * mv_chip_id - identify the controller generation and select errata flags.
 * @host: the ATA host being set up
 * @board_idx: index into the board/chip table chosen at probe time
 *
 * Picks the hardware-ops vtable (hpriv->ops), sets the MV_HP_GEN_*
 * generation flag, and ORs in MV_HP_ERRATA_* workaround flags keyed off
 * the PCI revision ID.  Finally records whether the chip's host IRQ
 * cause/mask registers live at the PCIe or conventional-PCI offsets.
 * Returns 0 on success (non-zero for an unknown board index).
 *
 * NOTE(review): the case labels of both switch statements are not visible
 * in this extract; the branch groupings below are inferred from the ops
 * tables and flags they set — confirm against the full source.
 */
2501 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2503 struct pci_dev *pdev = to_pci_dev(host->dev);
2504 struct mv_host_priv *hpriv = host->private_data;
/* Work on a local copy; committed to hpriv->hp_flags at the end. */
2505 u32 hp_flags = hpriv->hp_flags;
2507 switch (board_idx) {
/* Gen-I (50xx-class) chips, first family. */
2509 hpriv->ops = &mv5xxx_ops;
2510 hp_flags |= MV_HP_GEN_I;
2512 switch (pdev->revision) {
2514 hp_flags |= MV_HP_ERRATA_50XXB0;
2517 hp_flags |= MV_HP_ERRATA_50XXB2;
/* Unknown stepping: warn, then apply the newest known workarounds. */
2520 dev_printk(KERN_WARNING, &pdev->dev,
2521 "Applying 50XXB2 workarounds to unknown rev\n");
2522 hp_flags |= MV_HP_ERRATA_50XXB2;
/* Gen-I, second family — same ops, same revision-based errata choice. */
2529 hpriv->ops = &mv5xxx_ops;
2530 hp_flags |= MV_HP_GEN_I;
2532 switch (pdev->revision) {
2534 hp_flags |= MV_HP_ERRATA_50XXB0;
2537 hp_flags |= MV_HP_ERRATA_50XXB2;
2540 dev_printk(KERN_WARNING, &pdev->dev,
2541 "Applying B2 workarounds to unknown rev\n");
2542 hp_flags |= MV_HP_ERRATA_50XXB2;
/* Gen-II (60xx-class) chips. */
2549 hpriv->ops = &mv6xxx_ops;
2550 hp_flags |= MV_HP_GEN_II;
2552 switch (pdev->revision) {
2554 hp_flags |= MV_HP_ERRATA_60X1B2;
2557 hp_flags |= MV_HP_ERRATA_60X1C0;
2560 dev_printk(KERN_WARNING, &pdev->dev,
2561 "Applying B2 workarounds to unknown rev\n");
2562 hp_flags |= MV_HP_ERRATA_60X1B2;
/* PCIe-attached variant: remember to use the PCIE_* IRQ reg offsets. */
2568 hp_flags |= MV_HP_PCIE;
2569 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2570 (pdev->device == 0x2300 || pdev->device == 0x2310))
2573 * Highpoint RocketRAID PCIe 23xx series cards:
2575 * Unconfigured drives are treated as "Legacy"
2576 * by the BIOS, and it overwrites sector 8 with
2577 * a "Lgcy" metadata block prior to Linux boot.
2579 * Configured drives (RAID or JBOD) leave sector 8
2580 * alone, but instead overwrite a high numbered
2581 * sector for the RAID metadata. This sector can
2582 * be determined exactly, by truncating the physical
2583 * drive capacity to a nice even GB value.
2585 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2587 * Warn the user, lest they think we're just buggy.
2589 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2590 " BIOS CORRUPTS DATA on all attached drives,"
2591 " regardless of if/how they are configured."
2593 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2594 " use sectors 8-9 on \"Legacy\" drives,"
2595 " and avoid the final two gigabytes on"
2596 " all RocketRAID BIOS initialized drives.\n");
/* Gen-IIE chips: 6xxx ops but the enhanced-EDMA generation flag. */
2599 hpriv->ops = &mv6xxx_ops;
2600 hp_flags |= MV_HP_GEN_IIE;
2602 switch (pdev->revision) {
2604 hp_flags |= MV_HP_ERRATA_XX42A0;
2607 hp_flags |= MV_HP_ERRATA_60X1C0;
2610 dev_printk(KERN_WARNING, &pdev->dev,
2611 "Applying 60X1C0 workarounds to unknown rev\n");
2612 hp_flags |= MV_HP_ERRATA_60X1C0;
/* System-on-chip (non-PCI) integration. */
2617 hpriv->ops = &mv_soc_ops;
2618 hp_flags |= MV_HP_ERRATA_60X1C0;
/* Unknown board index is a driver bug, not a hardware condition. */
2622 dev_printk(KERN_ERR, host->dev,
2623 "BUG: invalid board index %u\n", board_idx);
2627 hpriv->hp_flags = hp_flags;
/* Cache the IRQ cause/mask offsets so the hot paths need no branch. */
2628 if (hp_flags & MV_HP_PCIE) {
2629 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2630 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2631 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2633 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2634 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2635 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2642 * mv_init_host - Perform some early initialization of the host.
2643 * @host: ATA host to initialize
2644 * @board_idx: controller index
2646 * If possible, do an early global reset of the host. Then do
2647 * our port init and clear/unmask all/relevant host interrupts.
2650 * Inherited from caller.
2652 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2654 int rc = 0, n_hc, port, hc;
2655 struct mv_host_priv *hpriv = host->private_data;
2656 void __iomem *mmio = hpriv->base;
/* Identify chip generation/errata first; everything below depends on it. */
2658 rc = mv_chip_id(host, board_idx);
/* PCI hosts and SoC hosts keep their main IRQ regs at different offsets. */
2662 if (HAS_PCI(host)) {
2663 hpriv->main_cause_reg_addr = hpriv->base +
2664 HC_MAIN_IRQ_CAUSE_OFS;
2665 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2667 hpriv->main_cause_reg_addr = hpriv->base +
2668 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2669 hpriv->main_mask_reg_addr = hpriv->base +
2670 HC_SOC_MAIN_IRQ_MASK_OFS;
2672 /* global interrupt mask */
/* Mask everything while we reset and initialize the hardware. */
2673 writel(0, hpriv->main_mask_reg_addr);
2675 n_hc = mv_get_hc_count(host->ports[0]->flags);
/* Snapshot per-port PHY preamp values before the controller reset. */
2677 for (port = 0; port < host->n_ports; port++)
2678 hpriv->ops->read_preamp(hpriv, port, mmio);
2680 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2684 hpriv->ops->reset_flash(hpriv, mmio);
2685 hpriv->ops->reset_bus(host, mmio);
2686 hpriv->ops->enable_leds(hpriv, mmio);
2688 for (port = 0; port < host->n_ports; port++) {
2689 struct ata_port *ap = host->ports[port];
2690 void __iomem *port_mmio = mv_port_base(mmio, port);
2692 mv_port_init(&ap->ioaddr, port_mmio);
/* Record BAR/offset strings for /proc & dmesg port descriptions. */
2695 if (HAS_PCI(host)) {
2696 unsigned int offset = port_mmio - mmio;
2697 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2698 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2703 for (hc = 0; hc < n_hc; hc++) {
2704 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2706 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2707 "(before clear)=0x%08x\n", hc,
2708 readl(hc_mmio + HC_CFG_OFS),
2709 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2711 /* Clear any currently outstanding hc interrupt conditions */
2712 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2715 if (HAS_PCI(host)) {
2716 /* Clear any currently outstanding host interrupt conditions */
2717 writelfl(0, mmio + hpriv->irq_cause_ofs);
2719 /* and unmask interrupt generation for host regs */
2720 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
/* Gen-I chips use a different set of always-masked main IRQ bits. */
2721 if (IS_GEN_I(hpriv))
2722 writelfl(~HC_MAIN_MASKED_IRQS_5,
2723 hpriv->main_mask_reg_addr);
2725 writelfl(~HC_MAIN_MASKED_IRQS,
2726 hpriv->main_mask_reg_addr);
2728 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2729 "PCI int cause/mask=0x%08x/0x%08x\n",
2730 readl(hpriv->main_cause_reg_addr),
2731 readl(hpriv->main_mask_reg_addr),
2732 readl(mmio + hpriv->irq_cause_ofs),
2733 readl(mmio + hpriv->irq_mask_ofs));
/* SoC path: only the main mask register needs programming. */
2735 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2736 hpriv->main_mask_reg_addr);
2737 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2738 readl(hpriv->main_cause_reg_addr),
2739 readl(hpriv->main_mask_reg_addr));
/*
 * mv_create_dma_pools - create the driver's three managed DMA pools.
 * @hpriv: host private data to store the pool handles in
 * @dev: device the pools are bound to (devres-managed: freed on detach)
 *
 * Allocates pools for command request queues (CRQB), command response
 * queues (CRPB), and scatter/gather tables.  Each dmam_pool_create()
 * result is checked; the error return lines are not visible in this
 * extract but failure paths presumably return -ENOMEM.
 */
2745 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2747 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2749 if (!hpriv->crqb_pool)
2752 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2754 if (!hpriv->crpb_pool)
2757 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2759 if (!hpriv->sg_tbl_pool)
2766 * mv_platform_probe - handle a positive probe of an soc Marvell
2768 * @pdev: platform device found
2771 * Inherited from caller.
2773 static int mv_platform_probe(struct platform_device *pdev)
/* Print the driver version banner only on the first probe. */
2775 static int printed_version;
2776 const struct mv_sata_platform_data *mv_platform_data;
/* SoC integrations always use the chip_soc port-info entry. */
2777 const struct ata_port_info *ppi[] =
2778 { &mv_port_info[chip_soc], NULL };
2779 struct ata_host *host;
2780 struct mv_host_priv *hpriv;
2781 struct resource *res;
2784 if (!printed_version++)
2785 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2788 * Simple resource validation ..
/* Expect exactly two resources: the register window and the IRQ. */
2790 if (unlikely(pdev->num_resources != 2)) {
2791 dev_err(&pdev->dev, "invalid number of resources\n");
2796 * Get the register base first
2798 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
/* Port count comes from board-supplied platform data.
 * NOTE(review): platform_data is dereferenced without a NULL check —
 * confirm all board files for this driver always provide it. */
2803 mv_platform_data = pdev->dev.platform_data;
2804 n_ports = mv_platform_data->n_ports;
/* Both allocations are devres-managed; no manual free on error paths. */
2806 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2807 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2809 if (!host || !hpriv)
2811 host->private_data = hpriv;
2812 hpriv->n_ports = n_ports;
2815 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2816 res->end - res->start + 1);
/* Bias the base down so per-port offset math matches the PCI variant,
 * where offsets are relative to the start of the whole register block
 * rather than SATAHC0 — presumably; confirm against mv_port_base(). */
2817 hpriv->base -= MV_SATAHC0_REG_BASE;
2819 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2823 /* initialize adapter */
2824 rc = mv_init_host(host, chip_soc);
2828 dev_printk(KERN_INFO, &pdev->dev,
2829 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
/* Hand off to libata: registers the IRQ handler and starts the ports. */
2832 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2833 IRQF_SHARED, &mv6_sht);
2838 * mv_platform_remove - unplug a platform interface
2839 * @pdev: platform device
2841 * A platform bus SATA device has been unplugged. Perform the needed
2842 * cleanup. Also called on module unload for any active devices.
2844 static int __devexit mv_platform_remove(struct platform_device *pdev)
2846 struct device *dev = &pdev->dev;
2847 struct ata_host *host = dev_get_drvdata(dev);
/* ata_host_detach tears down the ports; remaining resources are
 * devres-managed and released automatically after this returns. */
2849 ata_host_detach(host);
/* Platform-bus glue for SoC-integrated controllers (non-PCI). */
2853 static struct platform_driver mv_platform_driver = {
2854 .probe = mv_platform_probe,
2855 .remove = __devexit_p(mv_platform_remove),
2858 .owner = THIS_MODULE,
/* Forward declaration: mv_pci_init_one is defined further below but is
 * referenced by the pci_driver table here. */
2864 static int mv_pci_init_one(struct pci_dev *pdev,
2865 const struct pci_device_id *ent);
/* PCI-bus glue; removal is handled entirely by the libata core helper. */
2868 static struct pci_driver mv_pci_driver = {
2870 .id_table = mv_pci_tbl,
2871 .probe = mv_pci_init_one,
2872 .remove = ata_pci_remove_one,
2878 static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2881 /* move to PCI layer or libata core? */
/*
 * pci_go_64 - configure the widest usable DMA masks for @pdev.
 *
 * Try a 64-bit streaming DMA mask first; if the matching 64-bit
 * consistent mask fails, fall back to a 32-bit consistent mask.
 * If 64-bit streaming is unavailable, fall back to 32-bit for both.
 * Errors from the fallback paths are logged and (per the non-visible
 * return lines) presumably propagated to the caller.
 */
2882 static int pci_go_64(struct pci_dev *pdev)
2886 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2887 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2889 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2891 dev_printk(KERN_ERR, &pdev->dev,
2892 "64-bit DMA enable failed\n");
2897 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2899 dev_printk(KERN_ERR, &pdev->dev,
2900 "32-bit DMA enable failed\n");
2903 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2905 dev_printk(KERN_ERR, &pdev->dev,
2906 "32-bit consistent DMA enable failed\n");
2915 * mv_print_info - Dump key info to kernel log for perusal.
2916 * @host: ATA host to print info about
2918 * FIXME: complete this.
2921 * Inherited from caller.
2923 static void mv_print_info(struct ata_host *host)
2925 struct pci_dev *pdev = to_pci_dev(host->dev);
2926 struct mv_host_priv *hpriv = host->private_data;
2928 const char *scc_s, *gen;
2930 /* Use this to determine the HW stepping of the chip so we know
2931 * what errata to workaround
/* Read the PCI class/sub-class byte ("scc") to label the interface
 * mode in the banner (the branch bodies that assign scc_s for each
 * value are not visible in this extract). */
2933 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2936 else if (scc == 0x01)
/* Map the generation flag set by mv_chip_id() to a printable name. */
2941 if (IS_GEN_I(hpriv))
2943 else if (IS_GEN_II(hpriv))
2945 else if (IS_GEN_IIE(hpriv))
2950 dev_printk(KERN_INFO, &pdev->dev,
2951 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2952 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2953 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2957 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
2958 * @pdev: PCI device found
2959 * @ent: PCI device ID entry for the matched host
2962 * Inherited from caller.
2964 static int mv_pci_init_one(struct pci_dev *pdev,
2965 const struct pci_device_id *ent)
/* Print the driver version banner only on the first probe. */
2967 static int printed_version;
/* driver_data in the ID table indexes the board/port-info arrays. */
2968 unsigned int board_idx = (unsigned int)ent->driver_data;
2969 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2970 struct ata_host *host;
2971 struct mv_host_priv *hpriv;
2974 if (!printed_version++)
2975 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2978 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
/* Both allocations are devres-managed; no manual free on error paths. */
2980 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2981 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2982 if (!host || !hpriv)
2984 host->private_data = hpriv;
2985 hpriv->n_ports = n_ports;
2987 /* acquire resources */
2988 rc = pcim_enable_device(pdev);
2992 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
/* BAR claim failed (likely held by another driver): pin the device so
 * devres doesn't disable it out from under whoever owns it. */
2994 pcim_pin_device(pdev);
2997 host->iomap = pcim_iomap_table(pdev);
2998 hpriv->base = host->iomap[MV_PRIMARY_BAR];
3000 rc = pci_go_64(pdev);
3004 rc = mv_create_dma_pools(hpriv, &pdev->dev);
3008 /* initialize adapter */
3009 rc = mv_init_host(host, board_idx);
3013 /* Enable interrupts */
/* MSI is opt-in via the module parameter; on failure fall back to INTx
 * (the fallback statement is not visible in this extract). */
3014 if (msi && pci_enable_msi(pdev))
3017 mv_dump_pci_cfg(pdev, 0x68);
3018 mv_print_info(host);
3020 pci_set_master(pdev);
/* Memory-write-invalidate is best-effort; ignore failure. */
3021 pci_try_set_mwi(pdev);
/* Gen-I chips use a reduced scsi_host_template (no NCQ-era features). */
3022 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
3023 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
/* Forward declarations (bodies defined earlier in the file). */
3027 static int mv_platform_probe(struct platform_device *pdev);
3028 static int __devexit mv_platform_remove(struct platform_device *pdev);
/*
 * mv_init - module entry: register the PCI driver, then the platform
 * driver.  If platform registration fails, the already-registered PCI
 * driver is unregistered so the module loads all-or-nothing.
 */
3030 static int __init mv_init(void)
3034 rc = pci_register_driver(&mv_pci_driver);
3038 rc = platform_driver_register(&mv_platform_driver);
/* Platform registration failed: undo the PCI registration. */
3042 pci_unregister_driver(&mv_pci_driver);
/* mv_exit - module exit: unregister both drivers (reverse of mv_init). */
3047 static void __exit mv_exit(void)
3050 pci_unregister_driver(&mv_pci_driver);
3052 platform_driver_unregister(&mv_platform_driver);
/* Module metadata, parameters, and entry/exit point registration. */
3055 MODULE_AUTHOR("Brett Russ");
3056 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3057 MODULE_LICENSE("GPL");
3058 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3059 MODULE_VERSION(DRV_VERSION);
3060 MODULE_ALIAS("platform:" DRV_NAME);
/* "msi" is read-only from sysfs (mode 0444): set at load time only. */
3063 module_param(msi, int, 0444);
3064 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
3067 module_init(mv_init);
3068 module_exit(mv_exit);