/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.
  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).
  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

  8) Develop a low-power-consumption strategy, and implement it.
  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.
  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.
  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
enum {
	/* BARs are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,	/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
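
	/*
	 * Worked sizes (illustrative arithmetic, assuming the values
	 * above): with MV_MAX_Q_DEPTH == 32 the request ring is
	 * 32 CRQBs * 32B == 1KB and the response ring is
	 * 32 CRPBs * 8B == 256B, matching the alignment rules in the
	 * comment above; each SG table holds 256 16-byte ePRDs == 4KB.
	 */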

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,
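
	/*
	 * Example of the port numbering math (a sketch, not from the
	 * datasheet): global port 6 maps to HC 1 (6 >> MV_PORT_HC_SHIFT)
	 * as hard port 2 (6 & MV_PORT_MASK).
	 */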

	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
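
	/*
	 * Illustration (a sketch from the constants above): each port
	 * contributes an err/done bit pair, so HC1's slice of the main
	 * cause register is selected with (HC0_IRQ_PEND << (1 * HC_SHIFT)),
	 * i.e. bits 9-17, exactly as mv_interrupt() does below.
	 */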

	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6), /* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX |
				 /* temporary, until we fix hotplug: */
				 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),

	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR | EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR | EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 | EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 | EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,
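
	/*
	 * Layout note (illustrative, inferred from the masks below): the
	 * in/out pointer registers hold the queue base low bits plus the
	 * ring index at the *_PTR_SHIFT position; with a 1KB-aligned
	 * request ring, index 3 is written as
	 * (crqb_dma & 0xfffffc00) | (3 << 5), as mv_set_edma_ptrs() does.
	 */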

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,
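
	/*
	 * Background (a sketch): the ePRD length field is 16 bits wide,
	 * so no single entry may describe a span crossing a 64KB
	 * boundary; this dma_boundary keeps the block layer from merging
	 * segments across it, and mv_fill_sg() splits whatever remains.
	 */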

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= ata_std_qc_defer,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,
};

static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};

static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};

static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};

static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};

static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};

/*
 * Functions
 */

static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
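
/*
 * Worked example of the address math above (illustrative): for port 5,
 * mv_port_base() yields base + 0x20000 (SATAHC0) + 1 * 0x10000 (HC 1)
 * + 0x2000 (arbiter) + 1 * 0x2000 (hard port 1) == base + 0x34000.
 */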

static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *	mv_start_dma - Enable eDMA engine
 *	@base: port base address
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	sanity check.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
			 struct mv_port_priv *pp, u8 protocol)
{
	int want_ncq = (protocol == ATA_PROT_NCQ);

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			mv_stop_edma(ap);
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
					mv_host_base(ap->host), hard_port);
		u32 hc_irq_cause, ipending;

		/* clear EDMA event indicators, if any */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* clear EDMA interrupt indicator, if any */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
			   (CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending) {
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);
		}

		mv_edma_cfg(ap, want_ncq);

		/* clear FIS IRQ Cause */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(port_mmio, hpriv, pp);

		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
}

/**
 *	mv_stop_edma_engine - Disable eDMA engine
 *	@port_mmio: io base address
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_edma_engine(void __iomem *port_mmio)
{
	int i;

	/* Disable eDMA.  The disable bit auto clears. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	/* Wait for the chip to confirm eDMA is off. */
	for (i = 10000; i > 0; i--) {
		u32 reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			return 0;
		udelay(10);
	}
	return -EIO;
}

static int mv_stop_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
		return 0;
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	if (mv_stop_edma_engine(port_mmio)) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		return -EIO;
	}
	return 0;
}

#ifdef ATA_DEBUG
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (port < 0) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}

static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, mv_ap_base(ap) + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv6_dev_config(struct ata_device *adev)
{
	/*
	 * We don't have hob_nsect when doing NCQ commands on Gen-II.
	 * See mv_qc_prep() for more info.
	 */
	if (adev->flags & ATA_DFLAG_NCQ)
		if (adev->max_sectors > ATA_MAX_SECTORS)
			adev->max_sectors = ATA_MAX_SECTORS;
}

static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}

static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}

/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(ap, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}

/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}

/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
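
/*
 * Example of the splitting above (a sketch, not vendor documentation):
 * a single 0x2000-byte DMA segment at bus address 0x1234f000 sits at
 * offset 0xf000 within its 64KB region, so the first ePRD is trimmed
 * to 0x1000 bytes and the remaining 0x1000 bytes at 0x12350000 go into
 * the next ePRD, keeping every entry's 16-bit length field valid.
 */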

static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
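
/*
 * Packed halfword layout produced above (illustrative, assuming the
 * usual 3-bit shadow register addresses): bit 15 = CRQB_CMD_LAST,
 * bit 12 = the CRQB_CMD_CS pattern, bits 10:8 = the register address,
 * bits 7:0 = the data byte.  mv_qc_prep() below ends each CRQB with
 * the command register word marked "last".
 */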

/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}

/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		return ata_sff_qc_issue(qc);
	}

	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}

/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *	@reset_allowed: bool: 0 == don't trigger from reset here
 *
 *	In most cases, just clear the interrupt and move on.  However,
 *	some cases require an eDMA reset, which also performs a COMRESET.
 *	The SERR case requires a clear of pending errors in the SATA
 *	SERROR register.  Finally, if the port disabled DMA,
 *	update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}

static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}

static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;
		else {
			/* Gen II/IIE: get active ATA command via tag, to enable
			 * support for queueing.  this works transparently for
			 * queued and non-queued modes.
			 */
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
		}

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}

/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@relevant: port error bits relevant to this host controller
 *	@hc: which host controller we're to look at
 *
 *	Read then write clear the HC interrupt status then walk each
 *	port connected to the HC and see if it needs servicing.  Port
 *	success ints are reported in the HC interrupt status reg, the
 *	port error ints are reported in the higher level main
 *	interrupt status register and thus are passed in via the
 *	'relevant' argument.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
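
		/*
		 * Worked example (a sketch from the shift math above):
		 * port 5 yields shift 11 (5 * 2, plus one to skip the HC0
		 * coalescing bit 8), so its PORT0_ERR test below lands on
		 * main-cause bit 11, inside the HC1 window selected by
		 * HC_SHIFT.
		 */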

		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}

static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}

/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}

static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(host, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}

#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}

/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@mmio: base address of the HBA
 *
 *	This routine only applies to 6xxx parts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}

static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}

/* TODO: use the generic LED interface to configure the SATA Presence */
/* & Activity LEDs on the board */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio)
{
	return;
}

static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}

#undef ZERO
#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
}

#undef ZERO

static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc)
{
	unsigned int port;

	for (port = 0; port < hpriv->n_ports; port++)
		mv_soc_reset_hc_port(hpriv, mmio, port);

	mv_soc_reset_one_hc(hpriv, mmio);

	return 0;
}
2240 static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2241 				void __iomem *mmio)
2242 {
2243 	return;
2244 }
2246 static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2247 {
2248 	return;
2249 }
2251 static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2252 {
2253 	u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2255 	ifctl = (ifctl & 0xf7f) | 0x9b1000;	/* from chip spec */
2256 	if (want_gen2i)
2257 		ifctl |= (1 << 7);		/* enable gen2i speed */
2258 	writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2259 }
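/*
 * Editorial illustration of the read-modify-write above: the 0xf7f mask
 * keeps bits 11:0 of SATA_INTERFACE_CFG except bit 7 (and clears all
 * higher bits), 0x9b1000 then sets the values mandated by the chip spec,
 * and bit 7 is set only when Gen2i operation is wanted:
 *
 *	ifctl = (ifctl & 0xf7f) | 0x9b1000;
 *	if (want_gen2i)
 *		ifctl |= (1 << 7);
 */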
2261 /*
2262  * Caller must ensure that EDMA is not active,
2263  * by first doing mv_stop_edma() where needed.
2264  */
2265 static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
2266 			     unsigned int port_no)
2267 {
2268 	void __iomem *port_mmio = mv_port_base(mmio, port_no);
2270 	mv_stop_edma_engine(port_mmio);
2271 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2273 	if (!IS_GEN_I(hpriv)) {
2274 		/* Enable 3.0gb/s link speed */
2275 		mv_setup_ifctl(port_mmio, 1);
2276 	}
2277 	/*
2278 	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
2279 	 * link, and physical layers.  It resets all SATA interface registers
2280 	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
2281 	 */
2282 	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
2283 	udelay(25);	/* allow reset propagation */
2284 	writelfl(0, port_mmio + EDMA_CMD_OFS);
2286 	hpriv->ops->phy_errata(hpriv, mmio, port_no);
2288 	if (IS_GEN_I(hpriv))
2289 		mdelay(1);
2290 }
2292 static int mv_hardreset(struct ata_link *link, unsigned int *class,
2293 			unsigned long deadline)
2294 {
2295 	struct ata_port *ap = link->ap;
2296 	struct mv_host_priv *hpriv = ap->host->private_data;
2297 	struct mv_port_priv *pp = ap->private_data;
2298 	void __iomem *mmio = hpriv->base;
2299 	int rc, attempts = 0, extra = 0;
2300 	u32 sstatus;
2301 	bool online;
2303 	mv_reset_channel(hpriv, mmio, ap->port_no);
2304 	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
2306 	/* Workaround for errata FEr SATA#10 (part 2) */
2307 	do {
2308 		const unsigned long *timing =
2309 				sata_ehc_deb_timing(&link->eh_context);
2311 		rc = sata_link_hardreset(link, timing, deadline + extra,
2312 					 &online, NULL);
2313 		if (rc)
2314 			return rc;
2315 		sata_scr_read(link, SCR_STATUS, &sstatus);
2316 		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
2317 			/* Force 1.5gb/s link speed and try again */
2318 			mv_setup_ifctl(mv_ap_base(ap), 0);
2319 			if (time_after(jiffies + HZ, deadline))
2320 				extra = HZ; /* only extend it once, max */
2321 		}
2322 	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
2324 	return rc;
2325 }
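/*
 * Editorial note on the SStatus values tested above (standard SATA SCR0
 * layout: DET in bits 3:0, SPD in 7:4, IPM in 11:8):
 *
 *	0x000	no device detected
 *	0x113	device present, PHY up, 1.5 Gbps (Gen1)
 *	0x123	device present, PHY up, 3.0 Gbps (Gen2)
 *	0x121	device detected but no PHY communication, which is the
 *		stuck case the loop retries after forcing 1.5 Gbps.
 */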
2327 static void mv_eh_freeze(struct ata_port *ap)
2328 {
2329 	struct mv_host_priv *hpriv = ap->host->private_data;
2330 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2331 	u32 tmp, mask;
2332 	unsigned int shift;
2334 	/* FIXME: handle coalescing completion events properly */
2336 	shift = ap->port_no * 2;
2337 	if (hc > 0)
2338 		shift++;
2340 	mask = 0x3 << shift;
2342 	/* disable assertion of portN err, done events */
2343 	tmp = readl(hpriv->main_mask_reg_addr);
2344 	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
2345 }
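/*
 * Editorial example of the mask computed above: the main mask register
 * carries an err/done bit pair per port, with ports on the second host
 * controller skewed up by one extra bit (hence the shift++ when hc > 0).
 * For port 5: shift = 5 * 2 + 1 = 11, so mask = 0x3 << 11 = 0x1800.
 */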
2347 static void mv_eh_thaw(struct ata_port *ap)
2348 {
2349 	struct mv_host_priv *hpriv = ap->host->private_data;
2350 	void __iomem *mmio = hpriv->base;
2351 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2352 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2353 	void __iomem *port_mmio = mv_ap_base(ap);
2354 	u32 tmp, mask, hc_irq_cause;
2355 	unsigned int shift, hc_port_no = ap->port_no;
2357 	/* FIXME: handle coalescing completion events properly */
2359 	shift = ap->port_no * 2;
2360 	if (hc > 0) {
2361 		shift++;
2362 		hc_port_no -= 4;
2363 	}
2365 	mask = 0x3 << shift;
2367 	/* clear EDMA errors on this port */
2368 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2370 	/* clear pending irq events */
2371 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
2372 	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
2373 	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
2374 	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
2376 	/* enable assertion of portN err, done events */
2377 	tmp = readl(hpriv->main_mask_reg_addr);
2378 	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
2379 }
2381 /**
2382  *	mv_port_init - Perform some early initialization on a single port.
2383  *	@port: libata data structure storing shadow register addresses
2384  *	@port_mmio: base address of the port
2385  *
2386  *	Initialize shadow register mmio addresses, clear outstanding
2387  *	interrupts on the port, and unmask interrupts for the future
2388  *	start of the port.
2389  *
2390  *	LOCKING:
2391  *	Inherited from caller.
2392  */
2393 static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2394 {
2395 	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
2396 	unsigned serr_ofs;
2398 	/* PIO related setup
2399 	 */
2400 	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
2401 	port->error_addr =
2402 		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2403 	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2404 	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2405 	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2406 	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2407 	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
2408 	port->status_addr =
2409 		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2410 	/* special case: control/altstatus doesn't have ATA_REG_ address */
2411 	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2413 	/* unused: */
2414 	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
2416 	/* Clear any currently outstanding port interrupt conditions */
2417 	serr_ofs = mv_scr_offset(SCR_ERROR);
2418 	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2419 	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2421 	/* unmask all non-transient EDMA error interrupts */
2422 	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
2424 	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
2425 		readl(port_mmio + EDMA_CFG_OFS),
2426 		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2427 		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
2428 }
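/*
 * Editorial note: the writelfl(readl(...)) pair above is the usual
 * write-ones-to-clear idiom -- SError (SCR_ERROR) bits are RW1C, so
 * writing back exactly the bits currently set clears them all:
 *
 *	serr_ofs = mv_scr_offset(SCR_ERROR);
 *	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
 */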
2430 static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
2431 {
2432 	struct pci_dev *pdev = to_pci_dev(host->dev);
2433 	struct mv_host_priv *hpriv = host->private_data;
2434 	u32 hp_flags = hpriv->hp_flags;
2436 	switch (board_idx) {
2437 	case chip_5080:
2438 		hpriv->ops = &mv5xxx_ops;
2439 		hp_flags |= MV_HP_GEN_I;
2441 		switch (pdev->revision) {
2442 		case 0x1:
2443 			hp_flags |= MV_HP_ERRATA_50XXB0;
2444 			break;
2445 		case 0x3:
2446 			hp_flags |= MV_HP_ERRATA_50XXB2;
2447 			break;
2448 		default:
2449 			dev_printk(KERN_WARNING, &pdev->dev,
2450 				   "Applying 50XXB2 workarounds to unknown rev\n");
2451 			hp_flags |= MV_HP_ERRATA_50XXB2;
2452 			break;
2453 		}
2454 		break;
2456 	case chip_504x:
2457 	case chip_508x:
2458 		hpriv->ops = &mv5xxx_ops;
2459 		hp_flags |= MV_HP_GEN_I;
2461 		switch (pdev->revision) {
2462 		case 0x0:
2463 			hp_flags |= MV_HP_ERRATA_50XXB0;
2464 			break;
2465 		case 0x3:
2466 			hp_flags |= MV_HP_ERRATA_50XXB2;
2467 			break;
2468 		default:
2469 			dev_printk(KERN_WARNING, &pdev->dev,
2470 				   "Applying B2 workarounds to unknown rev\n");
2471 			hp_flags |= MV_HP_ERRATA_50XXB2;
2472 			break;
2473 		}
2474 		break;
2476 	case chip_604x:
2477 	case chip_608x:
2478 		hpriv->ops = &mv6xxx_ops;
2479 		hp_flags |= MV_HP_GEN_II;
2481 		switch (pdev->revision) {
2482 		case 0x7:
2483 			hp_flags |= MV_HP_ERRATA_60X1B2;
2484 			break;
2485 		case 0x9:
2486 			hp_flags |= MV_HP_ERRATA_60X1C0;
2487 			break;
2488 		default:
2489 			dev_printk(KERN_WARNING, &pdev->dev,
2490 				   "Applying B2 workarounds to unknown rev\n");
2491 			hp_flags |= MV_HP_ERRATA_60X1B2;
2492 			break;
2493 		}
2494 		break;
2496 	case chip_7042:
2497 		hp_flags |= MV_HP_PCIE;
2498 		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
2499 		    (pdev->device == 0x2300 || pdev->device == 0x2310))
2500 		{
2501 			/*
2502 			 * Highpoint RocketRAID PCIe 23xx series cards:
2503 			 *
2504 			 * Unconfigured drives are treated as "Legacy"
2505 			 * by the BIOS, and it overwrites sector 8 with
2506 			 * a "Lgcy" metadata block prior to Linux boot.
2507 			 *
2508 			 * Configured drives (RAID or JBOD) leave sector 8
2509 			 * alone, but instead overwrite a high numbered
2510 			 * sector for the RAID metadata.  This sector can
2511 			 * be determined exactly, by truncating the physical
2512 			 * drive capacity to a nice even GB value.
2513 			 *
2514 			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
2515 			 *
2516 			 * Warn the user, lest they think we're just buggy.
2517 			 */
2518 			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
2519 				" BIOS CORRUPTS DATA on all attached drives,"
2520 				" regardless of if/how they are configured."
2521 				" BEWARE!\n");
2522 			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
2523 				" use sectors 8-9 on \"Legacy\" drives,"
2524 				" and avoid the final two gigabytes on"
2525 				" all RocketRAID BIOS initialized drives.\n");
2526 		}
2527 	case chip_6042:
2528 		hpriv->ops = &mv6xxx_ops;
2529 		hp_flags |= MV_HP_GEN_IIE;
2531 		switch (pdev->revision) {
2532 		case 0x0:
2533 			hp_flags |= MV_HP_ERRATA_XX42A0;
2534 			break;
2535 		case 0x1:
2536 			hp_flags |= MV_HP_ERRATA_60X1C0;
2537 			break;
2538 		default:
2539 			dev_printk(KERN_WARNING, &pdev->dev,
2540 				   "Applying 60X1C0 workarounds to unknown rev\n");
2541 			hp_flags |= MV_HP_ERRATA_60X1C0;
2542 			break;
2543 		}
2544 		break;
2545 	case chip_soc:
2546 		hpriv->ops = &mv_soc_ops;
2547 		hp_flags |= MV_HP_ERRATA_60X1C0;
2548 		break;
2550 	default:
2551 		dev_printk(KERN_ERR, host->dev,
2552 			   "BUG: invalid board index %u\n", board_idx);
2553 		return 1;
2554 	}
2556 	hpriv->hp_flags = hp_flags;
2557 	if (hp_flags & MV_HP_PCIE) {
2558 		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
2559 		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
2560 		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
2561 	} else {
2562 		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
2563 		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
2564 		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
2565 	}
2567 	return 0;
2568 }
2570 /**
2571  *	mv_init_host - Perform some early initialization of the host.
2572  *	@host: ATA host to initialize
2573  *	@board_idx: controller index
2574  *
2575  *	If possible, do an early global reset of the host.  Then do
2576  *	our port init and clear/unmask all/relevant host interrupts.
2577  *
2578  *	LOCKING:
2579  *	Inherited from caller.
2580  */
2581 static int mv_init_host(struct ata_host *host, unsigned int board_idx)
2582 {
2583 	int rc = 0, n_hc, port, hc;
2584 	struct mv_host_priv *hpriv = host->private_data;
2585 	void __iomem *mmio = hpriv->base;
2587 	rc = mv_chip_id(host, board_idx);
2588 	if (rc)
2589 		goto done;
2591 	if (HAS_PCI(host)) {
2592 		hpriv->main_cause_reg_addr = hpriv->base +
2593 		  HC_MAIN_IRQ_CAUSE_OFS;
2594 		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2595 	} else {
2596 		hpriv->main_cause_reg_addr = hpriv->base +
2597 		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
2598 		hpriv->main_mask_reg_addr = hpriv->base +
2599 		  HC_SOC_MAIN_IRQ_MASK_OFS;
2600 	}
2601 	/* global interrupt mask */
2602 	writel(0, hpriv->main_mask_reg_addr);
2604 	n_hc = mv_get_hc_count(host->ports[0]->flags);
2606 	for (port = 0; port < host->n_ports; port++)
2607 		hpriv->ops->read_preamp(hpriv, port, mmio);
2609 	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
2610 	if (rc)
2611 		goto done;
2613 	hpriv->ops->reset_flash(hpriv, mmio);
2614 	hpriv->ops->reset_bus(host, mmio);
2615 	hpriv->ops->enable_leds(hpriv, mmio);
2617 	for (port = 0; port < host->n_ports; port++) {
2618 		struct ata_port *ap = host->ports[port];
2619 		void __iomem *port_mmio = mv_port_base(mmio, port);
2621 		mv_port_init(&ap->ioaddr, port_mmio);
2623 #ifdef CONFIG_PCI
2624 		if (HAS_PCI(host)) {
2625 			unsigned int offset = port_mmio - mmio;
2626 			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2627 			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2628 		}
2629 #endif
2630 	}
2632 	for (hc = 0; hc < n_hc; hc++) {
2633 		void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2635 		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2636 			"(before clear)=0x%08x\n", hc,
2637 			readl(hc_mmio + HC_CFG_OFS),
2638 			readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2640 		/* Clear any currently outstanding hc interrupt conditions */
2641 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
2642 	}
2644 	if (HAS_PCI(host)) {
2645 		/* Clear any currently outstanding host interrupt conditions */
2646 		writelfl(0, mmio + hpriv->irq_cause_ofs);
2648 		/* and unmask interrupt generation for host regs */
2649 		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2650 		if (IS_GEN_I(hpriv))
2651 			writelfl(~HC_MAIN_MASKED_IRQS_5,
2652 				 hpriv->main_mask_reg_addr);
2653 		else
2654 			writelfl(~HC_MAIN_MASKED_IRQS,
2655 				 hpriv->main_mask_reg_addr);
2657 		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2658 			"PCI int cause/mask=0x%08x/0x%08x\n",
2659 			readl(hpriv->main_cause_reg_addr),
2660 			readl(hpriv->main_mask_reg_addr),
2661 			readl(mmio + hpriv->irq_cause_ofs),
2662 			readl(mmio + hpriv->irq_mask_ofs));
2663 	} else {
2664 		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2665 			 hpriv->main_mask_reg_addr);
2666 		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2667 			readl(hpriv->main_cause_reg_addr),
2668 			readl(hpriv->main_mask_reg_addr));
2669 	}
2670 done:
2671 	return rc;
2672 }
2674 static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2675 {
2676 	hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2677 					    MV_CRQB_Q_SZ, 0);
2678 	if (!hpriv->crqb_pool)
2679 		return -ENOMEM;
2681 	hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2682 					    MV_CRPB_Q_SZ, 0);
2683 	if (!hpriv->crpb_pool)
2684 		return -ENOMEM;
2686 	hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2687 					      MV_SG_TBL_SZ, 0);
2688 	if (!hpriv->sg_tbl_pool)
2689 		return -ENOMEM;
2691 	return 0;
2692 }
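/*
 * Editorial note: these are the device-managed (dmam_) pool variants, so
 * the pools are released automatically when the device goes away and the
 * probe paths below need no explicit unwinding for them. Each pool uses
 * align == element size, e.g.:
 *
 *	dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ, MV_CRQB_Q_SZ, 0);
 */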
2694 /**
2695  *	mv_platform_probe - handle a positive probe of an SoC Marvell
2696  *	host
2697  *	@pdev: platform device found
2698  *
2699  *	LOCKING:
2700  *	Inherited from caller.
2701  */
2702 static int mv_platform_probe(struct platform_device *pdev)
2703 {
2704 	static int printed_version;
2705 	const struct mv_sata_platform_data *mv_platform_data;
2706 	const struct ata_port_info *ppi[] =
2707 	    { &mv_port_info[chip_soc], NULL };
2708 	struct ata_host *host;
2709 	struct mv_host_priv *hpriv;
2710 	struct resource *res;
2711 	int n_ports, rc;
2713 	if (!printed_version++)
2714 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2716 	/*
2717 	 * Simple resource validation.
2718 	 */
2719 	if (unlikely(pdev->num_resources != 2)) {
2720 		dev_err(&pdev->dev, "invalid number of resources\n");
2721 		return -EINVAL;
2722 	}
2724 	/*
2725 	 * Get the register base first
2726 	 */
2727 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2728 	if (res == NULL)
2729 		return -EINVAL;
2731 	/* allocate host */
2732 	mv_platform_data = pdev->dev.platform_data;
2733 	n_ports = mv_platform_data->n_ports;
2735 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2736 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2738 	if (!host || !hpriv)
2739 		return -ENOMEM;
2740 	host->private_data = hpriv;
2741 	hpriv->n_ports = n_ports;
2743 	host->iomap = NULL;
2744 	hpriv->base = devm_ioremap(&pdev->dev, res->start,
2745 				   res->end - res->start + 1);
2746 	hpriv->base -= MV_SATAHC0_REG_BASE;
2748 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
2749 	if (rc)
2750 		return rc;
2752 	/* initialize adapter */
2753 	rc = mv_init_host(host, chip_soc);
2754 	if (rc)
2755 		return rc;
2757 	dev_printk(KERN_INFO, &pdev->dev,
2758 		   "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2759 		   host->n_ports);
2761 	return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2762 				 IRQF_SHARED, &mv6_sht);
2763 }
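/*
 * Editorial note on the probe above: on SoC parts the platform resource
 * covers only the SATAHC register block, while the per-port offset
 * arithmetic assumes a full chip window that starts MV_SATAHC0_REG_BASE
 * earlier; subtracting MV_SATAHC0_REG_BASE from hpriv->base (presumably
 * for that reason) lets the same offset macros serve both the PCI and
 * SoC cases.
 */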
2766 /**
2767  *	mv_platform_remove - unplug a platform interface
2768  *	@pdev: platform device
2769  *
2770  *	A platform bus SATA device has been unplugged. Perform the needed
2771  *	cleanup. Also called on module unload for any active devices.
2772  */
2773 static int __devexit mv_platform_remove(struct platform_device *pdev)
2774 {
2775 	struct device *dev = &pdev->dev;
2776 	struct ata_host *host = dev_get_drvdata(dev);
2778 	ata_host_detach(host);
2779 	return 0;
2780 }
2782 static struct platform_driver mv_platform_driver = {
2783 	.probe		= mv_platform_probe,
2784 	.remove		= __devexit_p(mv_platform_remove),
2785 	.driver		= {
2786 			   .name = DRV_NAME,
2787 			   .owner = THIS_MODULE,
2788 			  },
2789 };
2792 #ifdef CONFIG_PCI
2793 static int mv_pci_init_one(struct pci_dev *pdev,
2794 			   const struct pci_device_id *ent);
2797 static struct pci_driver mv_pci_driver = {
2798 	.name		= DRV_NAME,
2799 	.id_table	= mv_pci_tbl,
2800 	.probe		= mv_pci_init_one,
2801 	.remove		= ata_pci_remove_one,
2802 };
2804 /*
2805  * module options
2806  */
2807 static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */
2810 /* move to PCI layer or libata core? */
2811 static int pci_go_64(struct pci_dev *pdev)
2812 {
2813 	int rc;
2815 	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2816 		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2817 		if (rc) {
2818 			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2819 			if (rc) {
2820 				dev_printk(KERN_ERR, &pdev->dev,
2821 					   "64-bit DMA enable failed\n");
2822 				return rc;
2823 			}
2824 		}
2825 	} else {
2826 		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2827 		if (rc) {
2828 			dev_printk(KERN_ERR, &pdev->dev,
2829 				   "32-bit DMA enable failed\n");
2830 			return rc;
2831 		}
2832 		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2833 		if (rc) {
2834 			dev_printk(KERN_ERR, &pdev->dev,
2835 				   "32-bit consistent DMA enable failed\n");
2836 			return rc;
2837 		}
2838 	}
2840 	return rc;
2841 }
2843 /**
2844  *	mv_print_info - Dump key info to kernel log for perusal.
2845  *	@host: ATA host to print info about
2846  *
2847  *	FIXME: complete this.
2848  *
2849  *	LOCKING:
2850  *	Inherited from caller.
2851  */
2852 static void mv_print_info(struct ata_host *host)
2853 {
2854 	struct pci_dev *pdev = to_pci_dev(host->dev);
2855 	struct mv_host_priv *hpriv = host->private_data;
2856 	u8 scc;
2857 	const char *scc_s, *gen;
2859 	/* Read the PCI class code, which tells us whether the chip
2860 	 * presents itself as a SCSI or a RAID class device
2861 	 */
2862 	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2863 	if (scc == 0)
2864 		scc_s = "SCSI";
2865 	else if (scc == 0x01)
2866 		scc_s = "RAID";
2867 	else
2868 		scc_s = "?";
2870 	if (IS_GEN_I(hpriv))
2871 		gen = "I";
2872 	else if (IS_GEN_II(hpriv))
2873 		gen = "II";
2874 	else if (IS_GEN_IIE(hpriv))
2875 		gen = "IIE";
2876 	else
2877 		gen = "?";
2879 	dev_printk(KERN_INFO, &pdev->dev,
2880 		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2881 		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
2882 		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2883 }
2885 /**
2886  *	mv_pci_init_one - handle a positive probe of a PCI Marvell host
2887  *	@pdev: PCI device found
2888  *	@ent: PCI device ID entry for the matched host
2889  *
2890  *	LOCKING:
2891  *	Inherited from caller.
2892  */
2893 static int mv_pci_init_one(struct pci_dev *pdev,
2894 			   const struct pci_device_id *ent)
2895 {
2896 	static int printed_version;
2897 	unsigned int board_idx = (unsigned int)ent->driver_data;
2898 	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2899 	struct ata_host *host;
2900 	struct mv_host_priv *hpriv;
2901 	int n_ports, rc;
2903 	if (!printed_version++)
2904 		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2906 	/* allocate host */
2907 	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2909 	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2910 	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2911 	if (!host || !hpriv)
2912 		return -ENOMEM;
2913 	host->private_data = hpriv;
2914 	hpriv->n_ports = n_ports;
2916 	/* acquire resources */
2917 	rc = pcim_enable_device(pdev);
2918 	if (rc)
2919 		return rc;
2921 	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2922 	if (rc == -EBUSY)
2923 		pcim_pin_device(pdev);
2924 	if (rc)
2925 		return rc;
2926 	host->iomap = pcim_iomap_table(pdev);
2927 	hpriv->base = host->iomap[MV_PRIMARY_BAR];
2929 	rc = pci_go_64(pdev);
2930 	if (rc)
2931 		return rc;
2933 	rc = mv_create_dma_pools(hpriv, &pdev->dev);
2934 	if (rc)
2935 		return rc;
2937 	/* initialize adapter */
2938 	rc = mv_init_host(host, board_idx);
2939 	if (rc)
2940 		return rc;
2942 	/* Enable interrupts */
2943 	if (msi && pci_enable_msi(pdev))
2944 		pci_intx(pdev, 1);
2946 	mv_dump_pci_cfg(pdev, 0x68);
2947 	mv_print_info(host);
2949 	pci_set_master(pdev);
2950 	pci_try_set_mwi(pdev);
2951 	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
2952 				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
2953 }
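/*
 * Editorial note: the probe above relies on managed helpers (pcim_*,
 * devm_*, dmam_*) throughout, which is why every failure path is a bare
 * "return rc" -- the driver core releases whatever was already acquired,
 * and on success ata_host_activate() takes over the IRQ and host
 * lifetime.
 */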
2954 #endif
2956 static int mv_platform_probe(struct platform_device *pdev);
2957 static int __devexit mv_platform_remove(struct platform_device *pdev);
2959 static int __init mv_init(void)
2960 {
2961 	int rc = -ENODEV;
2962 #ifdef CONFIG_PCI
2963 	rc = pci_register_driver(&mv_pci_driver);
2964 	if (rc < 0)
2965 		return rc;
2966 #endif
2967 	rc = platform_driver_register(&mv_platform_driver);
2969 #ifdef CONFIG_PCI
2970 	if (rc < 0)
2971 		pci_unregister_driver(&mv_pci_driver);
2972 #endif
2974 	return rc;
2975 }
2976 static void __exit mv_exit(void)
2977 {
2978 #ifdef CONFIG_PCI
2979 	pci_unregister_driver(&mv_pci_driver);
2980 #endif
2981 	platform_driver_unregister(&mv_platform_driver);
2982 }
2984 MODULE_AUTHOR("Brett Russ");
2985 MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
2986 MODULE_LICENSE("GPL");
2987 MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
2988 MODULE_VERSION(DRV_VERSION);
2989 MODULE_ALIAS("platform:" DRV_NAME);
2991 #ifdef CONFIG_PCI
2992 module_param(msi, int, 0444);
2993 MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
2994 #endif
2996 module_init(mv_init);
2997 module_exit(mv_exit);