/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still lacking.

  2) Convert to LibATA new EH.  Required for hotplug, NCQ, and sane
  probing/error handling in general.  MUST HAVE.

  3) Add hotplug support (easy, once new-EH support appears)

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Test and verify 3.0 Gbps support

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.81"
enum {
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,
	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 176,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
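
	/* For illustration, the arithmetic behind MV_PORT_PRIV_DMA_SZ:
	 *   CRQB queue:  32 B/slot  *  32 slots   = 1024 B (1KB aligned)
	 *   CRPB queue:   8 B/slot  *  32 slots   =  256 B (256B aligned)
	 *   ePRD table:  16 B/entry * 176 entries = 2816 B
	 *   total = 1024 + 256 + 2816 = 4096 B = 4KB per port
	 */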
	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_SATA_RESET | ATA_FLAG_MMIO |
				  ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),
	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
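	/* For example: HC1's pending bits are HC0_IRQ_PEND << HC_SHIFT,
	 * i.e. 0x1ff << 9 = bits 9-17; mv_interrupt() below relies on
	 * this layout when computing its "relevant" mask per HC.
	 */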
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),
	EDMA_ERR_PRD_PAR	= (1 << 1),
	EDMA_ERR_DEV		= (1 << 2),
	EDMA_ERR_DEV_DCON	= (1 << 3),
	EDMA_ERR_DEV_CON	= (1 << 4),
	EDMA_ERR_SERR		= (1 << 5),
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRBQ_PAR	= (1 << 9),
	EDMA_ERR_CRPB_PAR	= (1 << 10),
	EDMA_ERR_INTRL_PAR	= (1 << 11),
	EDMA_ERR_IORDY		= (1 << 12),
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),
	EDMA_ERR_TRANS_PROTO	= (1 << 31),
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_ERR_FATAL		= (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
				   EDMA_ERR_DEV_DCON | EDMA_ERR_CRBQ_PAR |
				   EDMA_ERR_CRPB_PAR | EDMA_ERR_INTRL_PAR |
				   EDMA_ERR_IORDY | EDMA_ERR_LNK_CTRL_RX_2 |
				   EDMA_ERR_LNK_DATA_RX |
				   EDMA_ERR_LNK_DATA_TX |
				   EDMA_ERR_TRANS_PROTO),

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */
	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,
	EDMA_EN			= (1 << 0),
	EDMA_DS			= (1 << 1),
	ATA_RST			= (1 << 2),

	EDMA_IORDY_TMOUT	= 0x34,
	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_50XX		= (1 << 6),
	MV_HP_GEN_IIE		= (1 << 7),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),
};

#define IS_50XX(hpriv)		((hpriv)->hp_flags & MV_HP_50XX)
#define IS_60XX(hpriv)		(((hpriv)->hp_flags & MV_HP_50XX) == 0)
#define IS_GEN_I(hpriv)		IS_50XX(hpriv)
#define IS_GEN_II(hpriv)	IS_60XX(hpriv)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
/* Command ReQuest Block: 32B */
struct mv_crqb {
	__le32			sg_addr;
	__le32			sg_addr_hi;
	__le16			ctrl_flags;
	__le16			ata_cmd[11];
};

struct mv_crqb_iie {
	__le32			addr;
	__le32			addr_hi;
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];
};

/* Command ResPonse Block: 8B */
struct mv_crpb {
	__le16			id;
	__le16			flags;
	__le32			tmstmp;
};

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_sg {
	__le32			addr;
	__le32			flags_size;
	__le32			addr_hi;
	__le32			reserved;
};

struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;
	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static void mv_phy_reset(struct ata_port *ap);
static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_eng_timeout(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_stop_and_reset(struct ata_port *ap);
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.name			= DRV_NAME,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.phy_reset		= mv_phy_reset,
	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.eng_timeout		= mv_eng_timeout,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
static int msi;		/* Use PCI MSI; either zero (off, default) or non-zero */


/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return 0;
}
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}

static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
		MV_SATAHC_ARBTR_REG_SZ +
		(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
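
/* A worked example of the address math above (illustrative only):
 * for port 5 on a dual-HC chip, mv_hc_from_port(5) == 1 and
 * mv_hardport_from_port(5) == 1, so the port registers live at
 * base + 0x20000 + 1*0x10000 (HC1) + 0x2000 (arbiter) + 1*0x2000
 * == base + 0x34000.
 */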
static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	/*
	 * initialize request queue
	 */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl(pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crqb_dma & 0xffffffff,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl(pp->crpb_dma & 0xffffffff,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(0, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
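
/* Note on the pointer registers programmed above: the IN/OUT pointer
 * registers double as queue base registers, so a single 32-bit write
 * carries both the base (upper bits, hence the 1KB/256B alignment
 * checks) and the slot index at the documented shift.  The
 * (dma >> 16) >> 16 idiom extracts the high 32 bits without a 64-bit
 * shift, which would be undefined when dma_addr_t is 32 bits wide.
 */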
/**
 *	mv_start_dma - Enable eDMA engine
 *	@base: port base address
 *	@pp: port private data
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	hardware check.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}
/**
 *	mv_stop_dma - Disable eDMA engine
 *	@ap: ATA channel to manipulate
 *
 *	Verify the local cache of the eDMA state is accurate with a
 *	hardware check.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (MV_PP_FLAG_EDMA_EN & pp->pp_flags) {
		/* Disable EDMA if active.  The disable bit auto clears.
		 */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(EDMA_EN & reg))
			break;
		udelay(100);
	}

	if (EDMA_EN & reg) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		/* FIXME: Consider doing a reset here to recover */
		err = -EIO;
	}
	return err;
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;   /* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		return readl(mv_ap_base(ap) + ofs);
	else
		return (u32) ofs;
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		writelfl(val, mv_ap_base(ap) + ofs);
}
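
/* For reference, the mapping mv_scr_offset() yields with libata's
 * usual SCR numbering (SCR_STATUS 0, SCR_ERROR 1, SCR_CONTROL 2):
 *   SCR_STATUS -> 0x300, SCR_ERROR -> 0x304, SCR_CONTROL -> 0x308,
 *   SCR_ACTIVE -> 0x350 (kept apart from the others, per above).
 */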
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	void *mem;
	dma_addr_t mem_dma;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}
/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	return n_sg;
}
static inline unsigned mv_inc_q_index(unsigned index)
{
	return (index + 1) & MV_MAX_Q_DEPTH_MASK;
}
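
/* e.g. with MV_MAX_Q_DEPTH == 32: mv_inc_q_index(30) == 31 and
 * mv_inc_q_index(31) == 0 -- the mask makes the 32-slot ring wrap.
 */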
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		  (last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
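
/* A packed-word example (assuming libata's taskfile register numbering,
 * where ATA_REG_CMD == 7): packing a READ DMA opcode (0xc8) as the last
 * word via mv_crqb_pack_cmd(cw, 0xc8, ATA_REG_CMD, 1) yields
 * 0xc8 | (7 << 8) | (0x2 << 11) | (1 << 15) == 0x97c8.
 */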
/**
 *	mv_qc_prep - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  Thus we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_prep_iie - Host specific command preparation.
 *	@qc: queued command to prepare
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it handles prep of the CRQB
 *	(command request block), does some sanity checking, and calls
 *	the SG load routine.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from hardware */
	in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS)
		    >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *	mv_qc_issue - Initiate a command to the host
 *	@qc: queued command to start
 *
 *	This routine simply redirects to the general purpose routine
 *	if command is not DMA.  Else, it sanity checks our local
 *	caches of the request producer/consumer indices then enables
 *	DMA and bumps the request producer index.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned in_index;
	u32 in_ptr;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc. registers.
		 */
		mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
	in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			      >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	in_index = mv_inc_q_index(in_index);	/* now incr producer index */

	mv_start_dma(port_mmio, hpriv, pp);

	/* and write the request in pointer to kick the EDMA to life */
	in_ptr &= EDMA_REQ_Q_BASE_LO_MASK;
	in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT;
	writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
/**
 *	mv_get_crpb_status - get status from most recently completed cmd
 *	@ap: ATA channel to manipulate
 *
 *	This routine is for use when the port is in DMA mode, when it
 *	will be using the CRPB (command response block) method of
 *	returning command completion information.  We check indices
 *	are good, grab status, and bump the response consumer index to
 *	prove that we're up to date.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 mv_get_crpb_status(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	unsigned out_index;
	u32 out_ptr;
	u8 ata_status;

	out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
	out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	ata_status = le16_to_cpu(pp->crpb[out_index].flags)
		>> CRPB_FLAG_STATUS_SHIFT;

	/* increment our consumer index... */
	out_index = mv_inc_q_index(out_index);

	/* and, until we do NCQ, there should only be 1 CRPB waiting */
	WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			       >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	/* write out our inc'd consumer index so EDMA knows we're caught up */
	out_ptr &= EDMA_RSP_Q_BASE_LO_MASK;
	out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT;
	writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);

	/* Return ATA status register for completed CRPB */
	return ata_status;
}
/**
 *	mv_err_intr - Handle error interrupts on the port
 *	@ap: ATA channel to manipulate
 *	@reset_allowed: bool: 0 == don't trigger from reset here
 *
 *	In most cases, just clear the interrupt and move on.  However,
 *	some cases require an eDMA reset, which is done right before
 *	the COMRESET in mv_phy_reset().  The SERR case requires a
 *	clear of pending errors in the SATA SERROR register.  Finally,
 *	if the port disabled DMA, update our cached copy to match.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, int reset_allowed)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, serr = 0;

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (EDMA_ERR_SERR & edma_err_cause) {
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}
	if (EDMA_ERR_SELF_DIS & edma_err_cause) {
		struct mv_port_priv *pp	= ap->private_data;
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	}
	DPRINTK(KERN_ERR "ata%u: port error; EDMA err cause: 0x%08x "
		"SERR: 0x%08x\n", ap->print_id, edma_err_cause, serr);

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* check for fatal here and recover if needed */
	if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause))
		mv_stop_and_reset(ap);
}
/**
 *	mv_host_intr - Handle all interrupts on the given host controller
 *	@host: host specific structure
 *	@relevant: port error bits relevant to this host controller
 *	@hc: which host controller we're to look at
 *
 *	Read then write clear the HC interrupt status then walk each
 *	port connected to the HC and see if it needs servicing.  Port
 *	success ints are reported in the HC interrupt status reg, the
 *	port error ints are reported in the higher level main
 *	interrupt status register and thus are passed in via the
 *	'relevant' argument.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	struct ata_queued_cmd *qc;
	u32 hc_irq_cause;
	int port, port0;
	int shift, hard_port, handled;
	unsigned int err_mask;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		u8 ata_status = 0;
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;

		hard_port = mv_hardport_from_port(port); /* range 0..3 */
		handled = 0;	/* ensure ata_status is set if handled++ */

		/* Note that DEV_IRQ might happen spuriously during EDMA,
		 * and should be ignored in such cases.
		 * The cause of this is still under investigation.
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			/* EDMA: check for response queue interrupt */
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause) {
				ata_status = mv_get_crpb_status(ap);
				handled = 1;
			}
		} else {
			/* PIO: check for device (drive) interrupt */
			if ((DEV_IRQ << hard_port) & hc_irq_cause) {
				ata_status = readb(ap->ioaddr.status_addr);
				handled = 1;
				/* ignore spurious intr if drive still BUSY */
				if (ata_status & ATA_BUSY) {
					ata_status = 0;
					handled = 0;
				}
			}
		}

		if (ap && (ap->flags & ATA_FLAG_DISABLED))
			continue;

		err_mask = ac_err_mask(ata_status);

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		if ((PORT0_ERR << shift) & relevant) {
			mv_err_intr(ap, 1);
			err_mask |= AC_ERR_OTHER;
			handled = 1;
		}

		if (handled) {
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->flags & ATA_QCFLAG_ACTIVE)) {
				VPRINTK("port %u IRQ found for qc, "
					"ata_status 0x%x\n", port, ata_status);
				/* mark qc status appropriately */
				if (!(qc->tf.flags & ATA_TFLAG_POLLING)) {
					qc->err_mask |= err_mask;
					ata_qc_complete(qc);
				}
			}
		}
	}
	VPRINTK("EXIT\n");
}
/**
 *	mv_interrupt - Main interrupt event handler
 *	@irq: unused
 *	@dev_instance: private data; in this case the host structure
 *
 *	Read the read only register to determine if any host
 *	controllers have pending interrupts.  If so, call lower level
 *	routine to handle.  Also check for PCI errors which are only
 *	reported here.
 *
 *	LOCKING:
 *	This routine holds the host lock while processing pending
 *	interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv;
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled++;
		}
	}

	hpriv = host->private_data;
	if (IS_60XX(hpriv)) {
		/* deal with the interrupt coalescing bits */
		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI);
			writelfl(0, mmio + MV_IRQ_COAL_CAUSE);
		}
	}

	if (PCI_ERR & irq_stat) {
		printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n",
		       readl(mmio + PCI_IRQ_CAUSE_OFS));

		DPRINTK("All regs @ PCI error\n");
		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
		handled++;
	}
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}
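
/* e.g. port 6: mv_hc_base_from_port() gives HC1's base, the hard port
 * is 6 & 3 == 2, so the PHY block sits at hc_mmio + (2 + 1) * 0x100
 * == hc_mmio + 0x300.
 */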
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(addr + ofs);
	else
		return (u32) ofs;
}

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, addr + ofs);
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u8 rev_id;
	int early_5080;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	early_5080 = (pdev->device == 0x5080) && (rev_id == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}

#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO

static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *	mv6_reset_hc - Perform the 6xxx global soft reset
 *	@mmio: base address of the HBA
 *
 *	This routine only applies to 6xxx parts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_60XX(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_50XX(hpriv))
		mdelay(1);
}
static void mv_stop_and_reset(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	__mv_phy_reset(ap, 0);
}

static inline void __msleep(unsigned int msec, int can_sleep)
{
	if (can_sleep)
		msleep(msec);
	else
		mdelay(msec);
}
/**
 *	__mv_phy_reset - Perform eDMA reset followed by COMRESET
 *	@ap: ATA channel to manipulate
 *
 *	Part of this is taken from __sata_phy_reset and modified to
 *	not sleep since this routine gets called from interrupt level.
 *
 *	LOCKING:
 *	Inherited from caller.  This is coded to be safe to call at
 *	interrupt level, i.e. it does not sleep.
 */
static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct ata_taskfile tf;
	struct ata_device *dev = &ap->device[0];
	unsigned long deadline;
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	__msleep(1, can_sleep);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	__msleep(20, can_sleep);

	deadline = jiffies + msecs_to_jiffies(200);
	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		__msleep(1, can_sleep);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_60XX(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_online(ap)) {
		ata_port_probe(ap);
	} else {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		ata_port_printk(ap, KERN_INFO,
				"no device found (phy stat %08x)\n", sstatus);
		ata_port_disable(ap);
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that here.
	 */
	retry = 20;
	do {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		__msleep(500, can_sleep);
	} while (retry-- > 0);

	tf.lbah = readb(ap->ioaddr.lbah_addr);
	tf.lbam = readb(ap->ioaddr.lbam_addr);
	tf.lbal = readb(ap->ioaddr.lbal_addr);
	tf.nsect = readb(ap->ioaddr.nsect_addr);

	dev->class = ata_dev_classify(&tf);
	if (!ata_dev_enabled(dev)) {
		VPRINTK("Port disabled post-sig: No device present.\n");
		ata_port_disable(ap);
	}

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	VPRINTK("EXIT\n");
}
static void mv_phy_reset(struct ata_port *ap)
{
	__mv_phy_reset(ap, 1);
}

/**
 *	mv_eng_timeout - Routine called by libata when SCSI times out I/O
 *	@ap: ATA channel to manipulate
 *
 *	Intent is to clear all pending error conditions, reset the
 *	chip/bus, fail the command, and move on.
 *
 *	LOCKING:
 *	This routine holds the host lock while failing the command.
 */
static void mv_eng_timeout(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	struct ata_queued_cmd *qc;
	unsigned long flags;

	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
	DPRINTK("All regs @ start of eng_timeout\n");
	mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));

	qc = ata_qc_from_tag(ap, ap->active_tag);
	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
	       mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_err_intr(ap, 0);
	mv_stop_and_reset(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);

	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
		qc->err_mask |= AC_ERR_TIMEOUT;
		ata_eh_qc_complete(qc);
	}
}
/**
 *	mv_port_init - Perform some early initialization on a single port.
 *	@port: libata data structure storing shadow register addresses
 *	@port_mmio: base address of the port
 *
 *	Initialize shadow register mmio addresses, clear outstanding
 *	interrupts on the port, and unmask interrupts for the future
 *	start of the port.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port,  void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
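
	/* Note the sizeof(u32) stride above: unlike the byte-spaced legacy
	 * taskfile, the Marvell shadow block exposes each register on its
	 * own 32-bit word, e.g. (illustratively, with the usual numbering
	 * ATA_REG_DATA == 0, ATA_REG_NSECT == 2) data at shd_base + 0x0
	 * and nsect at shd_base + 0x8.
	 */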
	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 rev_id;
	u32 hp_flags = hpriv->hp_flags;

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_50XX;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;

		switch (rev_id) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (rev_id) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;

	return 0;
}
/**
 *	mv_init_host - Perform some early initialization of the host.
 *	@host: ATA host to initialize
 *	@board_idx: controller index
 *
 *	If possible, do an early global reset of the host.  Then do
 *	our port init and clear/unmask all/relevant host interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_60XX(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_50XX(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));
done:
	return rc;
}
/**
 *	mv_print_info - Dump key info to kernel log for perusal.
 *	@host: ATA host to print info about
 *
 *	FIXME: complete this.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 rev_id, scc;
	const char *scc_s, *gen;

	/* Use this to determine the HW stepping of the chip so we know
	 * what errata to workaround
	 */
	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);

	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "unknown";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
/**
 *	mv_init_one - handle a positive probe of a Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
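
/* Usage note (illustrative): the driver binds through mv_pci_tbl above;
 * MSI can be requested at module load time with "modprobe sata_mv msi=1".
 */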
module_init(mv_init);
module_exit(mv_exit);