/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still pending.

  4) Add NCQ support (easy to intermediate, once new-EH support appears)

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  7) Test and verify 3.0 Gbps support

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

  13) Verify that 7042 is fully supported.  I only have a 6042.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <linux/libata.h>

#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"0.81"
enum {
	/* BARs are enumerated in pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE	= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO	= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI	= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
	MV_PORT_PRIV_DMA_SZ	= (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
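	/* Illustrative arithmetic (not in the original source): assuming
	 * MV_MAX_Q_DEPTH == 32 (the 32-slot queues described below) and
	 * MV_MAX_SG_CT == 176, the per-port DMA area is exactly one page:
	 *
	 *	MV_CRQB_Q_SZ = 32 * 32  = 1024 bytes	(1KB of CRQB slots)
	 *	MV_CRPB_Q_SZ =  8 * 32  =  256 bytes	(CRPB slots)
	 *	MV_SG_TBL_SZ = 16 * 176 = 2816 bytes	(ePRD entries)
	 *	total        = 1024 + 256 + 2816 = 4096 bytes (4KB)
	 */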
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
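	/* Worked example (illustrative): on an 8-port, dual-HC chip with
	 * four ports per HC, global port 6 maps to
	 *
	 *	hc       = 6 >> MV_PORT_HC_SHIFT;	== 1
	 *	hardport = 6 & MV_PORT_MASK;		== 2
	 *
	 * matching the mv_hc_from_port()/mv_hardport_from_port() helpers
	 * further down.
	 */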
	MV_FLAG_DUAL_HC		= (1 << 30),	/* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),	/* IRQ coalescing capability */
	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	CRQB_FLAG_READ		= (1 << 0),
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
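	/* Illustrative decode (not in the original source): the per-HC
	 * slice of the main cause register is selected with
	 *
	 *	u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
	 *
	 * i.e. bits 0-8 for HC0 and bits 9-17 for HC1, exactly as
	 * mv_interrupt() does below.
	 */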
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	/* SATAHC registers */

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,	/* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,

	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
	EDMA_CFG_NCQ		= (1 << 5),
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */
	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),
	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */
	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */
	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,
	EDMA_IORDY_TMOUT	= 0x34,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),
	MV_HP_GEN_II		= (1 << 7),
	MV_HP_GEN_IIE		= (1 << 8),

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),
	MV_PP_FLAG_EDMA_DS_ACT	= (1 << 1),
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),
};

#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)

enum {
	MV_DMA_BOUNDARY		= 0xffffffffU,

	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};

/* Command ReQuest Block: 32B */

/* Command ResPonse Block: 8B */

/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
struct mv_port_priv {
	struct mv_crqb		*crqb;
	dma_addr_t		crqb_dma;
	struct mv_crpb		*crpb;
	dma_addr_t		crpb_dma;
	struct mv_sg		*sg_tbl;
	dma_addr_t		sg_tbl_dma;

	unsigned int		req_idx;
	unsigned int		resp_idx;

	u32			pp_flags;
};

struct mv_port_signal {
	u32			amps;
	u32			pre;
};

struct mv_hw_ops {
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
};

struct mv_host_priv {
	u32			hp_flags;
	struct mv_port_signal	signal[8];
	const struct mv_hw_ops	*ops;
};
static void mv_irq_clear(struct ata_port *ap);
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_post_int_cmd(struct ata_queued_cmd *qc);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);

static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static struct scsi_host_template mv5_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};

static struct scsi_host_template mv6_sht = {
	.module			= THIS_MODULE,
	.ioctl			= ata_scsi_ioctl,
	.queuecommand		= ata_scsi_queuecmd,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= MV_MAX_SG_CT,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.emulated		= ATA_SHT_EMULATED,
	.proc_name		= DRV_NAME,
	.dma_boundary		= MV_DMA_BOUNDARY,
	.slave_configure	= ata_scsi_slave_config,
	.slave_destroy		= ata_scsi_slave_destroy,
	.bios_param		= ata_std_bios_param,
};
static const struct ata_port_operations mv5_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv6_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

static const struct ata_port_operations mv_iie_ops = {
	.port_disable		= ata_port_disable,

	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.cable_detect		= ata_cable_sata,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= mv_irq_clear,
	.irq_on			= ata_irq_on,
	.irq_ack		= ata_irq_ack,

	.error_handler		= mv_error_handler,
	.post_internal_cmd	= mv_post_int_cmd,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
	{
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	/* add Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	{ }			/* terminate list */
};

static struct pci_driver mv_pci_driver = {
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};

static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
static int msi;	/* Use PCI msi; either zero (off, default) or non-zero */

/* move to PCI layer or libata core? */
static int pci_go_64(struct pci_dev *pdev)
{
	int rc;

	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
		rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
		if (rc) {
			rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
			if (rc) {
				dev_printk(KERN_ERR, &pdev->dev,
					   "64-bit DMA enable failed\n");
				return rc;
			}
		}
	} else {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit DMA enable failed\n");
			return rc;
		}
		rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "32-bit consistent DMA enable failed\n");
			return rc;
		}
	}

	return rc;
}
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
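/*
 * Usage note (illustrative): writes to cause/mask registers below use
 * writelfl() when the write must have taken effect before the CPU
 * proceeds, e.g.
 *
 *	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 *
 * since the read-back forces any posted PCI write to complete.
 */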
static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
{
	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
}

static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}

static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}

static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}

static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
{
	return mv_hc_base_from_port(base, port) +
	       MV_SATAHC_ARBTR_REG_SZ +
	       (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
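/*
 * Worked example (illustrative): with the register-size constants above,
 * port 5 (hc 1, hardport 1) resolves to
 *
 *	base + 0x20000 + 1*0x10000	(SATAHC1)
 *	     + 0x2000			(skip the arbiter block)
 *	     + 1*0x2000			(hardport 1)
 *	     = base + 0x34000
 */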
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}

static void mv_irq_clear(struct ata_port *ap)
{
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
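/*
 * Register packing note (illustrative, not from the original source):
 * the in/out pointer registers carry the queue base address in their
 * upper bits and the ring index in the low bits, so for req_idx == 3
 * the index field is 3 << EDMA_REQ_Q_PTR_SHIFT == 0x60, OR'd into
 * crqb_dma's 1KB-aligned base.  The "(dma >> 16) >> 16" idiom above is
 * a portable way to take the high 32 bits of a dma_addr_t without a
 * shift-by-32, which would be undefined when dma_addr_t is 32 bits.
 */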
/**
 *      mv_start_dma - Enable eDMA engine
 *      @base: port base address
 *      @pp: port private data
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_start_dma(void __iomem *base, struct mv_host_priv *hpriv,
			 struct mv_port_priv *pp)
{
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		/* clear EDMA event indicators, if any */
		writelfl(0, base + EDMA_ERR_IRQ_CAUSE_OFS);

		mv_set_edma_ptrs(base, hpriv, pp);

		writelfl(EDMA_EN, base + EDMA_CMD_OFS);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
	}
	WARN_ON(!(EDMA_EN & readl(base + EDMA_CMD_OFS)));
}

/**
 *      mv_stop_dma - Disable eDMA engine
 *      @ap: ATA channel to manipulate
 *
 *      Verify the local cache of the eDMA state is accurate with a
 *      WARN_ON.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;

	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
	int b, w;
	u32 dw;

	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
}

static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;	/* should be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
}
static unsigned int mv_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_CONTROL:
	case SCR_ERROR:
		ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
		break;
	case SCR_ACTIVE:
		ofs = SATA_ACTIVE_OFS;	/* active is not with the others */
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}
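/*
 * Illustrative mapping (assuming libata's SCR_STATUS == 0, SCR_ERROR == 1
 * and SCR_CONTROL == 2): mv_scr_offset(SCR_ERROR) == 0x300 + 1*4 == 0x304,
 * mv_scr_offset(SCR_CONTROL) == 0x308, while SActive sits apart at 0x350.
 */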
static u32 mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		return readl(mv_ap_base(ap) + ofs);
	else
		return (u32) ofs;
}

static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	unsigned int ofs = mv_scr_offset(sc_reg_in);

	if (0xffffffffU != ofs)
		writelfl(val, mv_ap_base(ap) + ofs);
}
static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);

	/* set up non-NCQ EDMA configuration */
	cfg &= ~(1 << 9);	/* disable eQue */

	if (IS_GEN_I(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= (1 << 8);	/* enab config burst size mask */
	}

	else if (IS_GEN_II(hpriv)) {
		cfg &= ~0x1f;		/* clear queue depth */
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
	}

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
/**
 *      mv_port_start - Port specific init/start routine.
 *      @ap: ATA channel to manipulate
 *
 *      Allocate and point to DMA memory, init port private memory,
 *      zero indices.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	dma_addr_t mem_dma;
	void *mem;
	int rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
				  GFP_KERNEL);
	if (!mem)
		return -ENOMEM;
	memset(mem, 0, MV_PORT_PRIV_DMA_SZ);

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	/* First item in chunk of DMA memory:
	 * 32-slot command request table (CRQB), 32 bytes each in size
	 */
	pp->crqb = mem;
	pp->crqb_dma = mem_dma;
	mem += MV_CRQB_Q_SZ;
	mem_dma += MV_CRQB_Q_SZ;

	/* Second item:
	 * 32-slot command response table (CRPB), 8 bytes each in size
	 */
	pp->crpb = mem;
	pp->crpb_dma = mem_dma;
	mem += MV_CRPB_Q_SZ;
	mem_dma += MV_CRPB_Q_SZ;

	/* Third item:
	 * Table of scatter-gather descriptors (ePRD), 16 bytes each
	 */
	pp->sg_tbl = mem;
	pp->sg_tbl_dma = mem_dma;

	mv_edma_cfg(ap, hpriv, port_mmio);

	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	ap->private_data = pp;
	return 0;
}
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	unsigned long flags;

	spin_lock_irqsave(&ap->host->lock, flags);
	mv_stop_dma(ap);
	spin_unlock_irqrestore(&ap->host->lock, flags);
}
/**
 *      mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *      @qc: queued command whose SG list to source from
 *
 *      Populate the SG list and mark the last entry.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	unsigned int n_sg = 0;
	struct scatterlist *sg;
	struct mv_sg *mv_sg;

	mv_sg = pp->sg_tbl;
	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
		mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
		mv_sg->flags_size = cpu_to_le32(sg_len & 0xffff);

		if (ata_sg_is_last(sg, qc))
			mv_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);

		mv_sg++;
		n_sg++;
	}

	return n_sg;
}
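/*
 * ePRD layout note (illustrative, not from the original source): each
 * 16-byte entry splits the DMA address across two 32-bit words and keeps
 * the byte count in the low 16 bits of flags_size; EPRD_FLAG_END_OF_TBL
 * set in the final entry tells the EDMA engine where the table stops.
 */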
static inline void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
{
	u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
		(last ? CRQB_CMD_LAST : 0);
	*cmdw = cpu_to_le16(tmp);
}
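/*
 * Worked example (illustrative, assuming libata's ATA_REG_CMD == 7):
 * packing command byte 0x25 (READ DMA EXT) as the final word gives
 *
 *	0x25 | (7 << 8) | CRQB_CMD_CS | CRQB_CMD_LAST
 *	     = 0x25 | 0x700 | 0x1000 | 0x8000 = 0x9725
 */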
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* 50xx appears to ignore this */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
#ifdef LIBATA_NCQ		/* FIXME: remove this line when NCQ added */
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
#endif				/* FIXME: remove this line when NCQ added */
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_prep_iie - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it handles prep of the CRQB
 *      (command request block), does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	if (qc->tf.protocol != ATA_PROT_DMA)
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_IOID_SHIFT;	/* "I/O Id" is -really-
						   what we use as our tag */

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	u32 in_index;

	if (qc->tf.protocol != ATA_PROT_DMA) {
		/* We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_dma(ap);
		return ata_qc_issue_prot(qc);
	}

	mv_start_dma(port_mmio, hpriv, pp);

	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* until we do queuing, the queue should be empty at this point */
	WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
			      >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));

	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: affected queued command, if any (may be NULL)
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which is done right before
 *      the COMRESET in mv_phy_reset().  The SERR case requires a
 *      clear of pending errors in the SATA SERROR register.  Finally,
 *      if the port disabled DMA, update our cached copy to match.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(ap, SCR_ERROR, &serr);
		sata_scr_write_flush(ap, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);

	/*
	 * all generations share these EDMA error cause bits
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_HARDRESET;
		ata_ehi_push_desc(ehi, ", parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			", dev disconnect" : ", dev connect");
	}

	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;

		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, ", EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;

		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			struct mv_port_priv *pp = ap->private_data;
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, ", EDMA self-disable");
		}

		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(ap, SCR_ERROR, &serr);
			sata_scr_write_flush(ap, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_HARDRESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_HARDRESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else if (IS_GEN_II(hpriv))
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_6) & 0x3f;

		else /* IS_GEN_IIE */
			tag = (le16_to_cpu(pp->crpb[out_index].id)
				>> CRPB_IOID_SHIFT_7) & 0x3f;

		qc = ata_qc_from_tag(ap, tag);

		/* lower 8 bits of status are EDMA_ERR_IRQ_CAUSE_OFS
		 * bits (WARNING: might not necessarily be associated
		 * with this command), which -should- be clear
		 * if all is well
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if (unlikely(status & 0xff)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp = ap->private_data;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
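		/* Worked example (illustrative): for port 5 this gives
		 * shift = 5*2 + 1 = 11, so its error bit in 'relevant' is
		 * PORT0_ERR << 11 and its done bit PORT0_DONE << 11,
		 * skipping the coalescing bit 8 (PORTS_0_3_COAL_DONE).
		 */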
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + PCI_IRQ_CAUSE_OFS);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_port_offline(ap)) {
			ehi = &ap->eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_HARDRESET;
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	u32 irq_stat;

	irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!irq_stat || (0xffffffffU == irq_stat))
		return IRQ_NONE;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);
	spin_lock(&host->lock);

	if (unlikely(irq_stat & PCI_ERR)) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
	void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
	unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;

	return hc_mmio + ofs;
}

static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
	unsigned int ofs;

	switch (sc_reg_in) {
	case SCR_STATUS:
	case SCR_ERROR:
	case SCR_CONTROL:
		ofs = sc_reg_in * sizeof(u32);
		break;
	default:
		ofs = 0xffffffffU;
		break;
	}
	return ofs;
}

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		return readl(addr + ofs);
	else
		return (u32) ofs;
}

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU)
		writelfl(val, addr + ofs);
}
static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	int early_5080;

	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv_reset_pci_bus(pdev, mmio);
}

static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}

static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}

static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}

static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
#define ZERO(reg) writel(0, port_mmio + (reg))
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			      unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO

static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	unsigned int hc, port;

	for (hc = 0; hc < n_hc; hc++) {
		for (port = 0; port < MV_PORTS_PER_HC; port++)
			mv5_reset_hc_port(hpriv, mmio,
					  (hc * MV_PORTS_PER_HC) + port);

		mv5_reset_one_hc(hpriv, mmio, hc);
	}

	return 0;
}
#define ZERO(reg) writel(0, mmio + (reg))
static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
{
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(PCI_IRQ_CAUSE_OFS);
	ZERO(PCI_IRQ_MASK_OFS);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @mmio: base address of the HBA
 *
 *      This routine only applies to 6xxx parts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t) {
			break;
		}
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *port_mmio;
	u32 tmp;

	tmp = readl(mmio + MV_RESET_CFG);
	if ((tmp & (1 << 0)) == 0) {
		hpriv->signal[idx].amps = 0x7 << 8;
		hpriv->signal[idx].pre = 0x1 << 5;
		return;
	}

	port_mmio = mv_port_base(mmio, idx);
	tmp = readl(port_mmio + PHY_MODE2);

	hpriv->signal[idx].amps = tmp & 0x700;	/* bits 10:8 */
	hpriv->signal[idx].pre = tmp & 0xe0;	/* bits 7:5 */
}

static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	if (IS_GEN_I(hpriv))
		mdelay(1);
}
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *
 *      Part of this is taken from __sata_phy_reset and modified to
 *      not sleep since this routine gets called from interrupt level.
 *
 *      LOCKING:
 *      Inherited from caller.  This is coded to be safe to call at
 *      interrupt level, i.e. it does not sleep.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

	DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
	msleep(20);

	do {
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

	DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
		"SCtrl 0x%08x\n", mv_scr_read(ap, SCR_STATUS),
		mv_scr_read(ap, SCR_ERROR), mv_scr_read(ap, SCR_CONTROL));

	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;

		msleep(500);

		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap, 0, NULL);

	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
}
static int mv_prereset(struct ata_port *ap, unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct ata_eh_context *ehc = &ap->eh_context;
	int rc;

	rc = mv_stop_dma(ap);
	if (rc)
		ehc->i.action |= ATA_EH_HARDRESET;

	if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
		pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
		ehc->i.action |= ATA_EH_HARDRESET;
	}

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	if (ata_port_online(ap))
		rc = ata_wait_ready(ap, deadline);

	return rc;
}
static int mv_hardreset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}

static void mv_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serr;

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	sata_scr_read(ap, SCR_ERROR, &serr);
	sata_scr_write_flush(ap, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}

static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}

static void mv_post_int_cmd(struct ata_queued_cmd *qc)
{
	mv_stop_dma(qc->ap);
}
2278 static void mv_eh_freeze(struct ata_port *ap)
2280 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
2281 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2285 /* FIXME: handle coalescing completion events properly */
2287 shift = ap->port_no * 2;
2291 mask = 0x3 << shift;
2293 /* disable assertion of portN err, done events */
2294 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2295 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);

static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}
	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
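
/*
 * Unlike the main mask, the HC-local cause register indexes ports
 * relative to their own host controller, hence hc_port_no -= 4 for
 * ports 4-7: bit N acks CRPB-done and bit N+8 acks the device
 * interrupt for HC-local port N.
 */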

/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all EDMA error interrupts */
	writelfl(~0, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
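
/*
 * The shadow block spaces the taskfile registers one u32 apart, so the
 * assignments above land at shd_base + 0x00 (data) through + 0x1c
 * (status/command), with control/altstatus off in its own
 * SHD_CTL_AST_OFS slot rather than at an ATA_REG_ offset.
 */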

static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;
		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
		}
		break;
	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;
		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
		}
		break;
	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;
		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
		}
		break;
	case chip_7042:
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;
		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
		}
		break;
	default:
		printk(KERN_ERR DRV_NAME ": BUG: invalid board index %u\n",
		       board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	return 0;
}
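
/*
 * The flags settled on here drive every generation-specific decision
 * later: for example, a 6042 at revision 0x1 ends up with
 * MV_HP_GEN_IIE | MV_HP_ERRATA_60X1C0, selecting mv6xxx_ops and the
 * IS_GEN_IIE() paths in command setup and reset handling.
 */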

/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		void __iomem *port_mmio = mv_port_base(mmio, port);
		mv_port_init(&host->ports[port]->ioaddr, port_mmio);
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);

	/* and unmask interrupt generation for host regs */
	writelfl(PCI_UNMASK_ALL_IRQS, mmio + PCI_IRQ_MASK_OFS);

	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + PCI_IRQ_CAUSE_OFS),
		readl(mmio + PCI_IRQ_MASK_OFS));

done:
	return rc;
}
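
/*
 * Note the ordering above: the main mask is closed before any port is
 * touched, causes are cleared next, and only then are the PCI and main
 * masks opened.  The writelfl() variant flushes each write with a
 * readback, so every step has reached the chip before the next begins.
 */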

/**
 *      mv_print_info - Dump key info to kernel log for perusal.
 *      @host: ATA host to print info about
 *
 *      FIXME: complete this.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_print_info(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u8 scc;
	const char *scc_s, *gen;

	/* Read the PCI sub-class code: it tells us whether the chip
	 * identifies itself as a SCSI or a RAID controller.
	 */
	pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
	if (scc == 0)
		scc_s = "SCSI";
	else if (scc == 0x01)
		scc_s = "RAID";
	else
		scc_s = "?";

	if (IS_GEN_I(hpriv))
		gen = "I";
	else if (IS_GEN_II(hpriv))
		gen = "II";
	else if (IS_GEN_IIE(hpriv))
		gen = "IIE";
	else
		gen = "?";

	dev_printk(KERN_INFO, &pdev->dev,
		   "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
		   gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
		   scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
}
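
/*
 * A representative line, assuming the usual queue depth of 32 on an
 * 8-port Gen-II part in plain SCSI mode without MSI:
 *
 *   sata_mv 0000:02:00.0: Gen-II 32 slots 8 ports SCSI mode IRQ via INTx
 */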

/**
 *      mv_init_one - handle a positive probe of a Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int printed_version = 0;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
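
/*
 * All resources acquired above are managed (pcim_*/devm_*): they are
 * released automatically if probe fails partway or when the driver is
 * unbound, which is why there is no error-unwind ladder here and no
 * driver-specific teardown beyond the libata defaults.
 */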

static int __init mv_init(void)
{
	return pci_register_driver(&mv_pci_driver);
}

static void __exit mv_exit(void)
{
	pci_unregister_driver(&mv_pci_driver);
}

MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);