/*
* sata_mv.c - Marvell SATA support
*
+ * Copyright 2008: Marvell Corporation, all rights reserved.
* Copyright 2005: EMC Corporation, all rights reserved.
* Copyright 2005 Red Hat, Inc. All rights reserved.
*
I distinctly remember that a couple of workarounds (one related to PCI-X)
are still needed.
- 4) Add NCQ support (easy to intermediate, once new-EH support appears)
+ 2) Improve/fix IRQ and error handling sequences.
+
+ 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
+
+ 4) Think about TCQ support here, and for libata in general
+ with controllers that support it via host-queuing hardware
+ (a software-only implementation could be a nightmare).
5) Investigate problems with PCI Message Signalled Interrupts (MSI).
- 6) Add port multiplier support (intermediate)
+ 6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
+
+ 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
8) Develop a low-power-consumption strategy, and implement it.
Target mode, for those without docs, is the ability to directly
connect two SATA controllers.
- 13) Verify that 7042 is fully supported. I only have a 6042.
-
*/
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ata_platform.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>
#define DRV_NAME "sata_mv"
-#define DRV_VERSION "1.01"
+#define DRV_VERSION "1.20"
enum {
/* BARs are enumerated in pci_resource_start() terms */
/* CRQB needs alignment on a 1KB boundary. Size == 1KB
* CRPB needs alignment on a 256B boundary. Size == 256B
- * SG count of 176 leads to MV_PORT_PRIV_DMA_SZ == 4KB
* ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
*/
MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
- MV_MAX_SG_CT = 176,
+ MV_MAX_SG_CT = 256,
MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
- MV_PORT_PRIV_DMA_SZ = (MV_CRQB_Q_SZ + MV_CRPB_Q_SZ + MV_SG_TBL_SZ),
MV_PORTS_PER_HC = 4,
/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
/* Host Flags */
MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
+ /* SoC integrated controllers, no PCI interface */
+ MV_FLAG_SOC = (1 << 28),
+
MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
ATA_FLAG_PIO_POLLING,
CRQB_FLAG_READ = (1 << 0),
CRQB_TAG_SHIFT = 1,
CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
+ CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
CRQB_CMD_ADDR_SHIFT = 8,
CRQB_CMD_CS = (0x2 << 11),
HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
HC_MAIN_IRQ_MASK_OFS = 0x1d64,
+ HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
+ HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
PORT0_ERR = (1 << 0), /* shift by port # */
PORT0_DONE = (1 << 1), /* shift by port # */
HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
TWSI_INT = (1 << 24),
HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
+ HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
HC_MAIN_RSVD),
HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
HC_MAIN_RSVD_5),
+ HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
/* SATAHC registers */
HC_CFG_OFS = 0,
SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
SATA_ACTIVE_OFS = 0x350,
SATA_FIS_IRQ_CAUSE_OFS = 0x364,
+
+ LTMODE_OFS = 0x30c,
+ LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
+
PHY_MODE3 = 0x310,
PHY_MODE4 = 0x314,
PHY_MODE2 = 0x330,
+ SATA_IFCTL_OFS = 0x344,
+ SATA_IFSTAT_OFS = 0x34c,
+ VENDOR_UNIQUE_FIS_OFS = 0x35c,
+
+ FIS_CFG_OFS = 0x360,
+ FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
+
MV5_PHY_MODE = 0x74,
MV5_LT_MODE = 0x30,
MV5_PHY_CTL = 0x0C,
- SATA_INTERFACE_CTL = 0x050,
+ SATA_INTERFACE_CFG = 0x050,
MV_M2_PREAMP_MASK = 0x7e0,
EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
+ EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
+ EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
EDMA_ERR_IRQ_MASK_OFS = 0xc,
EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
EDMA_ERR_LNK_CTRL_RX_1 |
EDMA_ERR_LNK_CTRL_RX_3 |
- EDMA_ERR_LNK_CTRL_TX,
+ EDMA_ERR_LNK_CTRL_TX |
+ /* temporary, until we fix hotplug: */
+ (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
EDMA_ERR_PRD_PAR |
EDMA_ERR_LNK_DATA_RX |
EDMA_ERR_LNK_DATA_TX |
EDMA_ERR_TRANS_PROTO,
+
EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
EDMA_ERR_PRD_PAR |
EDMA_ERR_DEV_DCON |
/* Port private flags (pp_flags) */
MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
- MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
};
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
+#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
enum {
/* DMA boundary 0xffff is required by the s/g splitting
chip_608x,
chip_6042,
chip_7042,
+ chip_soc,
};
/* Command ReQuest Block: 32B */
dma_addr_t crqb_dma;
struct mv_crpb *crpb;
dma_addr_t crpb_dma;
- struct mv_sg *sg_tbl;
- dma_addr_t sg_tbl_dma;
+ struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
+ dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
unsigned int req_idx;
unsigned int resp_idx;
u32 hp_flags;
struct mv_port_signal signal[8];
const struct mv_hw_ops *ops;
+ int n_ports;
+ void __iomem *base;
+ void __iomem *main_cause_reg_addr;
+ void __iomem *main_mask_reg_addr;
u32 irq_cause_ofs;
u32 irq_mask_ofs;
u32 unmask_all_irqs;
+ /*
+ * These consistent DMA memory pools give us guaranteed
+ * alignment for hardware-accessed data structures,
+ * and less memory waste in accomplishing the alignment.
+ */
+ struct dma_pool *crqb_pool;
+ struct dma_pool *crpb_pool;
+ struct dma_pool *sg_tbl_pool;
};
struct mv_hw_ops {
int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc);
void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
- void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
+ void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
-static void mv_irq_clear(struct ata_port *ap);
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
-static void mv_error_handler(struct ata_port *ap);
-static void mv_post_int_cmd(struct ata_queued_cmd *qc);
+static int mv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
-static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void mv6_dev_config(struct ata_device *dev);
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
-static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
+static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
-static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
-static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
+static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
+ void __iomem *mmio);
+static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio);
+static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+ void __iomem *mmio, unsigned int n_hc);
+static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
+ void __iomem *mmio);
+static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
+static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
unsigned int port_no);
-static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
- void __iomem *port_mmio, int want_ncq);
-static int __mv_stop_dma(struct ata_port *ap);
-
+static int mv_stop_edma(struct ata_port *ap);
+static int mv_stop_edma_engine(void __iomem *port_mmio);
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
+
+static void mv_pmp_select(struct ata_port *ap, int pmp);
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+static int mv_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline);
+
+/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
+ * because we have to allow room for worst case splitting of
+ * PRDs for 64K boundaries in mv_fill_sg().
+ */
static struct scsi_host_template mv5_sht = {
- .module = THIS_MODULE,
- .name = DRV_NAME,
- .ioctl = ata_scsi_ioctl,
- .queuecommand = ata_scsi_queuecmd,
- .can_queue = ATA_DEF_QUEUE,
- .this_id = ATA_SHT_THIS_ID,
+ ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = MV_MAX_SG_CT / 2,
- .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
- .emulated = ATA_SHT_EMULATED,
- .use_clustering = 1,
- .proc_name = DRV_NAME,
.dma_boundary = MV_DMA_BOUNDARY,
- .slave_configure = ata_scsi_slave_config,
- .slave_destroy = ata_scsi_slave_destroy,
- .bios_param = ata_std_bios_param,
};
static struct scsi_host_template mv6_sht = {
- .module = THIS_MODULE,
- .name = DRV_NAME,
- .ioctl = ata_scsi_ioctl,
- .queuecommand = ata_scsi_queuecmd,
- .can_queue = ATA_DEF_QUEUE,
- .this_id = ATA_SHT_THIS_ID,
+ ATA_NCQ_SHT(DRV_NAME),
+ .can_queue = MV_MAX_Q_DEPTH - 1,
.sg_tablesize = MV_MAX_SG_CT / 2,
- .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
- .emulated = ATA_SHT_EMULATED,
- .use_clustering = 1,
- .proc_name = DRV_NAME,
.dma_boundary = MV_DMA_BOUNDARY,
- .slave_configure = ata_scsi_slave_config,
- .slave_destroy = ata_scsi_slave_destroy,
- .bios_param = ata_std_bios_param,
};
-static const struct ata_port_operations mv5_ops = {
- .tf_load = ata_tf_load,
- .tf_read = ata_tf_read,
- .check_status = ata_check_status,
- .exec_command = ata_exec_command,
- .dev_select = ata_std_dev_select,
-
- .cable_detect = ata_cable_sata,
+static struct ata_port_operations mv5_ops = {
+ .inherits = &ata_sff_port_ops,
.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
- .data_xfer = ata_data_xfer,
-
- .irq_clear = mv_irq_clear,
- .irq_on = ata_irq_on,
- .error_handler = mv_error_handler,
- .post_internal_cmd = mv_post_int_cmd,
.freeze = mv_eh_freeze,
.thaw = mv_eh_thaw,
+ .hardreset = mv_hardreset,
+ .error_handler = ata_std_error_handler, /* avoid SFF EH */
+ .post_internal_cmd = ATA_OP_NULL,
.scr_read = mv5_scr_read,
.scr_write = mv5_scr_write,
.port_stop = mv_port_stop,
};
-static const struct ata_port_operations mv6_ops = {
- .tf_load = ata_tf_load,
- .tf_read = ata_tf_read,
- .check_status = ata_check_status,
- .exec_command = ata_exec_command,
- .dev_select = ata_std_dev_select,
-
- .cable_detect = ata_cable_sata,
-
- .qc_prep = mv_qc_prep,
- .qc_issue = mv_qc_issue,
- .data_xfer = ata_data_xfer,
-
- .irq_clear = mv_irq_clear,
- .irq_on = ata_irq_on,
-
- .error_handler = mv_error_handler,
- .post_internal_cmd = mv_post_int_cmd,
- .freeze = mv_eh_freeze,
- .thaw = mv_eh_thaw,
-
+static struct ata_port_operations mv6_ops = {
+ .inherits = &mv5_ops,
+ .qc_defer = sata_pmp_qc_defer_cmd_switch,
+ .dev_config = mv6_dev_config,
.scr_read = mv_scr_read,
.scr_write = mv_scr_write,
- .port_start = mv_port_start,
- .port_stop = mv_port_stop,
+ .pmp_hardreset = mv_pmp_hardreset,
+ .pmp_softreset = mv_softreset,
+ .softreset = mv_softreset,
+ .error_handler = sata_pmp_error_handler,
};
-static const struct ata_port_operations mv_iie_ops = {
- .tf_load = ata_tf_load,
- .tf_read = ata_tf_read,
- .check_status = ata_check_status,
- .exec_command = ata_exec_command,
- .dev_select = ata_std_dev_select,
-
- .cable_detect = ata_cable_sata,
-
+static struct ata_port_operations mv_iie_ops = {
+ .inherits = &mv6_ops,
+ .qc_defer = ata_std_qc_defer, /* FIS-based switching */
+ .dev_config = ATA_OP_NULL,
.qc_prep = mv_qc_prep_iie,
- .qc_issue = mv_qc_issue,
- .data_xfer = ata_data_xfer,
-
- .irq_clear = mv_irq_clear,
- .irq_on = ata_irq_on,
-
- .error_handler = mv_error_handler,
- .post_internal_cmd = mv_post_int_cmd,
- .freeze = mv_eh_freeze,
- .thaw = mv_eh_thaw,
-
- .scr_read = mv_scr_read,
- .scr_write = mv_scr_write,
-
- .port_start = mv_port_start,
- .port_stop = mv_port_stop,
};
static const struct ata_port_info mv_port_info[] = {
.port_ops = &mv5_ops,
},
{ /* chip_604x */
- .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+ .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+ ATA_FLAG_NCQ,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &mv6_ops,
},
{ /* chip_608x */
.flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
- MV_FLAG_DUAL_HC,
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+ ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &mv6_ops,
},
{ /* chip_6042 */
- .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+ .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+ ATA_FLAG_NCQ,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
},
{ /* chip_7042 */
- .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS,
+ .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+ ATA_FLAG_NCQ,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &mv_iie_ops,
+ },
+ { /* chip_soc */
+ .flags = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+ ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+ ATA_FLAG_NCQ | MV_FLAG_SOC,
.pio_mask = 0x1f, /* pio0-4 */
.udma_mask = ATA_UDMA6,
.port_ops = &mv_iie_ops,
{ } /* terminate list */
};
-static struct pci_driver mv_pci_driver = {
- .name = DRV_NAME,
- .id_table = mv_pci_tbl,
- .probe = mv_init_one,
- .remove = ata_pci_remove_one,
-};
-
static const struct mv_hw_ops mv5xxx_ops = {
.phy_errata = mv5_phy_errata,
.enable_leds = mv5_enable_leds,
.reset_bus = mv_reset_pci_bus,
};
-/*
- * module options
- */
-static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
-
-
-/* move to PCI layer or libata core? */
-static int pci_go_64(struct pci_dev *pdev)
-{
- int rc;
-
- if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
- if (rc) {
- rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "64-bit DMA enable failed\n");
- return rc;
- }
- }
- } else {
- rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit DMA enable failed\n");
- return rc;
- }
- rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
- if (rc) {
- dev_printk(KERN_ERR, &pdev->dev,
- "32-bit consistent DMA enable failed\n");
- return rc;
- }
- }
-
- return rc;
-}
+static const struct mv_hw_ops mv_soc_ops = {
+ .phy_errata = mv6_phy_errata,
+ .enable_leds = mv_soc_enable_leds,
+ .read_preamp = mv_soc_read_preamp,
+ .reset_hc = mv_soc_reset_hc,
+ .reset_flash = mv_soc_reset_flash,
+ .reset_bus = mv_soc_reset_bus,
+};
/*
* Functions
(mv_hardport_from_port(port) * MV_PORT_REG_SZ);
}
-static inline void __iomem *mv_ap_base(struct ata_port *ap)
+static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
{
- return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
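+ /* per-port phy registers are laid out at 0x100-byte intervals,
+ * starting 0x100 past the host controller's register base */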
+ void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
+ unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
+
+ return hc_mmio + ofs;
}
-static inline int mv_get_hc_count(unsigned long port_flags)
+static inline void __iomem *mv_host_base(struct ata_host *host)
{
- return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
+ struct mv_host_priv *hpriv = host->private_data;
+ return hpriv->base;
}
-static void mv_irq_clear(struct ata_port *ap)
+static inline void __iomem *mv_ap_base(struct ata_port *ap)
+{
+ return mv_port_base(mv_host_base(ap->host), ap->port_no);
+}
+
+static inline int mv_get_hc_count(unsigned long port_flags)
{
+ return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
static void mv_set_edma_ptrs(void __iomem *port_mmio,
if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
if (want_ncq != using_ncq)
- __mv_stop_dma(ap);
+ mv_stop_edma(ap);
}
if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
struct mv_host_priv *hpriv = ap->host->private_data;
int hard_port = mv_hardport_from_port(ap->port_no);
void __iomem *hc_mmio = mv_hc_base_from_port(
- ap->host->iomap[MV_PRIMARY_BAR], hard_port);
+ mv_host_base(ap->host), hard_port);
u32 hc_irq_cause, ipending;
/* clear EDMA event indicators, if any */
hc_mmio + HC_IRQ_CAUSE_OFS);
}
- mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
+ mv_edma_cfg(ap, want_ncq);
/* clear FIS IRQ Cause */
writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
}
/**
- * __mv_stop_dma - Disable eDMA engine
- * @ap: ATA channel to manipulate
- *
- * Verify the local cache of the eDMA state is accurate with a
- * WARN_ON.
+ * mv_stop_edma_engine - Disable eDMA engine
+ * @port_mmio: io base address
*
* LOCKING:
* Inherited from caller.
*/
-static int __mv_stop_dma(struct ata_port *ap)
+static int mv_stop_edma_engine(void __iomem *port_mmio)
{
- void __iomem *port_mmio = mv_ap_base(ap);
- struct mv_port_priv *pp = ap->private_data;
- u32 reg;
- int i, err = 0;
+ int i;
- if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
- /* Disable EDMA if active. The disable bit auto clears.
- */
- writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
- pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
- } else {
- WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
- }
+ /* Disable eDMA. The disable bit auto clears. */
+ writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
- /* now properly wait for the eDMA to stop */
- for (i = 1000; i > 0; i--) {
- reg = readl(port_mmio + EDMA_CMD_OFS);
+ /* Wait for the chip to confirm eDMA is off. */
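+ /* (this polls for up to ~100 msec: 10000 iterations of udelay(10)) */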
+ for (i = 10000; i > 0; i--) {
+ u32 reg = readl(port_mmio + EDMA_CMD_OFS);
if (!(reg & EDMA_EN))
- break;
-
- udelay(100);
- }
-
- if (reg & EDMA_EN) {
- ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
- err = -EIO;
+ return 0;
+ udelay(10);
}
-
- return err;
+ return -EIO;
}
-static int mv_stop_dma(struct ata_port *ap)
+static int mv_stop_edma(struct ata_port *ap)
{
- unsigned long flags;
- int rc;
-
- spin_lock_irqsave(&ap->host->lock, flags);
- rc = __mv_stop_dma(ap);
- spin_unlock_irqrestore(&ap->host->lock, flags);
+ void __iomem *port_mmio = mv_ap_base(ap);
+ struct mv_port_priv *pp = ap->private_data;
- return rc;
+ if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
+ return 0;
+ pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
+ if (mv_stop_edma_engine(port_mmio)) {
+ ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
+ return -EIO;
+ }
+ return 0;
}
#ifdef ATA_DEBUG
return -EINVAL;
}
-static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
- void __iomem *port_mmio, int want_ncq)
+static void mv6_dev_config(struct ata_device *adev)
+{
+ /*
+ * Deal with Gen-II ("mv6") hardware quirks/restrictions:
+ *
+ * Gen-II does not support NCQ over a port multiplier
+ * (no FIS-based switching).
+ *
+ * We don't have hob_nsect when doing NCQ commands on Gen-II.
+ * See mv_qc_prep() for more info.
+ */
+ if (adev->flags & ATA_DFLAG_NCQ) {
+ if (sata_pmp_attached(adev->link->ap))
+ adev->flags &= ~ATA_DFLAG_NCQ;
+ else if (adev->max_sectors > ATA_MAX_SECTORS)
+ adev->max_sectors = ATA_MAX_SECTORS;
+ }
+}
+
+static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
+{
+ u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
+ /*
+ * Various bit settings required for operation
+ * in FIS-based switching (fbs) mode on GenIIe:
+ */
+ old_fcfg = readl(port_mmio + FIS_CFG_OFS);
+ old_ltmode = readl(port_mmio + LTMODE_OFS);
+ if (enable_fbs) {
+ new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
+ new_ltmode = old_ltmode | LTMODE_BIT8;
+ } else { /* disable fbs */
+ new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
+ new_ltmode = old_ltmode & ~LTMODE_BIT8;
+ }
+ if (new_fcfg != old_fcfg)
+ writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
+ if (new_ltmode != old_ltmode)
+ writelfl(new_ltmode, port_mmio + LTMODE_OFS);
+}
+
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
u32 cfg;
+ struct mv_port_priv *pp = ap->private_data;
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ void __iomem *port_mmio = mv_ap_base(ap);
/* set up non-NCQ EDMA configuration */
cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
cfg |= (1 << 22); /* enab 4-entry host queue cache */
cfg |= (1 << 18); /* enab early completion */
cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
+
+ if (want_ncq && sata_pmp_attached(ap)) {
+ cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
+ mv_config_fbs(port_mmio, 1);
+ } else {
+ mv_config_fbs(port_mmio, 0);
+ }
}
if (want_ncq) {
writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
+static void mv_port_free_dma_mem(struct ata_port *ap)
+{
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ struct mv_port_priv *pp = ap->private_data;
+ int tag;
+
+ if (pp->crqb) {
+ dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
+ pp->crqb = NULL;
+ }
+ if (pp->crpb) {
+ dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
+ pp->crpb = NULL;
+ }
+ /*
+ * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
+ * For later hardware, we have one unique sg_tbl per NCQ tag.
+ */
+ for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
+ if (pp->sg_tbl[tag]) {
+ if (tag == 0 || !IS_GEN_I(hpriv))
+ dma_pool_free(hpriv->sg_tbl_pool,
+ pp->sg_tbl[tag],
+ pp->sg_tbl_dma[tag]);
+ pp->sg_tbl[tag] = NULL;
+ }
+ }
+}
+
/**
* mv_port_start - Port specific init/start routine.
* @ap: ATA channel to manipulate
struct device *dev = ap->host->dev;
struct mv_host_priv *hpriv = ap->host->private_data;
struct mv_port_priv *pp;
- void __iomem *port_mmio = mv_ap_base(ap);
- void *mem;
- dma_addr_t mem_dma;
- unsigned long flags;
- int rc;
+ int tag;
pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
if (!pp)
return -ENOMEM;
+ ap->private_data = pp;
- mem = dmam_alloc_coherent(dev, MV_PORT_PRIV_DMA_SZ, &mem_dma,
- GFP_KERNEL);
- if (!mem)
+ pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
+ if (!pp->crqb)
return -ENOMEM;
- memset(mem, 0, MV_PORT_PRIV_DMA_SZ);
-
- rc = ata_pad_alloc(ap, dev);
- if (rc)
- return rc;
+ memset(pp->crqb, 0, MV_CRQB_Q_SZ);
- /* First item in chunk of DMA memory:
- * 32-slot command request table (CRQB), 32 bytes each in size
- */
- pp->crqb = mem;
- pp->crqb_dma = mem_dma;
- mem += MV_CRQB_Q_SZ;
- mem_dma += MV_CRQB_Q_SZ;
-
- /* Second item:
- * 32-slot command response table (CRPB), 8 bytes each in size
- */
- pp->crpb = mem;
- pp->crpb_dma = mem_dma;
- mem += MV_CRPB_Q_SZ;
- mem_dma += MV_CRPB_Q_SZ;
+ pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
+ if (!pp->crpb)
+ goto out_port_free_dma_mem;
+ memset(pp->crpb, 0, MV_CRPB_Q_SZ);
- /* Third item:
- * Table of scatter-gather descriptors (ePRD), 16 bytes each
- */
- pp->sg_tbl = mem;
- pp->sg_tbl_dma = mem_dma;
-
- spin_lock_irqsave(&ap->host->lock, flags);
-
- mv_edma_cfg(pp, hpriv, port_mmio, 0);
-
- mv_set_edma_ptrs(port_mmio, hpriv, pp);
-
- spin_unlock_irqrestore(&ap->host->lock, flags);
-
- /* Don't turn on EDMA here...do it before DMA commands only. Else
- * we'll be unable to send non-data, PIO, etc due to restricted access
- * to shadow regs.
+ /*
+ * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
+ * For later hardware, we need one unique sg_tbl per NCQ tag.
*/
- ap->private_data = pp;
+ for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
+ if (tag == 0 || !IS_GEN_I(hpriv)) {
+ pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
+ GFP_KERNEL, &pp->sg_tbl_dma[tag]);
+ if (!pp->sg_tbl[tag])
+ goto out_port_free_dma_mem;
+ } else {
+ pp->sg_tbl[tag] = pp->sg_tbl[0];
+ pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
+ }
+ }
return 0;
+
+out_port_free_dma_mem:
+ mv_port_free_dma_mem(ap);
+ return -ENOMEM;
}
/**
*/
static void mv_port_stop(struct ata_port *ap)
{
- mv_stop_dma(ap);
+ mv_stop_edma(ap);
+ mv_port_free_dma_mem(ap);
}
/**
struct mv_sg *mv_sg, *last_sg = NULL;
unsigned int si;
- mv_sg = pp->sg_tbl;
+ mv_sg = pp->sg_tbl[qc->tag];
for_each_sg(qc->sg, sg, qc->n_elem, si) {
dma_addr_t addr = sg_dma_address(sg);
u32 sg_len = sg_dma_len(sg);
u16 flags = 0;
unsigned in_index;
- if (qc->tf.protocol != ATA_PROT_DMA)
+ if ((qc->tf.protocol != ATA_PROT_DMA) &&
+ (qc->tf.protocol != ATA_PROT_NCQ))
return;
/* Fill in command request block
flags |= CRQB_FLAG_READ;
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
+ flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
/* get current queue index from software */
in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
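+ /* Split the SG table's 64-bit bus address into two 32-bit words.
+ * The "(x >> 16) >> 16" form (rather than ">> 32") stays
+ * well-defined when dma_addr_t is only 32 bits wide. */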
pp->crqb[in_index].sg_addr =
- cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
+ cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
pp->crqb[in_index].sg_addr_hi =
- cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
+ cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
cw = &pp->crqb[in_index].ata_cmd[0];
case ATA_CMD_WRITE_FUA_EXT:
mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
break;
-#ifdef LIBATA_NCQ /* FIXME: remove this line when NCQ added */
case ATA_CMD_FPDMA_READ:
case ATA_CMD_FPDMA_WRITE:
mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
break;
-#endif /* FIXME: remove this line when NCQ added */
default:
/* The only other commands EDMA supports in non-queued and
* non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
unsigned in_index;
u32 flags = 0;
- if (qc->tf.protocol != ATA_PROT_DMA)
+ if ((qc->tf.protocol != ATA_PROT_DMA) &&
+ (qc->tf.protocol != ATA_PROT_NCQ))
return;
- /* Fill in Gen IIE command request block
- */
+ /* Fill in Gen IIE command request block */
if (!(qc->tf.flags & ATA_TFLAG_WRITE))
flags |= CRQB_FLAG_READ;
WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
flags |= qc->tag << CRQB_TAG_SHIFT;
flags |= qc->tag << CRQB_HOSTQ_SHIFT;
+ flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
/* get current queue index from software */
in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
- crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff);
- crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16);
+ crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
+ crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
crqb->flags = cpu_to_le32(flags);
tf = &qc->tf;
struct mv_port_priv *pp = ap->private_data;
u32 in_index;
- if (qc->tf.protocol != ATA_PROT_DMA) {
- /* We're about to send a non-EDMA capable command to the
+ if ((qc->tf.protocol != ATA_PROT_DMA) &&
+ (qc->tf.protocol != ATA_PROT_NCQ)) {
+ /*
+ * We're about to send a non-EDMA capable command to the
* port. Turn off EDMA so there won't be problems accessing
* shadow block, etc registers.
*/
- __mv_stop_dma(ap);
- return ata_qc_issue_prot(qc);
+ mv_stop_edma(ap);
+ mv_pmp_select(ap, qc->dev->link->pmp);
+ return ata_sff_qc_issue(qc);
}
mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
- in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
-
- /* until we do queuing, the queue should be empty at this point */
- WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
- >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK));
-
pp->req_idx++;
in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
* @reset_allowed: bool: 0 == don't trigger from reset here
*
* In most cases, just clear the interrupt and move on. However,
- * some cases require an eDMA reset, which is done right before
- * the COMRESET in mv_phy_reset(). The SERR case requires a
- * clear of pending errors in the SATA SERROR register. Finally,
- * if the port disabled DMA, update our cached copy to match.
+ * some cases require an eDMA reset, which also performs a COMRESET.
+ * The SERR case requires a clear of pending errors in the SATA
+ * SERROR register. Finally, if the port disabled DMA,
+ * update our cached copy to match.
*
* LOCKING:
* Inherited from caller.
EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
EDMA_ERR_INTRL_PAR)) {
err_mask |= AC_ERR_ATA_BUS;
- action |= ATA_EH_HARDRESET;
+ action |= ATA_EH_RESET;
ata_ehi_push_desc(ehi, "parity error");
}
if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
ata_ehi_hotplugged(ehi);
ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
"dev disconnect" : "dev connect");
- action |= ATA_EH_HARDRESET;
+ action |= ATA_EH_RESET;
}
if (IS_GEN_I(hpriv)) {
eh_freeze_mask = EDMA_EH_FREEZE_5;
if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
- struct mv_port_priv *pp = ap->private_data;
+ pp = ap->private_data;
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
ata_ehi_push_desc(ehi, "EDMA self-disable");
}
eh_freeze_mask = EDMA_EH_FREEZE;
if (edma_err_cause & EDMA_ERR_SELF_DIS) {
- struct mv_port_priv *pp = ap->private_data;
+ pp = ap->private_data;
pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
ata_ehi_push_desc(ehi, "EDMA self-disable");
}
sata_scr_read(&ap->link, SCR_ERROR, &serr);
sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
err_mask = AC_ERR_ATA_BUS;
- action |= ATA_EH_HARDRESET;
+ action |= ATA_EH_RESET;
}
}
if (!err_mask) {
err_mask = AC_ERR_OTHER;
- action |= ATA_EH_HARDRESET;
+ action |= ATA_EH_RESET;
}
ehi->serror |= serr;
*/
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
- void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->base;
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
u32 hc_irq_cause;
- int port, port0;
+ int port, port0, last_port;
if (hc == 0)
port0 = 0;
else
port0 = MV_PORTS_PER_HC;
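+ /* an SoC host may expose fewer than MV_PORTS_PER_HC ports */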
+ if (HAS_PCI(host))
+ last_port = port0 + MV_PORTS_PER_HC;
+ else
+ last_port = port0 + hpriv->n_ports;
/* we'll need the HC success int register in most cases */
hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
if (!hc_irq_cause)
VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
hc, relevant, hc_irq_cause);
- for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
+ for (port = port0; port < last_port; port++) {
struct ata_port *ap = host->ports[port];
- struct mv_port_priv *pp = ap->private_data;
+ struct mv_port_priv *pp;
int have_err_bits, hard_port, shift;
if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
continue;
+ pp = ap->private_data;
+
shift = port << 1; /* (port * 2) */
- if (port >= MV_PORTS_PER_HC) {
+ if (port >= MV_PORTS_PER_HC)
shift++; /* skip bit 8 in the HC Main IRQ reg */
- }
+
have_err_bits = ((PORT0_ERR << shift) & relevant);
if (unlikely(have_err_bits)) {
ata_ehi_push_desc(ehi,
"PCI err cause 0x%08x", err_cause);
err_mask = AC_ERR_HOST_BUS;
- ehi->action = ATA_EH_HARDRESET;
+ ehi->action = ATA_EH_RESET;
qc = ata_qc_from_tag(ap, ap->link.active_tag);
if (qc)
qc->err_mask |= err_mask;
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
+ struct mv_host_priv *hpriv = host->private_data;
unsigned int hc, handled = 0, n_hcs;
- void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
+ void __iomem *mmio = hpriv->base;
u32 irq_stat, irq_mask;
+ /* Note to self: &host->lock == &ap->host->lock == ap->lock */
spin_lock(&host->lock);
- irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
- irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
+
+ irq_stat = readl(hpriv->main_cause_reg_addr);
+ irq_mask = readl(hpriv->main_mask_reg_addr);
/* check the cases where we either have nothing pending or have read
* a bogus register value which can indicate HW removal or PCI fault
n_hcs = mv_get_hc_count(host->ports[0]->flags);
- if (unlikely(irq_stat & PCI_ERR)) {
+ if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
mv_pci_error(host, mmio);
handled = 1;
goto out_unlock; /* skip all other HC irq handling */
return IRQ_RETVAL(handled);
}
-static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
-{
- void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
- unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
-
- return hc_mmio + ofs;
-}
-
static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
{
unsigned int ofs;
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->base;
void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->base;
void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);
return -EINVAL;
}
-static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
+static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
+ struct pci_dev *pdev = to_pci_dev(host->dev);
int early_5080;
early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
- mv_reset_pci_bus(pdev, mmio);
+ mv_reset_pci_bus(host, mmio);
}
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
void __iomem *port_mmio = mv_port_base(mmio, port);
- writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
-
- mv_channel_reset(hpriv, mmio, port);
+ /*
+ * The datasheet warns against setting ATA_RST when EDMA is active
+ * (but doesn't say what the problem might be). So we first try
+ * to disable the EDMA engine before doing the ATA_RST operation.
+ */
+ mv_reset_channel(hpriv, mmio, port);
ZERO(0x028); /* command */
writel(0x11f, port_mmio + EDMA_CFG_OFS);
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
-static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
+static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
- struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct mv_host_priv *hpriv = host->private_data;
u32 tmp;
printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
rc = 1;
}
+ /*
+ * Temporary: wait 3 seconds before port-probing can happen,
+ * so that we don't miss finding sleepy SilXXXX port-multipliers.
+ * This can go away once hotplug is fully/correctly implemented.
+ */
+ if (rc == 0)
+ msleep(3000);
done:
return rc;
}
m4 = readl(port_mmio + PHY_MODE4);
if (hp_flags & MV_HP_ERRATA_60X1B2)
- tmp = readl(port_mmio + 0x310);
+ tmp = readl(port_mmio + PHY_MODE3);
+ /* workaround for errata FEr SATA#10 (part 1) */
m4 = (m4 & ~(1 << 1)) | (1 << 0);
writel(m4, port_mmio + PHY_MODE4);
if (hp_flags & MV_HP_ERRATA_60X1B2)
- writel(tmp, port_mmio + 0x310);
+ writel(tmp, port_mmio + PHY_MODE3);
}
/* Revert values of pre-emphasis and signal amps to the saved ones */
writel(m2, port_mmio + PHY_MODE2);
}
-static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
- unsigned int port_no)
+/* TODO: use the generic LED interface to configure the SATA Presence */
+/* & Activity LEDs on the board */
+static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
+ void __iomem *mmio)
{
- void __iomem *port_mmio = mv_port_base(mmio, port_no);
-
- writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
-
- if (IS_GEN_II(hpriv)) {
- u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
- ifctl |= (1 << 7); /* enable gen2i speed */
- ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
- writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
- }
-
- udelay(25); /* allow reset propagation */
-
- /* Spec never mentions clearing the bit. Marvell's driver does
- * clear the bit, however.
- */
- writelfl(0, port_mmio + EDMA_CMD_OFS);
-
- hpriv->ops->phy_errata(hpriv, mmio, port_no);
-
- if (IS_GEN_I(hpriv))
- mdelay(1);
+ return;
}
-/**
- * mv_phy_reset - Perform eDMA reset followed by COMRESET
- * @ap: ATA channel to manipulate
- *
- * Part of this is taken from __sata_phy_reset and modified to
- * not sleep since this routine gets called from interrupt level.
- *
- * LOCKING:
- * Inherited from caller. This is coded to safe to call at
- * interrupt level, i.e. it does not sleep.
- */
-static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
- unsigned long deadline)
+static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
+ void __iomem *mmio)
{
- struct mv_port_priv *pp = ap->private_data;
- struct mv_host_priv *hpriv = ap->host->private_data;
- void __iomem *port_mmio = mv_ap_base(ap);
- int retry = 5;
- u32 sstatus;
+ void __iomem *port_mmio;
+ u32 tmp;
- VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);
+ port_mmio = mv_port_base(mmio, idx);
+ tmp = readl(port_mmio + PHY_MODE2);
-#ifdef DEBUG
- {
- u32 sstatus, serror, scontrol;
+ hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
+ hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
+}
- mv_scr_read(ap, SCR_STATUS, &sstatus);
- mv_scr_read(ap, SCR_ERROR, &serror);
- mv_scr_read(ap, SCR_CONTROL, &scontrol);
- DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
- "SCtrl 0x%08x\n", sstatus, serror, scontrol);
- }
-#endif
+#undef ZERO
+#define ZERO(reg) writel(0, port_mmio + (reg))
+static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
+ void __iomem *mmio, unsigned int port)
+{
+ void __iomem *port_mmio = mv_port_base(mmio, port);
- /* Issue COMRESET via SControl */
-comreset_retry:
- sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
- msleep(1);
+ /*
+ * The datasheet warns against setting ATA_RST when EDMA is active
+ * (but doesn't say what the problem might be). So we first try
+ * to disable the EDMA engine before doing the ATA_RST operation.
+ */
+ mv_reset_channel(hpriv, mmio, port);
+
+ ZERO(0x028); /* command */
+ writel(0x101f, port_mmio + EDMA_CFG_OFS);
+ ZERO(0x004); /* timer */
+ ZERO(0x008); /* irq err cause */
+ ZERO(0x00c); /* irq err mask */
+ ZERO(0x010); /* rq bah */
+ ZERO(0x014); /* rq inp */
+ ZERO(0x018); /* rq outp */
+ ZERO(0x01c); /* respq bah */
+ ZERO(0x024); /* respq outp */
+ ZERO(0x020); /* respq inp */
+ ZERO(0x02c); /* test control */
+ writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
+}
- sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
- msleep(20);
+#undef ZERO
- do {
- sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
- if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
- break;
+#define ZERO(reg) writel(0, hc_mmio + (reg))
+static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
+ void __iomem *mmio)
+{
+ void __iomem *hc_mmio = mv_hc_base(mmio, 0);
- msleep(1);
- } while (time_before(jiffies, deadline));
+ ZERO(0x00c);
+ ZERO(0x010);
+ ZERO(0x014);
- /* work around errata */
- if (IS_GEN_II(hpriv) &&
- (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
- (retry-- > 0))
- goto comreset_retry;
+}
-#ifdef DEBUG
- {
- u32 sstatus, serror, scontrol;
+#undef ZERO
- mv_scr_read(ap, SCR_STATUS, &sstatus);
- mv_scr_read(ap, SCR_ERROR, &serror);
- mv_scr_read(ap, SCR_CONTROL, &scontrol);
- DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
- "SCtrl 0x%08x\n", sstatus, serror, scontrol);
- }
-#endif
+static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
+ void __iomem *mmio, unsigned int n_hc)
+{
+ unsigned int port;
- if (ata_link_offline(&ap->link)) {
- *class = ATA_DEV_NONE;
- return;
- }
+ for (port = 0; port < hpriv->n_ports; port++)
+ mv_soc_reset_hc_port(hpriv, mmio, port);
- /* even after SStatus reflects that device is ready,
- * it seems to take a while for link to be fully
- * established (and thus Status no longer 0x80/0x7F),
- * so we poll a bit for that, here.
- */
- retry = 20;
- while (1) {
- u8 drv_stat = ata_check_status(ap);
- if ((drv_stat != 0x80) && (drv_stat != 0x7f))
- break;
- msleep(500);
- if (retry-- <= 0)
- break;
- if (time_after(jiffies, deadline))
- break;
- }
+ mv_soc_reset_one_hc(hpriv, mmio);
- /* FIXME: if we passed the deadline, the following
- * code probably produces an invalid result
- */
+ return 0;
+}
- /* finally, read device signature from TF registers */
- *class = ata_dev_try_classify(ap->link.device, 1, NULL);
+static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
+ void __iomem *mmio)
+{
+ return;
+}
- writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
+{
+ return;
+}
- WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);
+static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
+{
+ u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
- VPRINTK("EXIT\n");
+ ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
+ if (want_gen2i)
+ ifctl |= (1 << 7); /* enable gen2i speed */
+ writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
}
-static int mv_prereset(struct ata_link *link, unsigned long deadline)
+/*
+ * Caller must ensure that EDMA is not active,
+ * by first doing mv_stop_edma() where needed.
+ */
+static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
+ unsigned int port_no)
{
- struct ata_port *ap = link->ap;
- struct mv_port_priv *pp = ap->private_data;
- struct ata_eh_context *ehc = &link->eh_context;
- int rc;
+ void __iomem *port_mmio = mv_port_base(mmio, port_no);
- rc = mv_stop_dma(ap);
- if (rc)
- ehc->i.action |= ATA_EH_HARDRESET;
+ mv_stop_edma_engine(port_mmio);
+ writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
- if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
- pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
- ehc->i.action |= ATA_EH_HARDRESET;
+ if (!IS_GEN_I(hpriv)) {
+ /* Enable 3.0gb/s link speed */
+ mv_setup_ifctl(port_mmio, 1);
}
+ /*
+ * Strobing ATA_RST here causes a hard reset of the SATA transport,
+ * link, and physical layers. It resets all SATA interface registers
+ * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
+ */
+ writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
+ udelay(25); /* allow reset propagation */
+ writelfl(0, port_mmio + EDMA_CMD_OFS);
- /* if we're about to do hardreset, nothing more to do */
- if (ehc->i.action & ATA_EH_HARDRESET)
- return 0;
-
- if (ata_link_online(link))
- rc = ata_wait_ready(ap, deadline);
- else
- rc = -ENODEV;
+ hpriv->ops->phy_errata(hpriv, mmio, port_no);
- return rc;
+ if (IS_GEN_I(hpriv))
+ mdelay(1);
}
-static int mv_hardreset(struct ata_link *link, unsigned int *class,
- unsigned long deadline)
+static void mv_pmp_select(struct ata_port *ap, int pmp)
{
- struct ata_port *ap = link->ap;
- struct mv_host_priv *hpriv = ap->host->private_data;
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
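+ /* Point the low nibble of SATA_IFCTL at the desired PMP port,
+ * so that subsequent non-EDMA commands reach the selected device. */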
+ if (sata_pmp_supported(ap)) {
+ void __iomem *port_mmio = mv_ap_base(ap);
+ u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
+ int old = reg & 0xf;
- mv_stop_dma(ap);
-
- mv_channel_reset(hpriv, mmio, ap->port_no);
+ if (old != pmp) {
+ reg = (reg & ~0xf) | pmp;
+ writelfl(reg, port_mmio + SATA_IFCTL_OFS);
+ }
+ }
+}
- mv_phy_reset(ap, class, deadline);
+static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ mv_pmp_select(link->ap, sata_srst_pmp(link));
+ return sata_std_hardreset(link, class, deadline);
+}
- return 0;
+static int mv_softreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
+{
+ mv_pmp_select(link->ap, sata_srst_pmp(link));
+ return ata_sff_softreset(link, class, deadline);
}
-static void mv_postreset(struct ata_link *link, unsigned int *classes)
+static int mv_hardreset(struct ata_link *link, unsigned int *class,
+ unsigned long deadline)
{
struct ata_port *ap = link->ap;
- u32 serr;
-
- /* print link status */
- sata_print_link_status(link);
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ struct mv_port_priv *pp = ap->private_data;
+ void __iomem *mmio = hpriv->base;
+ int rc, attempts = 0, extra = 0;
+ u32 sstatus;
+ bool online;
- /* clear SError */
- sata_scr_read(link, SCR_ERROR, &serr);
- sata_scr_write_flush(link, SCR_ERROR, serr);
+ mv_reset_channel(hpriv, mmio, ap->port_no);
+ pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
- /* bail out if no device is present */
- if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
- DPRINTK("EXIT, no device\n");
- return;
- }
-
- /* set up device control */
- iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
-}
+ /* Workaround for errata FEr SATA#10 (part 2) */
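+ /* Retry until SStatus settles: 0x0 == no device; 0x113 and 0x123
+ * == link up at 1.5 and 3.0 Gbps respectively. */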
+ do {
+ const unsigned long *timing =
+ sata_ehc_deb_timing(&link->eh_context);
-static void mv_error_handler(struct ata_port *ap)
-{
- ata_do_eh(ap, mv_prereset, ata_std_softreset,
- mv_hardreset, mv_postreset);
-}
+ rc = sata_link_hardreset(link, timing, deadline + extra,
+ &online, NULL);
+ if (rc)
+ return rc;
+ sata_scr_read(link, SCR_STATUS, &sstatus);
+ if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
+ /* Force 1.5gb/s link speed and try again */
+ mv_setup_ifctl(mv_ap_base(ap), 0);
+ if (time_after(jiffies + HZ, deadline))
+ extra = HZ; /* only extend it once, max */
+ }
+ } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
-static void mv_post_int_cmd(struct ata_queued_cmd *qc)
-{
- mv_stop_dma(qc->ap);
+ return rc;
}
static void mv_eh_freeze(struct ata_port *ap)
{
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = ap->host->private_data;
unsigned int hc = (ap->port_no > 3) ? 1 : 0;
u32 tmp, mask;
unsigned int shift;
mask = 0x3 << shift;
/* disable assertion of portN err, done events */
- tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
- writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
+ tmp = readl(hpriv->main_mask_reg_addr);
+ writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
static void mv_eh_thaw(struct ata_port *ap)
{
- void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ void __iomem *mmio = hpriv->base;
unsigned int hc = (ap->port_no > 3) ? 1 : 0;
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
void __iomem *port_mmio = mv_ap_base(ap);
writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
/* enable assertion of portN err, done events */
- tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
- writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
+ tmp = readl(hpriv->main_mask_reg_addr);
+ writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
/**
break;
}
break;
+ case chip_soc:
+ hpriv->ops = &mv_soc_ops;
+ hp_flags |= MV_HP_ERRATA_60X1C0;
+ break;
default:
- dev_printk(KERN_ERR, &pdev->dev,
+ dev_printk(KERN_ERR, host->dev,
"BUG: invalid board index %u\n", board_idx);
return 1;
}
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
int rc = 0, n_hc, port, hc;
- struct pci_dev *pdev = to_pci_dev(host->dev);
- void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
struct mv_host_priv *hpriv = host->private_data;
-
- /* global interrupt mask */
- writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);
+ void __iomem *mmio = hpriv->base;
rc = mv_chip_id(host, board_idx);
if (rc)
- goto done;
+ goto done;
+
+ if (HAS_PCI(host)) {
+ hpriv->main_cause_reg_addr = hpriv->base +
+ HC_MAIN_IRQ_CAUSE_OFS;
+ hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
+ } else {
+ hpriv->main_cause_reg_addr = hpriv->base +
+ HC_SOC_MAIN_IRQ_CAUSE_OFS;
+ hpriv->main_mask_reg_addr = hpriv->base +
+ HC_SOC_MAIN_IRQ_MASK_OFS;
+ }
+ /* global interrupt mask */
+ writel(0, hpriv->main_mask_reg_addr);
n_hc = mv_get_hc_count(host->ports[0]->flags);
goto done;
hpriv->ops->reset_flash(hpriv, mmio);
- hpriv->ops->reset_bus(pdev, mmio);
+ hpriv->ops->reset_bus(host, mmio);
hpriv->ops->enable_leds(hpriv, mmio);
for (port = 0; port < host->n_ports; port++) {
- if (IS_GEN_II(hpriv)) {
- void __iomem *port_mmio = mv_port_base(mmio, port);
-
- u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
- ifctl |= (1 << 7); /* enable gen2i speed */
- ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
- writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
- }
-
- hpriv->ops->phy_errata(hpriv, mmio, port);
- }
-
- for (port = 0; port < host->n_ports; port++) {
struct ata_port *ap = host->ports[port];
void __iomem *port_mmio = mv_port_base(mmio, port);
- unsigned int offset = port_mmio - mmio;
mv_port_init(&ap->ioaddr, port_mmio);
- ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
- ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+#ifdef CONFIG_PCI
+ if (HAS_PCI(host)) {
+ unsigned int offset = port_mmio - mmio;
+ ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
+ ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
+ }
+#endif
}
for (hc = 0; hc < n_hc; hc++) {
writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
}
- /* Clear any currently outstanding host interrupt conditions */
- writelfl(0, mmio + hpriv->irq_cause_ofs);
+ if (HAS_PCI(host)) {
+ /* Clear any currently outstanding host interrupt conditions */
+ writelfl(0, mmio + hpriv->irq_cause_ofs);
- /* and unmask interrupt generation for host regs */
- writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
+ /* and unmask interrupt generation for host regs */
+ writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
+ if (IS_GEN_I(hpriv))
+ writelfl(~HC_MAIN_MASKED_IRQS_5,
+ hpriv->main_mask_reg_addr);
+ else
+ writelfl(~HC_MAIN_MASKED_IRQS,
+ hpriv->main_mask_reg_addr);
+
+ VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
+ "PCI int cause/mask=0x%08x/0x%08x\n",
+ readl(hpriv->main_cause_reg_addr),
+ readl(hpriv->main_mask_reg_addr),
+ readl(mmio + hpriv->irq_cause_ofs),
+ readl(mmio + hpriv->irq_mask_ofs));
+ } else {
+ writelfl(~HC_MAIN_MASKED_IRQS_SOC,
+ hpriv->main_mask_reg_addr);
+ VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
+ readl(hpriv->main_cause_reg_addr),
+ readl(hpriv->main_mask_reg_addr));
+ }
+done:
+ return rc;
+}
- if (IS_GEN_I(hpriv))
- writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
- else
- writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);
+static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
+{
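+ /* Each pool uses align == size, so every allocation comes back
+ * naturally aligned to the boundary the EDMA hardware requires
+ * (1KB for the CRQB queue, 256B for the CRPB queue; see the
+ * alignment notes in the enum above). */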
+ hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
+ MV_CRQB_Q_SZ, 0);
+ if (!hpriv->crqb_pool)
+ return -ENOMEM;
+
+ hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
+ MV_CRPB_Q_SZ, 0);
+ if (!hpriv->crpb_pool)
+ return -ENOMEM;
- VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
- "PCI int cause/mask=0x%08x/0x%08x\n",
- readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
- readl(mmio + HC_MAIN_IRQ_MASK_OFS),
- readl(mmio + hpriv->irq_cause_ofs),
- readl(mmio + hpriv->irq_mask_ofs));
+ hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
+ MV_SG_TBL_SZ, 0);
+ if (!hpriv->sg_tbl_pool)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * mv_platform_probe - handle a positive probe of an SoC Marvell host
+ * @pdev: platform device found
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_platform_probe(struct platform_device *pdev)
+{
+ static int printed_version;
+ const struct mv_sata_platform_data *mv_platform_data;
+ const struct ata_port_info *ppi[] =
+ { &mv_port_info[chip_soc], NULL };
+ struct ata_host *host;
+ struct mv_host_priv *hpriv;
+ struct resource *res;
+ int n_ports, rc;
+
+ if (!printed_version++)
+ dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
+
+ /*
+ * Simple resource validation ..
+ */
+ if (unlikely(pdev->num_resources != 2)) {
+ dev_err(&pdev->dev, "invalid number of resources\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Get the register base first
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL)
+ return -EINVAL;
+
+ /* allocate host */
+ mv_platform_data = pdev->dev.platform_data;
+ n_ports = mv_platform_data->n_ports;
+
+ host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
+ hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
+
+ if (!host || !hpriv)
+ return -ENOMEM;
+ host->private_data = hpriv;
+ hpriv->n_ports = n_ports;
+
+ host->iomap = NULL;
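+ /* The SoC resource maps only the SATAHC register block, while
+ * this driver's register offsets assume the full chip window,
+ * so bias the base back by MV_SATAHC0_REG_BASE. */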
+ hpriv->base = devm_ioremap(&pdev->dev, res->start,
+ res->end - res->start + 1);
+ if (hpriv->base == NULL)
+ return -ENOMEM;
+ hpriv->base -= MV_SATAHC0_REG_BASE;
+
+ rc = mv_create_dma_pools(hpriv, &pdev->dev);
+ if (rc)
+ return rc;
+
+ /* initialize adapter */
+ rc = mv_init_host(host, chip_soc);
+ if (rc)
+ return rc;
+
+ dev_printk(KERN_INFO, &pdev->dev,
+ "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
+ host->n_ports);
+
+ return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
+ IRQF_SHARED, &mv6_sht);
+}
+
+/*
+ * mv_platform_remove - unplug a platform interface
+ * @pdev: platform device
+ *
+ * A platform bus SATA device has been unplugged. Perform the needed
+ * cleanup. Also called on module unload for any active devices.
+ */
+static int __devexit mv_platform_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ata_host *host = dev_get_drvdata(dev);
+
+ ata_host_detach(host);
+ return 0;
+}
+
+static struct platform_driver mv_platform_driver = {
+ .probe = mv_platform_probe,
+ .remove = __devexit_p(mv_platform_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+
+#ifdef CONFIG_PCI
+static int mv_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+
+static struct pci_driver mv_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = mv_pci_tbl,
+ .probe = mv_pci_init_one,
+ .remove = ata_pci_remove_one,
+};
+
+/*
+ * module options
+ */
+static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
+
+
+/* move to PCI layer or libata core? */
+static int pci_go_64(struct pci_dev *pdev)
+{
+ int rc;
+
+ if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
+ if (rc) {
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "64-bit DMA enable failed\n");
+ return rc;
+ }
+ }
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit DMA enable failed\n");
+ return rc;
+ }
+ rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+ if (rc) {
+ dev_printk(KERN_ERR, &pdev->dev,
+ "32-bit consistent DMA enable failed\n");
+ return rc;
+ }
+ }
-done:
return rc;
}
}
/**
- * mv_init_one - handle a positive probe of a Marvell host
+ * mv_pci_init_one - handle a positive probe of a PCI Marvell host
* @pdev: PCI device found
* @ent: PCI device ID entry for the matched host
*
* LOCKING:
* Inherited from caller.
*/
-static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int mv_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
static int printed_version;
unsigned int board_idx = (unsigned int)ent->driver_data;
if (!host || !hpriv)
return -ENOMEM;
host->private_data = hpriv;
+ hpriv->n_ports = n_ports;
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
return rc;
host->iomap = pcim_iomap_table(pdev);
+ hpriv->base = host->iomap[MV_PRIMARY_BAR];
rc = pci_go_64(pdev);
if (rc)
return rc;
+ rc = mv_create_dma_pools(hpriv, &pdev->dev);
+ if (rc)
+ return rc;
+
/* initialize adapter */
rc = mv_init_host(host, board_idx);
if (rc)
return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
+#endif
+
+static int mv_platform_probe(struct platform_device *pdev);
+static int __devexit mv_platform_remove(struct platform_device *pdev);
static int __init mv_init(void)
{
- return pci_register_driver(&mv_pci_driver);
+ int rc = -ENODEV;
+#ifdef CONFIG_PCI
+ rc = pci_register_driver(&mv_pci_driver);
+ if (rc < 0)
+ return rc;
+#endif
+ rc = platform_driver_register(&mv_platform_driver);
+
+#ifdef CONFIG_PCI
+ if (rc < 0)
+ pci_unregister_driver(&mv_pci_driver);
+#endif
+ return rc;
}
static void __exit mv_exit(void)
{
+#ifdef CONFIG_PCI
pci_unregister_driver(&mv_pci_driver);
+#endif
+ platform_driver_unregister(&mv_platform_driver);
}
MODULE_AUTHOR("Brett Russ");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
+MODULE_ALIAS("platform:" DRV_NAME);
+#ifdef CONFIG_PCI
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
+#endif
module_init(mv_init);
module_exit(mv_exit);