Merge branch 'topic/misc' into for-linus
drivers/ata/sata_sil.c
index 1773709..3cb69d5 100644
 #include <linux/device.h>
 #include <scsi/scsi_host.h>
 #include <linux/libata.h>
+#include <linux/dmi.h>
 
 #define DRV_NAME       "sata_sil"
-#define DRV_VERSION    "2.3"
+#define DRV_VERSION    "2.4"
+
+#define SIL_DMA_BOUNDARY       0x7fffffffUL
 
 enum {
        SIL_MMIO_BAR            = 5,
@@ -118,6 +121,10 @@ static void sil_dev_config(struct ata_device *dev);
 static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
 static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed);
+static void sil_qc_prep(struct ata_queued_cmd *qc);
+static void sil_bmdma_setup(struct ata_queued_cmd *qc);
+static void sil_bmdma_start(struct ata_queued_cmd *qc);
+static void sil_bmdma_stop(struct ata_queued_cmd *qc);
 static void sil_freeze(struct ata_port *ap);
 static void sil_thaw(struct ata_port *ap);
 
@@ -167,13 +174,22 @@ static struct pci_driver sil_pci_driver = {
 };
 
 static struct scsi_host_template sil_sht = {
-       ATA_BMDMA_SHT(DRV_NAME),
+       ATA_BASE_SHT(DRV_NAME),
+       /*
+        * These controllers support Large Block Transfer, which allows
+        * transfer chunks of up to 2GiB that may cross 64KB boundaries;
+        * hence the DMA limits are more relaxed than for standard ATA SFF.
+        */
+       .dma_boundary           = SIL_DMA_BOUNDARY,
+       .sg_tablesize           = ATA_MAX_PRD,
 };
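
For contrast with the two relaxed limits above, the ATA_BMDMA_SHT() initializer that this hunk drops pins the classic SFF DMA limits. A rough sketch of what that macro expands to (constants assumed from the libata headers of this era, not part of this patch):

#define ATA_BMDMA_SHT(drv_name)                                 \
        ATA_BASE_SHT(drv_name),                                 \
        .sg_tablesize   = LIBATA_MAX_PRD,       /* ATA_MAX_PRD / 2 */      \
        .dma_boundary   = ATA_DMA_BOUNDARY      /* 0xffffUL, i.e. 64KB */

sil_sht keeps the base template and overrides exactly those two fields with the full ATA_MAX_PRD table size and a 2GiB boundary.
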
 
 static struct ata_port_operations sil_ops = {
-       .inherits               = &ata_bmdma_port_ops,
+       .inherits               = &ata_bmdma32_port_ops,
        .dev_config             = sil_dev_config,
        .set_mode               = sil_set_mode,
+       .bmdma_setup            = sil_bmdma_setup,
+       .bmdma_start            = sil_bmdma_start,
+       .bmdma_stop             = sil_bmdma_stop,
+       .qc_prep                = sil_qc_prep,
        .freeze                 = sil_freeze,
        .thaw                   = sil_thaw,
        .scr_read               = sil_scr_read,
@@ -184,8 +200,8 @@ static const struct ata_port_info sil_port_info[] = {
        /* sil_3112 */
        {
                .flags          = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE,
-               .pio_mask       = 0x1f,                 /* pio0-4 */
-               .mwdma_mask     = 0x07,                 /* mwdma0-2 */
+               .pio_mask       = ATA_PIO4,
+               .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
                .port_ops       = &sil_ops,
        },
@@ -193,24 +209,24 @@ static const struct ata_port_info sil_port_info[] = {
        {
                .flags          = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE |
                                  SIL_FLAG_NO_SATA_IRQ,
-               .pio_mask       = 0x1f,                 /* pio0-4 */
-               .mwdma_mask     = 0x07,                 /* mwdma0-2 */
+               .pio_mask       = ATA_PIO4,
+               .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
                .port_ops       = &sil_ops,
        },
        /* sil_3512 */
        {
                .flags          = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
-               .pio_mask       = 0x1f,                 /* pio0-4 */
-               .mwdma_mask     = 0x07,                 /* mwdma0-2 */
+               .pio_mask       = ATA_PIO4,
+               .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
                .port_ops       = &sil_ops,
        },
        /* sil_3114 */
        {
                .flags          = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT,
-               .pio_mask       = 0x1f,                 /* pio0-4 */
-               .mwdma_mask     = 0x07,                 /* mwdma0-2 */
+               .pio_mask       = ATA_PIO4,
+               .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
                .port_ops       = &sil_ops,
        },
@@ -249,6 +265,83 @@ module_param(slow_down, int, 0444);
 MODULE_PARM_DESC(slow_down, "Sledgehammer used to work around random problems, by limiting commands to 15 sectors (0=off, 1=on)");
 
 
+static void sil_bmdma_stop(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
+       void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
+
+       /* clear start/stop bit - can safely always write 0 */
+       iowrite8(0, bmdma2);
+
+       /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+       ata_sff_dma_pause(ap);
+}
+
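
sil_bmdma_stop() above can blindly write zero because, per its comment, every bit of the bmdma2 shadow register is safe to clear. A plain SFF stop instead has to read-modify-write the command register so the remaining control bits survive; a minimal sketch of that pattern (helper name hypothetical, shape assumed from libata's generic ata_bmdma_stop()):

static void sff_bmdma_stop_sketch(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *mmio = ap->ioaddr.bmdma_addr;

        /* clear only the start/stop bit, preserve the other bits */
        iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
                 mmio + ATA_DMA_CMD);

        /* same one-PIO-cycle wait for the HDMA1:0 transition */
        ata_sff_dma_pause(ap);
}
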
+static void sil_bmdma_setup(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       void __iomem *bmdma = ap->ioaddr.bmdma_addr;
+
+       /* load PRD table addr. */
+       iowrite32(ap->prd_dma, bmdma + ATA_DMA_TABLE_OFS);
+
+       /* issue r/w command */
+       ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+static void sil_bmdma_start(struct ata_queued_cmd *qc)
+{
+       unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+       struct ata_port *ap = qc->ap;
+       void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
+       void __iomem *bmdma2 = mmio_base + sil_port[ap->port_no].bmdma2;
+       u8 dmactl = ATA_DMA_START;
+
+       /*
+        * Set the transfer direction and start the host DMA transaction.
+        * Note: for Large Block Transfer to work, the DMA must be started
+        * using the bmdma2 register.
+        */
+       if (!rw)
+               dmactl |= ATA_DMA_WR;
+       iowrite8(dmactl, bmdma2);
+}
+
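
The if (!rw) test in sil_bmdma_start() looks inverted but is correct: the bus-master WR bit means "the DMA engine writes to memory", so it must be set for an ATA read. A hypothetical helper spelling that out (bit values assumed from <linux/ata.h>: ATA_DMA_START is bit 0, ATA_DMA_WR is bit 3):

static inline u8 sil_dmactl_for(const struct ata_taskfile *tf)
{
        u8 dmactl = ATA_DMA_START;              /* start the engine */

        if (!(tf->flags & ATA_TFLAG_WRITE))     /* ATA read command... */
                dmactl |= ATA_DMA_WR;           /* ...DMA writes to memory */
        return dmactl;
}
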
+/* The way God intended PCI IDE scatter/gather lists to look and behave... */
+static void sil_fill_sg(struct ata_queued_cmd *qc)
+{
+       struct scatterlist *sg;
+       struct ata_port *ap = qc->ap;
+       struct ata_prd *prd, *last_prd = NULL;
+       unsigned int si;
+
+       prd = &ap->prd[0];
+       for_each_sg(qc->sg, sg, qc->n_elem, si) {
+               /* Note h/w doesn't support 64-bit, so we unconditionally
+                * truncate dma_addr_t to u32.
+                */
+               u32 addr = (u32) sg_dma_address(sg);
+               u32 sg_len = sg_dma_len(sg);
+
+               prd->addr = cpu_to_le32(addr);
+               prd->flags_len = cpu_to_le32(sg_len);
+               VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", si, addr, sg_len);
+
+               last_prd = prd;
+               prd++;
+       }
+
+       if (likely(last_prd))
+               last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT);
+}
+
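
sil_fill_sg() can map each scatterlist element to a single PRD, OR'ing ATA_PRD_EOT into flags_len of the last entry only, because Large Block Transfer lifts the usual 64KB-per-PRD limit. For contrast, a sketch of the per-segment splitting a standard SFF filler has to do (helper name hypothetical, loop shape assumed from libata's generic filler; not used by this driver):

static struct ata_prd *sff_split_segment(struct ata_prd *prd,
                                         u32 addr, u32 sg_len)
{
        while (sg_len) {
                u32 offset = addr & 0xffff;
                u32 len = sg_len;

                if ((offset + sg_len) > 0x10000)
                        len = 0x10000 - offset; /* clip at 64KB boundary */

                prd->addr = cpu_to_le32(addr);
                /* a length of 0x10000 is encoded as 0 per the SFF spec */
                prd->flags_len = cpu_to_le32(len & 0xffff);
                prd++;

                addr += len;
                sg_len -= len;
        }
        return prd;
}
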
+static void sil_qc_prep(struct ata_queued_cmd *qc)
+{
+       if (!(qc->flags & ATA_QCFLAG_DMAMAP))
+               return;
+
+       sil_fill_sg(qc);
+}
+
 static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
 {
        u8 cache_line = 0;
@@ -439,7 +532,7 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance)
                struct ata_port *ap = host->ports[i];
                u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2);
 
-               if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
+               if (unlikely(ap->flags & ATA_FLAG_DISABLED))
                        continue;
 
                /* turn off SATA_IRQ if not supported */
@@ -472,6 +565,19 @@ static void sil_freeze(struct ata_port *ap)
        tmp |= SIL_MASK_IDE0_INT << ap->port_no;
        writel(tmp, mmio_base + SIL_SYSCFG);
        readl(mmio_base + SIL_SYSCFG);  /* flush */
+
+       /* Ensure DMA_ENABLE is off.
+        *
+        * This is because the controller will not give us access to the
+        * taskfile registers while a DMA is in progress.
+        */
+       iowrite8(ioread8(ap->ioaddr.bmdma_addr) & ~SIL_DMA_ENABLE,
+                ap->ioaddr.bmdma_addr);
+
+       /* According to ata_bmdma_stop(), an HDMA transition requires one
+        * PIO cycle, but we can't read a taskfile register here, so read
+        * back the BMDMA register instead.
+        */
+       ioread8(ap->ioaddr.bmdma_addr);
 }
 
 static void sil_thaw(struct ata_port *ap)
@@ -603,11 +709,38 @@ static void sil_init_controller(struct ata_host *host)
        }
 }
 
+static bool sil_broken_system_poweroff(struct pci_dev *pdev)
+{
+       static const struct dmi_system_id broken_systems[] = {
+               {
+                       .ident = "HP Compaq nx6325",
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+                               DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6325"),
+                       },
+                       /* PCI slot number of the controller */
+                       .driver_data = (void *)0x12UL,
+               },
+
+               { }     /* terminate list */
+       };
+       const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
+
+       if (dmi) {
+               unsigned long slot = (unsigned long)dmi->driver_data;
+               /* apply the quirk only to on-board controllers */
+               return slot == PCI_SLOT(pdev->devfn);
+       }
+
+       return false;
+}
+
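
sil_broken_system_poweroff() keys the quirk off a DMI match and stashes the PCI slot of the affected on-board controller in driver_data, so an identical chip on a plug-in card is not caught by the quirk. Further quirky machines could be appended before the terminating entry; a hypothetical example (vendor, model and slot invented for illustration):

                {
                        .ident = "Example Vendor Laptop",
                        .matches = {
                                DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
                                DMI_MATCH(DMI_PRODUCT_NAME, "Example Laptop"),
                        },
                        /* PCI slot number of the on-board controller */
                        .driver_data = (void *)0x0aUL,
                },
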
 static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        static int printed_version;
        int board_id = ent->driver_data;
-       const struct ata_port_info *ppi[] = { &sil_port_info[board_id], NULL };
+       struct ata_port_info pi = sil_port_info[board_id];
+       const struct ata_port_info *ppi[] = { &pi, NULL };
        struct ata_host *host;
        void __iomem *mmio_base;
        int n_ports, rc;
@@ -621,6 +754,13 @@ static int sil_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (board_id == sil_3114)
                n_ports = 4;
 
+       if (sil_broken_system_poweroff(pdev)) {
+               pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN |
+                                       ATA_FLAG_NO_HIBERNATE_SPINDOWN;
+               dev_info(&pdev->dev, "quirky BIOS, skipping spindown "
+                               "on poweroff and hibernation\n");
+       }
+
        host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
        if (!host)
                return -ENOMEM;