/*
- * linux/drivers/ide/ide-dma.c Version 4.10 June 9, 2000
+ * Copyright (C) 1995-1998 Mark Lord
+ * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2004, 2007 Bartlomiej Zolnierkiewicz
*
- * Copyright (c) 1999-2000 Andre Hedrick <andre@linux-ide.org>
* May be copied or modified under the terms of the GNU General Public License
*/
/*
* Special Thanks to Mark for his six years of work.
- *
- * Copyright (c) 1995-1998 Mark Lord
- * May be copied or modified under the terms of the GNU General Public License
*/
#include <linux/ide.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/irq.h>
u8 stat = 0, dma_stat = 0;
dma_stat = HWIF(drive)->ide_dma_end(drive);
- stat = HWIF(drive)->INB(IDE_STATUS_REG); /* get drive status */
+ stat = ide_read_status(drive);
+
if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
if (!dma_stat) {
struct request *rq = HWGROUP(drive)->rq;
return ide_in_drive_list(drive->id, drive_whitelist);
}
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
/**
* ide_build_sglist - map IDE scatter gather for DMA I/O
* @drive: the drive to build the DMA table for
* @rq: the request holding the sg list
*
- * Perform the PCI mapping magic necessary to access the source or
- * target buffers of a request via PCI DMA. The lower layers of the
+ * Perform the DMA mapping magic necessary to access the source or
+ * target buffers of a request via DMA. The lower layers of the
* kernel provide the necessary cache management so that we can
- * operate in a portable fashion
+ * operate in a portable fashion.
*/
int ide_build_sglist(ide_drive_t *drive, struct request *rq)
ide_map_sg(drive, rq);
if (rq_data_dir(rq) == READ)
- hwif->sg_dma_direction = PCI_DMA_FROMDEVICE;
+ hwif->sg_dma_direction = DMA_FROM_DEVICE;
else
- hwif->sg_dma_direction = PCI_DMA_TODEVICE;
+ hwif->sg_dma_direction = DMA_TO_DEVICE;
- return pci_map_sg(hwif->pci_dev, sg, hwif->sg_nents, hwif->sg_dma_direction);
+ return dma_map_sg(hwif->dev, sg, hwif->sg_nents,
+ hwif->sg_dma_direction);
}
EXPORT_SYMBOL_GPL(ide_build_sglist);
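
For reference, a minimal sketch of the generic streaming-DMA pattern this
hunk converts to. Everything here is illustrative (the helper name and the
is_read flag are placeholders, not part of this patch); only dma_map_sg(),
dma_unmap_sg(), sg_dma_address() and sg_dma_len() are real kernel API:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Illustrative only: map a scatterlist for a transfer in one direction. */
static int example_map_sg(struct device *dev, struct scatterlist *sg,
			  int nents, int is_read)
{
	enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE
					      : DMA_TO_DEVICE;
	int mapped = dma_map_sg(dev, sg, nents, dir);

	if (mapped == 0)
		return 0;	/* mapping failed: caller falls back to PIO */

	/*
	 * The controller is then programmed from sg_dma_address()/
	 * sg_dma_len() of each mapped entry, and dma_unmap_sg() must
	 * later be called with the same dev/sg/nents/dir quadruple,
	 * which is exactly what ide_destroy_dmatable() does below.
	 */
	return mapped;
}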
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
* ide_build_dmatable - build IDE DMA table
*
*--table |= cpu_to_le32(0x80000000);
return count;
}
+
printk(KERN_ERR "%s: empty DMA table?\n", drive->name);
+
use_pio_instead:
- pci_unmap_sg(hwif->pci_dev,
- hwif->sg_table,
- hwif->sg_nents,
- hwif->sg_dma_direction);
+ ide_destroy_dmatable(drive);
+
return 0; /* revert to PIO for this request */
}
EXPORT_SYMBOL_GPL(ide_build_dmatable);
+#endif
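
The 0x80000000 OR above sets the end-of-table bit in the last PRD entry.
For orientation, an SFF-8038i physical region descriptor has this shape
(a sketch for illustration; the driver fills the table as raw little-endian
words rather than through a named struct):

/* SFF-8038i PRD entry: two little-endian 32-bit words per memory region. */
struct prd_entry {
	__le32 addr;	/* physical base of the region, dword-aligned */
	__le32 size;	/* bits 15:0 = byte count (0x0000 means 64 KiB),
			 * bit 31 = end-of-table, the 0x80000000 above */
};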
/**
* ide_destroy_dmatable - clean up DMA mapping
void ide_destroy_dmatable (ide_drive_t *drive)
{
- struct pci_dev *dev = HWIF(drive)->pci_dev;
- struct scatterlist *sg = HWIF(drive)->sg_table;
- int nents = HWIF(drive)->sg_nents;
+ ide_hwif_t *hwif = drive->hwif;
- pci_unmap_sg(dev, sg, nents, HWIF(drive)->sg_dma_direction);
+ dma_unmap_sg(hwif->dev, hwif->sg_table, hwif->sg_nents,
+ hwif->sg_dma_direction);
}
EXPORT_SYMBOL_GPL(ide_destroy_dmatable);
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
* config_drive_for_dma - attempt to activate IDE DMA
* @drive: the drive to place in DMA mode
}
EXPORT_SYMBOL_GPL(ide_dma_host_set);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
/**
* ide_dma_off_quietly - Generic DMA kill
drive->hwif->dma_host_set(drive, 1);
}
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
/**
* ide_dma_setup - begin a DMA phase
* @drive: target device
}
#else
static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
int __ide_dma_bad_drive (ide_drive_t *drive)
{
return 0;
}
-#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+void ide_check_dma_crc(ide_drive_t *drive)
+{
+ u8 mode;
+
+ ide_dma_off_quietly(drive);
+ drive->crc_count = 0;
+ mode = drive->current_speed;
+ /*
+ * Don't try non-Ultra-DMA modes without iCRC support. Force the
+ * device to PIO and make the user enable SWDMA/MWDMA modes instead.
+ */
+ if (mode > XFER_UDMA_0 && mode <= XFER_UDMA_7)
+ mode--;
+ else
+ mode = XFER_PIO_4;
+ ide_set_xfer_rate(drive, mode);
+ if (drive->current_speed >= XFER_SW_DMA_0)
+ ide_dma_on(drive);
+}
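
In effect, each burst of UDMA CRC errors steps the interface down one UDMA
mode, and anything slower than UDMA (which has no iCRC protection) is forced
back to PIO. A hypothetical caller, with an invented threshold purely for
illustration:

/*
 * Sketch only; the threshold and call site are not from this patch.
 *
 *   current_speed          after ide_check_dma_crc()
 *   XFER_UDMA_5      ->    XFER_UDMA_4  (one step down, DMA re-enabled)
 *   XFER_UDMA_0      ->    XFER_PIO_4   (no slower mode with iCRC left)
 *   XFER_MW_DMA_2    ->    XFER_PIO_4   (MWDMA lacks CRC, so use PIO)
 */
if (drive->crc_count >= 4)
	ide_check_dma_crc(drive);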
+
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
void ide_dma_lost_irq (ide_drive_t *drive)
{
printk("%s: DMA interrupt recovery\n", drive->name);
static void ide_release_dma_engine(ide_hwif_t *hwif)
{
if (hwif->dmatable_cpu) {
- pci_free_consistent(hwif->pci_dev,
- PRD_ENTRIES * PRD_BYTES,
- hwif->dmatable_cpu,
- hwif->dmatable_dma);
+ struct pci_dev *pdev = to_pci_dev(hwif->dev);
+
+ pci_free_consistent(pdev, PRD_ENTRIES * PRD_BYTES,
+ hwif->dmatable_cpu, hwif->dmatable_dma);
hwif->dmatable_cpu = NULL;
}
}
static int ide_allocate_dma_engine(ide_hwif_t *hwif)
{
- hwif->dmatable_cpu = pci_alloc_consistent(hwif->pci_dev,
+ struct pci_dev *pdev = to_pci_dev(hwif->dev);
+
+ hwif->dmatable_cpu = pci_alloc_consistent(pdev,
PRD_ENTRIES * PRD_BYTES,
&hwif->dmatable_dma);
}
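
Note that the allocation still goes through the pci_*_consistent() wrappers
and only recovers the struct pci_dev from the generic hwif->dev. For
comparison, the fully converted form would call the generic coherent-DMA API
directly (a sketch, not part of this patch):

/* Generic equivalent of the pci_alloc_consistent() call above. */
hwif->dmatable_cpu = dma_alloc_coherent(hwif->dev,
					PRD_ENTRIES * PRD_BYTES,
					&hwif->dmatable_dma, GFP_ATOMIC);

/* ... and the matching release in ide_release_dma_engine(): */
dma_free_coherent(hwif->dev, PRD_ENTRIES * PRD_BYTES,
		  hwif->dmatable_cpu, hwif->dmatable_dma);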
EXPORT_SYMBOL_GPL(ide_setup_dma);
-#endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
+#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */