* Hardware documentation available from http://www.t13.org/ and
* http://www.sata-io.org/
*
+ * Standards documents from:
+ * http://www.t13.org (ATA standards, PCI DMA IDE spec)
+ * http://www.t10.org (SCSI MMC - for ATAPI MMC)
+ * http://www.sata-io.org (SATA)
+ * http://www.compactflash.org (CF)
+ * http://www.qic.org (QIC157 - Tape and DSC)
+ * http://www.ce-ata.org (CE-ATA: not supported)
+ *
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
-#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
-#include <asm/semaphore.h>
#include <asm/byteorder.h>
+#include <linux/cdrom.h>
#include "libata.h"
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
+const struct ata_port_operations ata_base_port_ops = {
+ .prereset = ata_std_prereset,
+ .postreset = ata_std_postreset,
+ .error_handler = ata_std_error_handler,
+};
+
+const struct ata_port_operations sata_port_ops = {
+ .inherits = &ata_base_port_ops,
+
+ .qc_defer = ata_std_qc_defer,
+ .hardreset = sata_std_hardreset,
+};
+
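+/*
+ * Illustration only (not part of this file): an LLD can build on the
+ * templates above by chaining ->inherits and overriding just the ops
+ * it needs. A hypothetical driver might do:
+ *
+ *	static struct ata_port_operations my_port_ops = {
+ *		.inherits	= &sata_port_ops,
+ *		.hardreset	= my_hardreset,
+ *	};
+ *
+ * where my_hardreset is the driver's own reset method.
+ */
+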
static unsigned int ata_dev_init_params(struct ata_device *dev,
u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
struct workqueue_struct *ata_aux_wq;
+struct ata_force_param {
+ const char *name;
+ unsigned int cbl;
+ int spd_limit;
+ unsigned long xfer_mask;
+ unsigned int horkage_on;
+ unsigned int horkage_off;
+};
+
+struct ata_force_ent {
+ int port;
+ int device;
+ struct ata_force_param param;
+};
+
+static struct ata_force_ent *ata_force_tbl;
+static int ata_force_tbl_size;
+
+static char ata_force_param_buf[PAGE_SIZE] __initdata;
+/* param_buf is thrown away after initialization, disallow read */
+module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
+MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
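+
+/*
+ * For illustration, assuming the syntax described in
+ * Documentation/kernel-parameters.txt, a boot command line could carry
+ * something like:
+ *
+ *	libata.force=1:40c,1.00:udma4
+ *
+ * i.e. force a 40-wire cable on port 1 and UDMA4 on device 1.00. The
+ * parsed entries end up in ata_force_tbl and are consumed by the
+ * ata_force_*() helpers below.
+ */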
+
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
-int atapi_dmadir = 0;
+static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
+int libata_allow_tpm = 0;
+module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
+MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
+
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
/**
+ * ata_force_cbl - force cable type according to libata.force
+ * @ap: ATA port of interest
+ *
+ * Force cable type according to libata.force and whine about it.
+ * The last entry which has matching port number is used, so it
+ * can be specified as part of device force parameters. For
+ * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
+ * same effect.
+ *
+ * LOCKING:
+ * EH context.
+ */
+void ata_force_cbl(struct ata_port *ap)
+{
+ int i;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != ap->print_id)
+ continue;
+
+ if (fe->param.cbl == ATA_CBL_NONE)
+ continue;
+
+ ap->cbl = fe->param.cbl;
+ ata_port_printk(ap, KERN_NOTICE,
+ "FORCE: cable set to %s\n", fe->param.name);
+ return;
+ }
+}
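+
+/*
+ * Note: ata_force_cbl(), ata_force_spd_limit() and ata_force_xfermask()
+ * walk ata_force_tbl backwards and stop at the first hit, which is how
+ * "the last matching entry wins" is implemented.
+ */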
+
+/**
+ * ata_force_spd_limit - force SATA spd limit according to libata.force
+ * @link: ATA link of interest
+ *
+ * Force SATA spd limit according to libata.force and whine about
+ * it. When only the port part is specified (e.g. 1:), the limit
+ * applies to all links connected to both the host link and all
+ * fan-out ports connected via PMP. If the device part is
+ * specified as 0 (e.g. 1.00:), it specifies the first fan-out
+ * link not the host link. Device number 15 always points to the
+ * host link whether PMP is attached or not.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_spd_limit(struct ata_link *link)
+{
+ int linkno, i;
+
+ if (ata_is_host_link(link))
+ linkno = 15;
+ else
+ linkno = link->pmp;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != link->ap->print_id)
+ continue;
+
+ if (fe->device != -1 && fe->device != linkno)
+ continue;
+
+ if (!fe->param.spd_limit)
+ continue;
+
+ link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
+ ata_link_printk(link, KERN_NOTICE,
+ "FORCE: PHY spd limit set to %s\n", fe->param.name);
+ return;
+ }
+}
+
+/**
+ * ata_force_xfermask - force xfermask according to libata.force
+ * @dev: ATA device of interest
+ *
+ * Force xfer_mask according to libata.force and whine about it.
+ * For consistency with link selection, device number 15 selects
+ * the first device connected to the host link.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_xfermask(struct ata_device *dev)
+{
+ int devno = dev->link->pmp + dev->devno;
+ int alt_devno = devno;
+ int i;
+
+ /* allow n.15 for the first device attached to host port */
+ if (ata_is_host_link(dev->link) && devno == 0)
+ alt_devno = 15;
+
+ for (i = ata_force_tbl_size - 1; i >= 0; i--) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+ unsigned long pio_mask, mwdma_mask, udma_mask;
+
+ if (fe->port != -1 && fe->port != dev->link->ap->print_id)
+ continue;
+
+ if (fe->device != -1 && fe->device != devno &&
+ fe->device != alt_devno)
+ continue;
+
+ if (!fe->param.xfer_mask)
+ continue;
+
+ ata_unpack_xfermask(fe->param.xfer_mask,
+ &pio_mask, &mwdma_mask, &udma_mask);
+ if (udma_mask)
+ dev->udma_mask = udma_mask;
+ else if (mwdma_mask) {
+ dev->udma_mask = 0;
+ dev->mwdma_mask = mwdma_mask;
+ } else {
+ dev->udma_mask = 0;
+ dev->mwdma_mask = 0;
+ dev->pio_mask = pio_mask;
+ }
+
+ ata_dev_printk(dev, KERN_NOTICE,
+ "FORCE: xfer_mask set to %s\n", fe->param.name);
+ return;
+ }
+}
+
+/**
+ * ata_force_horkage - force horkage according to libata.force
+ * @dev: ATA device of interest
+ *
+ * Force horkage according to libata.force and whine about it.
+ * For consistency with link selection, device number 15 selects
+ * the first device connected to the host link.
+ *
+ * LOCKING:
+ * EH context.
+ */
+static void ata_force_horkage(struct ata_device *dev)
+{
+ int devno = dev->link->pmp + dev->devno;
+ int alt_devno = devno;
+ int i;
+
+ /* allow n.15 for the first device attached to host port */
+ if (ata_is_host_link(dev->link) && devno == 0)
+ alt_devno = 15;
+
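+ /* Unlike the helpers above, walk the table forwards and apply
+ * every matching entry, so that later entries can set or clear
+ * horkage bits left by earlier ones.
+ */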
+ for (i = 0; i < ata_force_tbl_size; i++) {
+ const struct ata_force_ent *fe = &ata_force_tbl[i];
+
+ if (fe->port != -1 && fe->port != dev->link->ap->print_id)
+ continue;
+
+ if (fe->device != -1 && fe->device != devno &&
+ fe->device != alt_devno)
+ continue;
+
+ if (!(~dev->horkage & fe->param.horkage_on) &&
+ !(dev->horkage & fe->param.horkage_off))
+ continue;
+
+ dev->horkage |= fe->param.horkage_on;
+ dev->horkage &= ~fe->param.horkage_off;
+
+ ata_dev_printk(dev, KERN_NOTICE,
+ "FORCE: horkage modified (%s)\n", fe->param.name);
+ }
+}
+
+/**
+ * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
+ * @opcode: SCSI opcode
+ *
+ * Determine ATAPI command type from @opcode.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
+ */
+int atapi_cmd_type(u8 opcode)
+{
+ switch (opcode) {
+ case GPCMD_READ_10:
+ case GPCMD_READ_12:
+ return ATAPI_READ;
+
+ case GPCMD_WRITE_10:
+ case GPCMD_WRITE_12:
+ case GPCMD_WRITE_AND_VERIFY_10:
+ return ATAPI_WRITE;
+
+ case GPCMD_READ_CD:
+ case GPCMD_READ_CD_MSF:
+ return ATAPI_READ_CD;
+
+ case ATA_16:
+ case ATA_12:
+ if (atapi_passthru16)
+ return ATAPI_PASS_THRU;
+ /* fall thru */
+ default:
+ return ATAPI_MISC;
+ }
+}
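+
+/*
+ * A sketch of how a SCSI-side caller might consume the classification
+ * above (hypothetical, for illustration only):
+ *
+ *	switch (atapi_cmd_type(scsicmd->cmnd[0])) {
+ *	case ATAPI_READ:
+ *	case ATAPI_WRITE:
+ *		... set up a DMA-capable transfer ...
+ *		break;
+ *	default:
+ *		... fall back to PIO ...
+ *	}
+ */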
+
+/**
* ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
* @tf: Taskfile to convert
* @pmp: Port multiplier port
* RETURNS:
* Packed xfer_mask.
*/
-static unsigned int ata_pack_xfermask(unsigned int pio_mask,
- unsigned int mwdma_mask,
- unsigned int udma_mask)
+unsigned long ata_pack_xfermask(unsigned long pio_mask,
+ unsigned long mwdma_mask,
+ unsigned long udma_mask)
{
return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
* Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
* Any NULL destination masks will be ignored.
*/
-static void ata_unpack_xfermask(unsigned int xfer_mask,
- unsigned int *pio_mask,
- unsigned int *mwdma_mask,
- unsigned int *udma_mask)
+void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
+ unsigned long *mwdma_mask, unsigned long *udma_mask)
{
if (pio_mask)
*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
int shift, bits;
u8 base;
} ata_xfer_tbl[] = {
- { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
- { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
- { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
+ { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
+ { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
+ { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
{ -1, },
};
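+
+/*
+ * Sketch of how the packed mask from ata_pack_xfermask() above
+ * round-trips (the actual bit positions come from the ATA_SHIFT_* and
+ * ATA_MASK_* definitions in <linux/ata.h>):
+ *
+ *	unsigned long xfer_mask, pio, mwdma, udma;
+ *
+ *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
+ *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
+ *
+ * yields pio == 0x1f (PIO0-4), mwdma == 0x07 (MWDMA0-2) and
+ * udma == 0x3f (UDMA0-5) again.
+ */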
* None.
*
* RETURNS:
- * Matching XFER_* value, 0 if no match found.
+ * Matching XFER_* value, 0xff if no match found.
*/
-static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
+u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
int highbit = fls(xfer_mask) - 1;
const struct ata_xfer_ent *ent;
for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
return ent->base + highbit - ent->shift;
- return 0;
+ return 0xff;
}
/**
* RETURNS:
* Matching xfer_mask, 0 if no match found.
*/
-static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
+unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
const struct ata_xfer_ent *ent;
for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
- return 1 << (ent->shift + xfer_mode - ent->base);
+ return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
+ & ~((1 << ent->shift) - 1);
return 0;
}
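+
+/*
+ * Note the behaviour change above: ata_xfer_mode2mask() now returns
+ * the mask of the matching mode plus all slower modes of the same
+ * transfer type, e.g. XFER_UDMA_2 yields the UDMA0-2 mask rather than
+ * just the single UDMA2 bit.
+ */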
* RETURNS:
* Matching xfer_shift, -1 if no match found.
*/
-static int ata_xfer_mode2shift(unsigned int xfer_mode)
+int ata_xfer_mode2shift(unsigned long xfer_mode)
{
const struct ata_xfer_ent *ent;
* Constant C string representing highest speed listed in
* @mode_mask, or the constant C string "<n/a>".
*/
-static const char *ata_mode_string(unsigned int xfer_mask)
+const char *ata_mode_string(unsigned long xfer_mask)
{
static const char * const xfer_mode_str[] = {
"PIO0",
if (ata_dev_enabled(dev)) {
if (ata_msg_drv(dev->link->ap))
ata_dev_printk(dev, KERN_WARNING, "disabled\n");
+ ata_acpi_on_disable(dev);
ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
ATA_DNXFER_QUIET);
dev->class++;
void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
ap->pm_policy = policy;
- ap->link.eh_info.action |= ATA_EHI_LPM;
+ ap->link.eh_info.action |= ATA_EH_LPM;
ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
ata_port_schedule_eh(ap);
}
}
#endif /* CONFIG_PM */
-
-/**
- * ata_devchk - PATA device presence detection
- * @ap: ATA channel to examine
- * @device: Device to examine (starting at zero)
- *
- * This technique was originally described in
- * Hale Landis's ATADRVR (www.ata-atapi.com), and
- * later found its way into the ATA/ATAPI spec.
- *
- * Write a pattern to the ATA shadow registers,
- * and if a device is present, it will respond by
- * correctly storing and echoing back the
- * ATA shadow register contents.
- *
- * LOCKING:
- * caller.
- */
-
-static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
- u8 nsect, lbal;
-
- ap->ops->dev_select(ap, device);
-
- iowrite8(0x55, ioaddr->nsect_addr);
- iowrite8(0xaa, ioaddr->lbal_addr);
-
- iowrite8(0xaa, ioaddr->nsect_addr);
- iowrite8(0x55, ioaddr->lbal_addr);
-
- iowrite8(0x55, ioaddr->nsect_addr);
- iowrite8(0xaa, ioaddr->lbal_addr);
-
- nsect = ioread8(ioaddr->nsect_addr);
- lbal = ioread8(ioaddr->lbal_addr);
-
- if ((nsect == 0x55) && (lbal == 0xaa))
- return 1; /* we found a device */
-
- return 0; /* nothing found */
-}
-
/**
* ata_dev_classify - determine device type based on ATA-spec signature
* @tf: ATA taskfile register set for device to be identified
}
/**
- * ata_dev_try_classify - Parse returned ATA device signature
- * @dev: ATA device to classify (starting at zero)
- * @present: device seems present
- * @r_err: Value of error register on completion
- *
- * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
- * an ATA/ATAPI-defined set of values is placed in the ATA
- * shadow registers, indicating the results of device detection
- * and diagnostics.
- *
- * Select the ATA device, and read the values from the ATA shadow
- * registers. Then parse according to the Error register value,
- * and the spec-defined values examined by ata_dev_classify().
- *
- * LOCKING:
- * caller.
- *
- * RETURNS:
- * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
- */
-unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
- u8 *r_err)
-{
- struct ata_port *ap = dev->link->ap;
- struct ata_taskfile tf;
- unsigned int class;
- u8 err;
-
- ap->ops->dev_select(ap, dev->devno);
-
- memset(&tf, 0, sizeof(tf));
-
- ap->ops->tf_read(ap, &tf);
- err = tf.feature;
- if (r_err)
- *r_err = err;
-
- /* see if device passed diags: if master then continue and warn later */
- if (err == 0 && dev->devno == 0)
- /* diagnostic fail : do nothing _YET_ */
- dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
- else if (err == 1)
- /* do nothing */ ;
- else if ((dev->devno == 0) && (err == 0x81))
- /* do nothing */ ;
- else
- return ATA_DEV_NONE;
-
- /* determine if device is ATA or ATAPI */
- class = ata_dev_classify(&tf);
-
- if (class == ATA_DEV_UNKNOWN) {
- /* If the device failed diagnostic, it's likely to
- * have reported incorrect device signature too.
- * Assume ATA device if the device seems present but
- * device signature is invalid with diagnostic
- * failure.
- */
- if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
- class = ATA_DEV_ATA;
- else
- class = ATA_DEV_NONE;
- } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
- class = ATA_DEV_NONE;
-
- return class;
-}
-
-/**
* ata_id_string - Convert IDENTIFY DEVICE page into string
* @id: IDENTIFY DEVICE results we will examine
* @s: string into which data is output
}
}
-static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
+u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
u64 sectors = 0;
sectors |= (tf->lbam & 0xff) << 8;
sectors |= (tf->lbal & 0xff);
- return ++sectors;
+ return sectors;
}
-static u64 ata_tf_to_lba(struct ata_taskfile *tf)
+u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
u64 sectors = 0;
sectors |= (tf->lbam & 0xff) << 8;
sectors |= (tf->lbal & 0xff);
- return ++sectors;
+ return sectors;
}
/**
}
if (lba48)
- *max_sectors = ata_tf_to_lba48(&tf);
+ *max_sectors = ata_tf_to_lba48(&tf) + 1;
else
- *max_sectors = ata_tf_to_lba(&tf);
+ *max_sectors = ata_tf_to_lba(&tf) + 1;
if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
(*max_sectors)--;
return 0;
/* read native max address */
rc = ata_read_native_max_address(dev, &native_sectors);
if (rc) {
- /* If HPA isn't going to be unlocked, skip HPA
- * resizing from the next try.
+ /* If device aborted the command or HPA isn't going to
+ * be unlocked, skip HPA resizing.
*/
- if (!ata_ignore_hpa) {
+ if (rc == -EACCES || !ata_ignore_hpa) {
ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
- "broken, will skip HPA handling\n");
+ "broken, skipping HPA handling\n");
dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
/* we can continue if device aborted the command */
}
/**
- * ata_id_to_dma_mode - Identify DMA mode from id block
- * @dev: device to identify
- * @unknown: mode to assume if we cannot tell
- *
- * Set up the timing values for the device based upon the identify
- * reported values for the DMA mode. This function is used by drivers
- * which rely upon firmware configured modes, but wish to report the
- * mode correctly when possible.
- *
- * In addition we emit similarly formatted messages to the default
- * ata_dev_set_mode handler, in order to provide consistency of
- * presentation.
- */
-
-void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
-{
- unsigned int mask;
- u8 mode;
-
- /* Pack the DMA modes */
- mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
- if (dev->id[53] & 0x04)
- mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
-
- /* Select the mode in use */
- mode = ata_xfer_mask2mode(mask);
-
- if (mode != 0) {
- ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
- ata_mode_string(mask));
- } else {
- /* SWDMA perhaps ? */
- mode = unknown;
- ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
- }
-
- /* Configure the device reporting */
- dev->xfer_mode = mode;
- dev->xfer_shift = ata_xfer_mode2shift(mode);
-}
-
-/**
- * ata_noop_dev_select - Select device 0/1 on ATA bus
- * @ap: ATA channel to manipulate
- * @device: ATA device (numbered from zero) to select
- *
- * This function performs no actual function.
- *
- * May be used as the dev_select() entry in ata_port_operations.
- *
- * LOCKING:
- * caller.
- */
-void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
-{
-}
-
-
-/**
- * ata_std_dev_select - Select device 0/1 on ATA bus
- * @ap: ATA channel to manipulate
- * @device: ATA device (numbered from zero) to select
- *
- * Use the method defined in the ATA specification to
- * make either device 0, or device 1, active on the
- * ATA channel. Works with both PIO and MMIO.
- *
- * May be used as the dev_select() entry in ata_port_operations.
- *
- * LOCKING:
- * caller.
- */
-
-void ata_std_dev_select(struct ata_port *ap, unsigned int device)
-{
- u8 tmp;
-
- if (device == 0)
- tmp = ATA_DEVICE_OBS;
- else
- tmp = ATA_DEVICE_OBS | ATA_DEV1;
-
- iowrite8(tmp, ap->ioaddr.device_addr);
- ata_pause(ap); /* needed; also flushes, for mmio */
-}
-
-/**
- * ata_dev_select - Select device 0/1 on ATA bus
- * @ap: ATA channel to manipulate
- * @device: ATA device (numbered from zero) to select
- * @wait: non-zero to wait for Status register BSY bit to clear
- * @can_sleep: non-zero if context allows sleeping
- *
- * Use the method defined in the ATA specification to
- * make either device 0, or device 1, active on the
- * ATA channel.
- *
- * This is a high-level version of ata_std_dev_select(),
- * which additionally provides the services of inserting
- * the proper pauses and status polling, where needed.
- *
- * LOCKING:
- * caller.
- */
-
-void ata_dev_select(struct ata_port *ap, unsigned int device,
- unsigned int wait, unsigned int can_sleep)
-{
- if (ata_msg_probe(ap))
- ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
- "device %u, wait %u\n", device, wait);
-
- if (wait)
- ata_wait_idle(ap);
-
- ap->ops->dev_select(ap, device);
-
- if (wait) {
- if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
- msleep(150);
- ata_wait_idle(ap);
- }
-}
-
-/**
* ata_dump_id - IDENTIFY DEVICE info debugging output
* @id: IDENTIFY DEVICE page to dump
*
* RETURNS:
* Computed xfermask
*/
-static unsigned int ata_id_xfermask(const u16 *id)
+unsigned long ata_id_xfermask(const u16 *id)
{
- unsigned int pio_mask, mwdma_mask, udma_mask;
+ unsigned long pio_mask, mwdma_mask, udma_mask;
/* Usual case. Word 53 indicates word 64 is valid */
if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
}
/**
- * ata_port_queue_task - Queue port_task
+ * ata_pio_queue_task - Queue port_task
* @ap: The ata_port to queue port_task for
* @fn: workqueue function to be scheduled
* @data: data for @fn to use
* one task is active at any given time.
*
* libata core layer takes care of synchronization between
- * port_task and EH. ata_port_queue_task() may be ignored for EH
+ * port_task and EH. ata_pio_queue_task() may be ignored for EH
* synchronization.
*
* LOCKING:
* Inherited from caller.
*/
-void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
- unsigned long delay)
+void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
{
- PREPARE_DELAYED_WORK(&ap->port_task, fn);
ap->port_task_data = data;
/* may fail if ata_port_flush_task() in progress */
cancel_rearming_delayed_work(&ap->port_task);
if (ata_msg_ctl(ap))
- ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
+ ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
int rc;
if (ata_msg_ctl(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
- ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
retry:
ata_tf_init(dev, &tf);
id, sizeof(id[0]) * ATA_ID_WORDS, 0);
if (err_mask) {
if (err_mask & AC_ERR_NODEV_HINT) {
- DPRINTK("ata%u.%d: NODEV after polling detection\n",
- ap->print_id, dev->devno);
+ ata_dev_printk(dev, KERN_DEBUG,
+ "NODEV after polling detection\n");
return -ENOENT;
}
- /* Device or controller might have reported the wrong
- * device class. Give a shot at the other IDENTIFY if
- * the current one is aborted by the device.
- */
- if (may_fallback &&
- (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
- may_fallback = 0;
+ if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
+ /* Device or controller might have reported
+ * the wrong device class. Give a shot at the
+ * other IDENTIFY if the current one is
+ * aborted by the device.
+ */
+ if (may_fallback) {
+ may_fallback = 0;
- if (class == ATA_DEV_ATA)
- class = ATA_DEV_ATAPI;
- else
- class = ATA_DEV_ATA;
- goto retry;
+ if (class == ATA_DEV_ATA)
+ class = ATA_DEV_ATAPI;
+ else
+ class = ATA_DEV_ATA;
+ goto retry;
+ }
+
+ /* Control reaches here iff the device aborted
+ * both flavors of IDENTIFYs which happens
+ * sometimes with phantom devices.
+ */
+ ata_dev_printk(dev, KERN_DEBUG,
+ "both IDENTIFYs aborted, assuming NODEV\n");
+ return -ENOENT;
}
rc = -EIO;
struct ata_eh_context *ehc = &dev->link->eh_context;
int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
const u16 *id = dev->id;
- unsigned int xfer_mask;
+ unsigned long xfer_mask;
char revbuf[7]; /* XYZ-99\0 */
char fwrevbuf[ATA_ID_FW_REV_LEN+1];
char modelbuf[ATA_ID_PROD_LEN+1];
if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
- __FUNCTION__);
+ __func__);
return 0;
}
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
/* set horkage */
dev->horkage |= ata_dev_blacklisted(dev);
+ ata_force_horkage(dev);
/* let ACPI work its magic */
rc = ata_acpi_on_devcfg(dev);
ata_dev_printk(dev, KERN_DEBUG,
"%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
"85:%04x 86:%04x 87:%04x 88:%04x\n",
- __FUNCTION__,
+ __func__,
id[49], id[82], id[83], id[84],
id[85], id[86], id[87], id[88]);
"supports DRM functions and may "
"not be fully accessable.\n");
snprintf(revbuf, 7, "CFA");
- } else
+ } else {
snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
+ /* Warn the user if the device has TPM extensions */
+ if (ata_id_has_tpm(id))
+ ata_dev_printk(dev, KERN_WARNING,
+ "supports DRM functions and may "
+ "not be fully accessable.\n");
+ }
dev->n_sectors = ata_id_n_sectors(id);
else if (dev->class == ATA_DEV_ATAPI) {
const char *cdb_intr_string = "";
const char *atapi_an_string = "";
+ const char *dma_dir_string = "";
u32 sntf;
rc = atapi_cdb_len(id);
* changed notifications and ATAPI ANs.
*/
if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
- (!ap->nr_pmp_links ||
+ (!sata_pmp_attached(ap) ||
sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
unsigned int err_mask;
cdb_intr_string = ", CDB intr";
}
+ if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
+ dev->flags |= ATA_DFLAG_DMADIR;
+ dma_dir_string = ", DMADIR";
+ }
+
/* print device info to dmesg */
if (ata_msg_drv(ap) && print_info)
ata_dev_printk(dev, KERN_INFO,
- "ATAPI: %s, %s, max %s%s%s\n",
+ "ATAPI: %s, %s, max %s%s%s%s\n",
modelbuf, fwrevbuf,
ata_mode_string(xfer_mask),
- cdb_intr_string, atapi_an_string);
+ cdb_intr_string, atapi_an_string,
+ dma_dir_string);
}
/* determine max_sectors */
dev->flags |= ATA_DFLAG_DIPM;
}
- if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
- /* Let the user know. We don't want to disallow opens for
- rescue purposes, or in case the vendor is just a blithering
- idiot */
- if (print_info) {
- ata_dev_printk(dev, KERN_WARNING,
-"Drive reports diagnostics failure. This may indicate a drive\n");
- ata_dev_printk(dev, KERN_WARNING,
-"fault or invalid emulation. Contact drive vendor for information.\n");
- }
- }
-
- /* limit bridge transfers to udma5, 200 sectors */
+ /* Limit PATA drive on SATA cable bridge transfers to udma5,
+ 200 sectors */
if (ata_dev_knobble(dev)) {
if (ata_msg_drv(ap) && print_info)
ata_dev_printk(dev, KERN_INFO,
if (ap->ops->dev_config)
ap->ops->dev_config(dev);
- if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
- __FUNCTION__, ata_chk_status(ap));
+ if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
+ /* Let the user know. We don't want to disallow opens for
+ rescue purposes, or in case the vendor is just a blithering
+ idiot. Do this after the dev_config call so drivers for
+ controllers with buggy firmware have a chance to suppress
+ false device-bug reports */
+
+ if (print_info) {
+ ata_dev_printk(dev, KERN_WARNING,
+"Drive reports diagnostics failure. This may indicate a drive\n");
+ ata_dev_printk(dev, KERN_WARNING,
+"fault or invalid emulation. Contact drive vendor for information.\n");
+ }
+ }
+
return 0;
err_out_nosup:
if (ata_msg_probe(ap))
ata_dev_printk(dev, KERN_DEBUG,
- "%s: EXIT, err\n", __FUNCTION__);
+ "%s: EXIT, err\n", __func__);
return rc;
}
}
/**
+ * ata_cable_ignore - return ignored PATA cable.
+ * @ap: port
+ *
+ * Helper method for drivers which don't use cable type to limit
+ * transfer mode.
+ */
+int ata_cable_ignore(struct ata_port *ap)
+{
+ return ATA_CBL_PATA_IGN;
+}
+
+/**
* ata_cable_sata - return SATA cable type
* @ap: port
*
specific sequence bass-ackwards so that PDIAG- is released by
the slave device */
- ata_link_for_each_dev(dev, &ap->link) {
+ ata_link_for_each_dev_reverse(dev, &ap->link) {
if (tries[dev->devno])
dev->class = classes[dev->devno];
* LOCKING:
* None.
*/
-void sata_print_link_status(struct ata_link *link)
+static void sata_print_link_status(struct ata_link *link)
{
u32 sstatus, scontrol, tmp;
* RETURNS:
* 1 if SATA spd configuration is needed, 0 otherwise.
*/
-int sata_set_spd_needed(struct ata_link *link)
+static int sata_set_spd_needed(struct ata_link *link)
{
u32 scontrol;
*/
static const struct ata_timing ata_timing[] = {
+/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
+ { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
+ { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
+ { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
+ { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
+ { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
+ { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
+ { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
- { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
- { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
- { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
- { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
-
- { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
- { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
- { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
- { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
- { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
-
-/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
-
- { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
- { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
- { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
-
- { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
- { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
+ { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
+ { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
- { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
- { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
- { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
- { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
-
- { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
- { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
- { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
+ { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
+ { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
+ { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
+ { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
+ { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
-/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
+/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
+ { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
+ { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
+ { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
+ { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
+ { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
+ { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
+ { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
{ 0xFF }
};
if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
}
-static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
+const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
{
- const struct ata_timing *t;
+ const struct ata_timing *t = ata_timing;
- for (t = ata_timing; t->mode != speed; t++)
- if (t->mode == 0xFF)
- return NULL;
- return t;
+ while (xfer_mode > t->mode)
+ t++;
+
+ if (xfer_mode == t->mode)
+ return t;
+ return NULL;
}
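+
+/*
+ * Note: the linear scan above relies on ata_timing[] being sorted in
+ * ascending xfer_mode order (hence the reordered table above) and on
+ * the 0xFF sentinel terminating the walk for unknown modes.
+ */
+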
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
}
/**
+ * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
+ * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
+ * @cycle: cycle duration in ns
+ *
+ * Return matching xfer mode for @cycle. The returned mode is of
+ * the transfer type specified by @xfer_shift. If @cycle is too
+ * slow for @xfer_shift, 0xff is returned. If @cycle is faster
+ * than the fastest known mode, the fastest mode is returned.
+ *
+ * LOCKING:
+ * None.
+ *
+ * RETURNS:
+ * Matching xfer_mode, 0xff if no match found.
+ */
+u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
+{
+ u8 base_mode = 0xff, last_mode = 0xff;
+ const struct ata_xfer_ent *ent;
+ const struct ata_timing *t;
+
+ for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
+ if (ent->shift == xfer_shift)
+ base_mode = ent->base;
+
+ for (t = ata_timing_find_mode(base_mode);
+ t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
+ unsigned short this_cycle;
+
+ switch (xfer_shift) {
+ case ATA_SHIFT_PIO:
+ case ATA_SHIFT_MWDMA:
+ this_cycle = t->cycle;
+ break;
+ case ATA_SHIFT_UDMA:
+ this_cycle = t->udma;
+ break;
+ default:
+ return 0xff;
+ }
+
+ if (cycle > this_cycle)
+ break;
+
+ last_mode = t->mode;
+ }
+
+ return last_mode;
+}
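+
+/*
+ * Worked example against the ata_timing[] values above:
+ * ata_timing_cycle2mode(ATA_SHIFT_UDMA, 60) accepts UDMA0 (120ns),
+ * UDMA1 (80ns) and UDMA2 (60ns), then stops at UDMA3 (45ns) because
+ * 60 > 45, so XFER_UDMA_2 is returned.
+ */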
+
+/**
* ata_down_xfermask_limit - adjust dev xfer masks downward
* @dev: Device to adjust xfer masks
* @sel: ATA_DNXFER_* selector
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
char buf[32];
- unsigned int orig_mask, xfer_mask;
- unsigned int pio_mask, mwdma_mask, udma_mask;
+ unsigned long orig_mask, xfer_mask;
+ unsigned long pio_mask, mwdma_mask, udma_mask;
int quiet, highbit;
quiet = !!(sel & ATA_DNXFER_QUIET);
static int ata_dev_set_mode(struct ata_device *dev)
{
struct ata_eh_context *ehc = &dev->link->eh_context;
+ const char *dev_err_whine = "";
+ int ign_dev_err = 0;
unsigned int err_mask;
int rc;
err_mask = ata_dev_set_xfermode(dev);
- /* Old CFA may refuse this command, which is just fine */
- if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
- err_mask &= ~AC_ERR_DEV;
+ if (err_mask & ~AC_ERR_DEV)
+ goto fail;
- /* Some very old devices and some bad newer ones fail any kind of
- SET_XFERMODE request but support PIO0-2 timings and no IORDY */
- if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
- dev->pio_mode <= XFER_PIO_2)
- err_mask &= ~AC_ERR_DEV;
+ /* revalidate */
+ ehc->i.flags |= ATA_EHI_POST_SETMODE;
+ rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
+ ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
+ if (rc)
+ return rc;
+ if (dev->xfer_shift == ATA_SHIFT_PIO) {
+ /* Old CFA may refuse this command, which is just fine */
+ if (ata_id_is_cfa(dev->id))
+ ign_dev_err = 1;
+ /* Catch several broken garbage emulations plus some
+ pre-ATA devices */
+ if (ata_id_major_version(dev->id) == 0 &&
+ dev->pio_mode <= XFER_PIO_2)
+ ign_dev_err = 1;
+ /* Some very old devices and some bad newer ones fail
+ any kind of SET_XFERMODE request but support PIO0-2
+ timings and no IORDY */
+ if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
+ ign_dev_err = 1;
+ }
/* Early MWDMA devices do DMA but don't allow DMA mode setting.
Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
- if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
+ if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
dev->dma_mode == XFER_MW_DMA_0 &&
(dev->id[63] >> 8) & 1)
- err_mask &= ~AC_ERR_DEV;
+ ign_dev_err = 1;
- if (err_mask) {
- ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
- "(err_mask=0x%x)\n", err_mask);
- return -EIO;
- }
+ /* if the device is actually configured correctly, ignore dev err */
+ if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
+ ign_dev_err = 1;
- ehc->i.flags |= ATA_EHI_POST_SETMODE;
- rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
- ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
- if (rc)
- return rc;
+ if (err_mask & AC_ERR_DEV) {
+ if (!ign_dev_err)
+ goto fail;
+ else
+ dev_err_whine = " (device error ignored)";
+ }
DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
dev->xfer_shift, (int)dev->xfer_mode);
- ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
- ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
+ ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
+ ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
+ dev_err_whine);
+
return 0;
+
+ fail:
+ ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
+ "(err_mask=0x%x)\n", err_mask);
+ return -EIO;
}
/**
* ata_do_set_mode - Program timings and issue SET FEATURES - XFER
* @link: link on which timings will be programmed
- * @r_failed_dev: out paramter for failed device
+ * @r_failed_dev: out parameter for failed device
*
* Standard implementation of the function used to tune and set
* ATA device disk transfer mode (PIO3, UDMA6, etc.). If
/* step 1: calculate xfer_mask */
ata_link_for_each_dev(dev, link) {
- unsigned int pio_mask, dma_mask;
+ unsigned long pio_mask, dma_mask;
unsigned int mode_mask;
if (!ata_dev_enabled(dev))
mode_mask = ATA_DMA_MASK_CFA;
ata_dev_xfermask(dev);
+ ata_force_xfermask(dev);
pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
dev->dma_mode = ata_xfer_mask2mode(dma_mask);
found = 1;
- if (dev->dma_mode)
+ if (dev->dma_mode != 0xff)
used_dma = 1;
}
if (!found)
if (!ata_dev_enabled(dev))
continue;
- if (!dev->pio_mode) {
+ if (dev->pio_mode == 0xff) {
ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
rc = -EINVAL;
goto out;
/* step 3: set host DMA timings */
ata_link_for_each_dev(dev, link) {
- if (!ata_dev_enabled(dev) || !dev->dma_mode)
+ if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
continue;
dev->xfer_mode = dev->dma_mode;
}
/**
- * ata_set_mode - Program timings and issue SET FEATURES - XFER
- * @link: link on which timings will be programmed
- * @r_failed_dev: out paramter for failed device
- *
- * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
- * ata_set_mode() fails, pointer to the failing device is
- * returned in @r_failed_dev.
- *
- * LOCKING:
- * PCI/etc. bus probe sem.
- *
- * RETURNS:
- * 0 on success, negative errno otherwise
- */
-int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
-{
- struct ata_port *ap = link->ap;
-
- /* has private set_mode? */
- if (ap->ops->set_mode)
- return ap->ops->set_mode(link, r_failed_dev);
- return ata_do_set_mode(link, r_failed_dev);
-}
-
-/**
- * ata_tf_to_host - issue ATA taskfile to host controller
- * @ap: port to which command is being issued
- * @tf: ATA taskfile register set
- *
- * Issues ATA taskfile register set to ATA host controller,
- * with proper synchronization with interrupt handler and
- * other threads.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-
-static inline void ata_tf_to_host(struct ata_port *ap,
- const struct ata_taskfile *tf)
-{
- ap->ops->tf_load(ap, tf);
- ap->ops->exec_command(ap, tf);
-}
-
-/**
- * ata_busy_sleep - sleep until BSY clears, or timeout
- * @ap: port containing status register to be polled
- * @tmout_pat: impatience timeout
- * @tmout: overall timeout
- *
- * Sleep until ATA Status register bit BSY clears,
- * or a timeout occurs.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * 0 on success, -errno otherwise.
- */
-int ata_busy_sleep(struct ata_port *ap,
- unsigned long tmout_pat, unsigned long tmout)
-{
- unsigned long timer_start, timeout;
- u8 status;
-
- status = ata_busy_wait(ap, ATA_BUSY, 300);
- timer_start = jiffies;
- timeout = timer_start + tmout_pat;
- while (status != 0xff && (status & ATA_BUSY) &&
- time_before(jiffies, timeout)) {
- msleep(50);
- status = ata_busy_wait(ap, ATA_BUSY, 3);
- }
-
- if (status != 0xff && (status & ATA_BUSY))
- ata_port_printk(ap, KERN_WARNING,
- "port is slow to respond, please be patient "
- "(Status 0x%x)\n", status);
-
- timeout = timer_start + tmout;
- while (status != 0xff && (status & ATA_BUSY) &&
- time_before(jiffies, timeout)) {
- msleep(50);
- status = ata_chk_status(ap);
- }
-
- if (status == 0xff)
- return -ENODEV;
-
- if (status & ATA_BUSY) {
- ata_port_printk(ap, KERN_ERR, "port failed to respond "
- "(%lu secs, Status 0x%x)\n",
- tmout / HZ, status);
- return -EBUSY;
- }
-
- return 0;
-}
-
-/**
- * ata_wait_after_reset - wait before checking status after reset
- * @ap: port containing status register to be polled
+ * ata_wait_ready - wait for link to become ready
+ * @link: link to be waited on
* @deadline: deadline jiffies for the operation
+ * @check_ready: callback to check link readiness
*
- * After reset, we need to pause a while before reading status.
- * Also, certain combination of controller and device report 0xff
- * for some duration (e.g. until SATA PHY is up and running)
- * which is interpreted as empty port in ATA world. This
- * function also waits for such devices to get out of 0xff
- * status.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- */
-void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
-{
- unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
-
- if (time_before(until, deadline))
- deadline = until;
-
- /* Spec mandates ">= 2ms" before checking status. We wait
- * 150ms, because that was the magic delay used for ATAPI
- * devices in Hale Landis's ATADRVR, for the period of time
- * between when the ATA command register is written, and then
- * status is checked. Because waiting for "a while" before
- * checking status is fine, post SRST, we perform this magic
- * delay here as well.
- *
- * Old drivers/ide uses the 2mS rule and then waits for ready.
- */
- msleep(150);
-
- /* Wait for 0xff to clear. Some SATA devices take a long time
- * to clear 0xff after reset. For example, HHD424020F7SV00
- * iVDR needs >= 800ms while. Quantum GoVault needs even more
- * than that.
- *
- * Note that some PATA controllers (pata_ali) explode if
- * status register is read more than once when there's no
- * device attached.
- */
- if (ap->flags & ATA_FLAG_SATA) {
- while (1) {
- u8 status = ata_chk_status(ap);
-
- if (status != 0xff || time_after(jiffies, deadline))
- return;
-
- msleep(50);
- }
- }
-}
-
-/**
- * ata_wait_ready - sleep until BSY clears, or timeout
- * @ap: port containing status register to be polled
- * @deadline: deadline jiffies for the operation
+ * Wait for @link to become ready. @check_ready should return
+ * positive number if @link is ready, 0 if it isn't, -ENODEV if
+ * link doesn't seem to be occupied, other errno for other error
+ * conditions.
*
- * Sleep until ATA Status register bit BSY clears, or timeout
- * occurs.
+ * Transient -ENODEV conditions are allowed for
+ * ATA_TMOUT_FF_WAIT.
*
* LOCKING:
- * Kernel thread context (may sleep).
+ * EH context.
*
* RETURNS:
- * 0 on success, -errno otherwise.
+ * 0 if @link is ready before @deadline; otherwise, -errno.
*/
-int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
+int ata_wait_ready(struct ata_link *link, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link))
{
unsigned long start = jiffies;
+ unsigned long nodev_deadline = start + ATA_TMOUT_FF_WAIT;
int warned = 0;
+ if (time_after(nodev_deadline, deadline))
+ nodev_deadline = deadline;
+
while (1) {
- u8 status = ata_chk_status(ap);
unsigned long now = jiffies;
+ int ready, tmp;
- if (!(status & ATA_BUSY))
+ ready = tmp = check_ready(link);
+ if (ready > 0)
return 0;
- if (!ata_link_online(&ap->link) && status == 0xff)
- return -ENODEV;
+
+ /* -ENODEV could be transient. Ignore -ENODEV if link
+ * is online. Also, some SATA devices take a long
+ * time to clear 0xff after reset. For example,
+ * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
+ * GoVault needs even more than that. Wait for
+ * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
+ *
+ * Note that some PATA controllers (pata_ali) explode
+ * if status register is read more than once when
+ * there's no device attached.
+ */
+ if (ready == -ENODEV) {
+ if (ata_link_online(link))
+ ready = 0;
+ else if ((link->ap->flags & ATA_FLAG_SATA) &&
+ !ata_link_offline(link) &&
+ time_before(now, nodev_deadline))
+ ready = 0;
+ }
+
+ if (ready)
+ return ready;
if (time_after(now, deadline))
return -EBUSY;
if (!warned && time_after(now, start + 5 * HZ) &&
(deadline - now > 3 * HZ)) {
- ata_port_printk(ap, KERN_WARNING,
- "port is slow to respond, please be patient "
- "(Status 0x%x)\n", status);
+ ata_link_printk(link, KERN_WARNING,
+ "link is slow to respond, please be patient "
+ "(ready=%d)\n", tmp);
warned = 1;
}
}
}
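+
+/*
+ * A minimal sketch of a @check_ready callback (hypothetical; a real
+ * LLD would poll its own status register):
+ *
+ *	static int my_check_ready(struct ata_link *link)
+ *	{
+ *		u8 status = my_read_status(link->ap);
+ *
+ *		if (status == 0xff)
+ *			return -ENODEV;
+ *		return !(status & ATA_BUSY);
+ *	}
+ *
+ * returning -ENODEV when nothing seems attached, 0 while BSY is set
+ * and 1 once the link is ready, matching the contract above.
+ */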
-static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
- unsigned long deadline)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
- unsigned int dev0 = devmask & (1 << 0);
- unsigned int dev1 = devmask & (1 << 1);
- int rc, ret = 0;
-
- /* if device 0 was found in ata_devchk, wait for its
- * BSY bit to clear
- */
- if (dev0) {
- rc = ata_wait_ready(ap, deadline);
- if (rc) {
- if (rc != -ENODEV)
- return rc;
- ret = rc;
- }
- }
-
- /* if device 1 was found in ata_devchk, wait for register
- * access briefly, then wait for BSY to clear.
- */
- if (dev1) {
- int i;
-
- ap->ops->dev_select(ap, 1);
-
- /* Wait for register access. Some ATAPI devices fail
- * to set nsect/lbal after reset, so don't waste too
- * much time on it. We're gonna wait for !BSY anyway.
- */
- for (i = 0; i < 2; i++) {
- u8 nsect, lbal;
-
- nsect = ioread8(ioaddr->nsect_addr);
- lbal = ioread8(ioaddr->lbal_addr);
- if ((nsect == 1) && (lbal == 1))
- break;
- msleep(50); /* give drive a breather */
- }
-
- rc = ata_wait_ready(ap, deadline);
- if (rc) {
- if (rc != -ENODEV)
- return rc;
- ret = rc;
- }
- }
-
- /* is all this really necessary? */
- ap->ops->dev_select(ap, 0);
- if (dev1)
- ap->ops->dev_select(ap, 1);
- if (dev0)
- ap->ops->dev_select(ap, 0);
-
- return ret;
-}
-
-static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
- unsigned long deadline)
-{
- struct ata_ioports *ioaddr = &ap->ioaddr;
-
- DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
-
- /* software reset. causes dev0 to be selected */
- iowrite8(ap->ctl, ioaddr->ctl_addr);
- udelay(20); /* FIXME: flush */
- iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
- udelay(20); /* FIXME: flush */
- iowrite8(ap->ctl, ioaddr->ctl_addr);
-
- /* wait a while before checking status */
- ata_wait_after_reset(ap, deadline);
-
- /* Before we perform post reset processing we want to see if
- * the bus shows 0xFF because the odd clown forgets the D7
- * pulldown resistor.
- */
- if (ata_chk_status(ap) == 0xFF)
- return -ENODEV;
-
- return ata_bus_post_reset(ap, devmask, deadline);
-}
-
/**
- * ata_bus_reset - reset host port and associated ATA channel
- * @ap: port to reset
+ * ata_wait_after_reset - wait for link to become ready after reset
+ * @link: link to be waited on
+ * @deadline: deadline jiffies for the operation
+ * @check_ready: callback to check link readiness
*
- * This is typically the first time we actually start issuing
- * commands to the ATA channel. We wait for BSY to clear, then
- * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
- * result. Determine what devices, if any, are on the channel
- * by looking at the device 0/1 error register. Look at the signature
- * stored in each device's taskfile registers, to determine if
- * the device is ATA or ATAPI.
+ * Wait for @link to become ready after reset.
*
* LOCKING:
- * PCI/etc. bus probe sem.
- * Obtains host lock.
+ * EH context.
*
- * SIDE EFFECTS:
- * Sets ATA_FLAG_DISABLED if bus reset fails.
+ * RETURNS:
+ * 0 if @link is ready before @deadline; otherwise, -errno.
*/
-
-void ata_bus_reset(struct ata_port *ap)
+int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
+ int (*check_ready)(struct ata_link *link))
{
- struct ata_device *device = ap->link.device;
- struct ata_ioports *ioaddr = &ap->ioaddr;
- unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
- u8 err;
- unsigned int dev0, dev1 = 0, devmask = 0;
- int rc;
-
- DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
-
- /* determine if device 0/1 are present */
- if (ap->flags & ATA_FLAG_SATA_RESET)
- dev0 = 1;
- else {
- dev0 = ata_devchk(ap, 0);
- if (slave_possible)
- dev1 = ata_devchk(ap, 1);
- }
-
- if (dev0)
- devmask |= (1 << 0);
- if (dev1)
- devmask |= (1 << 1);
-
- /* select device 0 again */
- ap->ops->dev_select(ap, 0);
-
- /* issue bus reset */
- if (ap->flags & ATA_FLAG_SRST) {
- rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
- if (rc && rc != -ENODEV)
- goto err_out;
- }
+ msleep(ATA_WAIT_AFTER_RESET_MSECS);
- /*
- * determine by signature whether we have ATA or ATAPI devices
- */
- device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
- if ((slave_possible) && (err != 0x81))
- device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
-
- /* is double-select really necessary? */
- if (device[1].class != ATA_DEV_NONE)
- ap->ops->dev_select(ap, 1);
- if (device[0].class != ATA_DEV_NONE)
- ap->ops->dev_select(ap, 0);
-
- /* if no devices were detected, disable this port */
- if ((device[0].class == ATA_DEV_NONE) &&
- (device[1].class == ATA_DEV_NONE))
- goto err_out;
-
- if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
- /* set up device control for ATA_FLAG_SATA_RESET */
- iowrite8(ap->ctl, ioaddr->ctl_addr);
- }
-
- DPRINTK("EXIT\n");
- return;
-
-err_out:
- ata_port_printk(ap, KERN_ERR, "disabling port\n");
- ata_port_disable(ap);
-
- DPRINTK("EXIT\n");
+ return ata_wait_ready(link, deadline, check_ready);
}
/**
int sata_link_resume(struct ata_link *link, const unsigned long *params,
unsigned long deadline)
{
- u32 scontrol;
+ u32 scontrol, serror;
int rc;
if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
*/
msleep(200);
- return sata_link_debounce(link, params, deadline);
+ if ((rc = sata_link_debounce(link, params, deadline)))
+ return rc;
+
+ /* Clear SError. PMP and some host PHYs require this to
+ * operate and clearing should be done before checking PHY
+ * online status to avoid race condition (hotplugging between
+ * link resume and status check).
+ */
+ if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
+ rc = sata_scr_write(link, SCR_ERROR, serror);
+ if (rc == 0 || rc == -EINVAL) {
+ unsigned long flags;
+
+ spin_lock_irqsave(link->ap->lock, flags);
+ link->eh_info.serror = 0;
+ spin_unlock_irqrestore(link->ap->lock, flags);
+ rc = 0;
+ }
+ return rc;
}
/**
const unsigned long *timing = sata_ehc_deb_timing(ehc);
int rc;
- /* handle link resume */
- if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
- (link->flags & ATA_LFLAG_HRST_TO_RESUME))
- ehc->i.action |= ATA_EH_HARDRESET;
-
- /* Some PMPs don't work with only SRST, force hardreset if PMP
- * is supported.
- */
- if (ap->flags & ATA_FLAG_PMP)
- ehc->i.action |= ATA_EH_HARDRESET;
-
/* if we're about to do hardreset, nothing more to do */
if (ehc->i.action & ATA_EH_HARDRESET)
return 0;
"link for reset (errno=%d)\n", rc);
}
- /* Wait for !BSY if the controller can wait for the first D2H
- * Reg FIS and we don't know that no device is attached.
- */
- if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
- rc = ata_wait_ready(ap, deadline);
- if (rc && rc != -ENODEV) {
- ata_link_printk(link, KERN_WARNING, "device not ready "
- "(errno=%d), forcing hardreset\n", rc);
- ehc->i.action |= ATA_EH_HARDRESET;
- }
- }
+ /* no point in trying softreset on offline link */
+ if (ata_link_offline(link))
+ ehc->i.action &= ~ATA_EH_SOFTRESET;
return 0;
}
/**
- * ata_std_softreset - reset host port via ATA SRST
- * @link: ATA link to reset
- * @classes: resulting classes of attached devices
+ * sata_link_hardreset - reset link via SATA phy reset
+ * @link: link to reset
+ * @timing: timing parameters { interval, duration, timeout } in msec
* @deadline: deadline jiffies for the operation
+ * @online: optional out parameter indicating link onlineness
+ * @check_ready: optional callback to check link readiness
+ *
+ * SATA phy-reset @link using DET bits of SControl register.
+ * After hardreset, link readiness is waited upon using
+ * ata_wait_ready() if @check_ready is specified. LLDs may omit
+ * @check_ready and do the waiting themselves after this function
+ * returns. Device classification is the LLD's responsibility.
*
- * Reset host port using ATA SRST.
- *
- * LOCKING:
- * Kernel thread context (may sleep)
- *
- * RETURNS:
- * 0 on success, -errno otherwise.
- */
-int ata_std_softreset(struct ata_link *link, unsigned int *classes,
- unsigned long deadline)
-{
- struct ata_port *ap = link->ap;
- unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
- unsigned int devmask = 0;
- int rc;
- u8 err;
-
- DPRINTK("ENTER\n");
-
- if (ata_link_offline(link)) {
- classes[0] = ATA_DEV_NONE;
- goto out;
- }
-
- /* determine if device 0/1 are present */
- if (ata_devchk(ap, 0))
- devmask |= (1 << 0);
- if (slave_possible && ata_devchk(ap, 1))
- devmask |= (1 << 1);
-
- /* select device 0 again */
- ap->ops->dev_select(ap, 0);
-
- /* issue bus reset */
- DPRINTK("about to softreset, devmask=%x\n", devmask);
- rc = ata_bus_softreset(ap, devmask, deadline);
- /* if link is occupied, -ENODEV too is an error */
- if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
- ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
- return rc;
- }
-
- /* determine by signature whether we have ATA or ATAPI devices */
- classes[0] = ata_dev_try_classify(&link->device[0],
- devmask & (1 << 0), &err);
- if (slave_possible && err != 0x81)
- classes[1] = ata_dev_try_classify(&link->device[1],
- devmask & (1 << 1), &err);
-
- out:
- DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
- return 0;
-}
-
-/**
- * sata_link_hardreset - reset link via SATA phy reset
- * @link: link to reset
- * @timing: timing parameters { interval, duratinon, timeout } in msec
- * @deadline: deadline jiffies for the operation
- *
- * SATA phy-reset @link using DET bits of SControl register.
+ * *@online is set to one iff reset succeeded and @link is online
+ * after reset.
*
* LOCKING:
* Kernel thread context (may sleep)
* 0 on success, -errno otherwise.
*/
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
- unsigned long deadline)
+ unsigned long deadline,
+ bool *online, int (*check_ready)(struct ata_link *))
{
u32 scontrol;
int rc;
DPRINTK("ENTER\n");
+ if (online)
+ *online = false;
+
if (sata_set_spd_needed(link)) {
/* SATA spec says nothing about how to reconfigure
* spd. To be on the safe side, turn off phy during
/* bring link back */
rc = sata_link_resume(link, timing, deadline);
+ if (rc)
+ goto out;
+ /* if link is offline nothing more to do */
+ if (ata_link_offline(link))
+ goto out;
+
+ /* Link is online. From this point, -ENODEV too is an error. */
+ if (online)
+ *online = true;
+
+ if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
+ /* If PMP is supported, we have to do follow-up SRST.
+ * Some PMPs don't send D2H Reg FIS after hardreset if
+ * the first port is empty. Wait only for
+ * ATA_TMOUT_PMP_SRST_WAIT.
+ */
+ if (check_ready) {
+ unsigned long pmp_deadline;
+
+ pmp_deadline = jiffies + ATA_TMOUT_PMP_SRST_WAIT;
+ if (time_after(pmp_deadline, deadline))
+ pmp_deadline = deadline;
+ ata_wait_ready(link, pmp_deadline, check_ready);
+ }
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ if (check_ready)
+ rc = ata_wait_ready(link, deadline, check_ready);
out:
+ if (rc && rc != -EAGAIN)
+ ata_link_printk(link, KERN_ERR,
+ "COMRESET failed (errno=%d)\n", rc);
DPRINTK("EXIT, rc=%d\n", rc);
return rc;
}
/**
- * sata_std_hardreset - reset host port via SATA phy reset
+ * sata_std_hardreset - COMRESET w/o waiting or classification
* @link: link to reset
* @class: resulting class of attached device
* @deadline: deadline jiffies for the operation
*
- * SATA phy-reset host port using DET bits of SControl register,
- * wait for !BSY and classify the attached device.
+ * Standard SATA COMRESET w/o waiting or classification.
*
* LOCKING:
* Kernel thread context (may sleep)
*
* RETURNS:
- * 0 on success, -errno otherwise.
+ * 0 if link offline, -EAGAIN if link online, -errno on errors.
*/
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline)
{
- struct ata_port *ap = link->ap;
const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+ bool online;
int rc;
- DPRINTK("ENTER\n");
-
/* do hardreset */
- rc = sata_link_hardreset(link, timing, deadline);
- if (rc) {
- ata_link_printk(link, KERN_ERR,
- "COMRESET failed (errno=%d)\n", rc);
- return rc;
- }
-
- /* TODO: phy layer with polling, timeouts, etc. */
- if (ata_link_offline(link)) {
- *class = ATA_DEV_NONE;
- DPRINTK("EXIT, link offline\n");
- return 0;
- }
-
- /* wait a while before checking status */
- ata_wait_after_reset(ap, deadline);
-
- /* If PMP is supported, we have to do follow-up SRST. Note
- * that some PMPs don't send D2H Reg FIS after hardreset at
- * all if the first port is empty. Wait for it just for a
- * second and request follow-up SRST.
- */
- if (ap->flags & ATA_FLAG_PMP) {
- ata_wait_ready(ap, jiffies + HZ);
- return -EAGAIN;
- }
-
- rc = ata_wait_ready(ap, deadline);
- /* link occupied, -ENODEV too is an error */
- if (rc) {
- ata_link_printk(link, KERN_ERR,
- "COMRESET failed (errno=%d)\n", rc);
- return rc;
- }
-
- ap->ops->dev_select(ap, 0); /* probably unnecessary */
-
- *class = ata_dev_try_classify(link->device, 1, NULL);
-
- DPRINTK("EXIT, class=%u\n", *class);
- return 0;
+ rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
+ return online ? -EAGAIN : rc;
}
/**
*/
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
- struct ata_port *ap = link->ap;
- u32 serror;
-
DPRINTK("ENTER\n");
/* print link status */
sata_print_link_status(link);
- /* clear SError */
- if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
- sata_scr_write(link, SCR_ERROR, serror);
-
- /* is double-select really necessary? */
- if (classes[0] != ATA_DEV_NONE)
- ap->ops->dev_select(ap, 1);
- if (classes[1] != ATA_DEV_NONE)
- ap->ops->dev_select(ap, 0);
-
- /* bail out if no device is present */
- if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
- DPRINTK("EXIT, no device\n");
- return;
- }
-
- /* set up device control */
- if (ap->ioaddr.ctl_addr)
- iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
-
DPRINTK("EXIT\n");
}
/* Devices where NCQ should be avoided */
/* NCQ is slow */
{ "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
+ { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
/* http://thread.gmane.org/gmane.linux.ide/14907 */
{ "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
/* NCQ is broken */
{ "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
{ "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
- { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
- { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
{ "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
+ { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
{ "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
{ "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
{ "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
- /* Drives which do spurious command completion */
- { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
- { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
- { "HDT722516DLA380", "V43OA96A", ATA_HORKAGE_NONCQ, },
- { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
- { "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ, },
- { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
- { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, },
- { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
- { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ, },
- { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
- { "ST9160821AS", "3.ALD", ATA_HORKAGE_NONCQ, },
- { "ST9160821AS", "3.CCD", ATA_HORKAGE_NONCQ, },
- { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, },
- { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, },
- { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
- { "Maxtor 7V300F0", "VA111900", ATA_HORKAGE_NONCQ, },
/* devices which puke on READ_NATIVE_MAX */
{ "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
/* Devices which report 1 sector over size HPA */
{ "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
{ "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
+ { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
/* Devices which get the IVB wrong */
{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
{ "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
+ { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
+ { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
+ { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
/* End Marker */
{ }
}
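
The model/rev strings above are matched with a simple trailing-'*' comparison; a sketch of the kind of helper involved (the real one lives elsewhere in this file and may differ):

static int foo_pattern_cmp(const char *patt, const char *name)
{
	const char *star = strchr(patt, '*');

	if (!star)			/* no wildcard: exact match */
		return strcmp(patt, name);
	return strncmp(patt, name, star - patt);	/* prefix match */
}

so "Maxtor 7V300F0" matches both its exact entry and the "Maxtor *" glob.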
/**
+ * cable_is_40wire - 40/80/SATA decider
+ * @ap: port to consider
+ *
+ * This function encapsulates the policy for speed management
+ * in one place. At the moment we don't cache the result but
+ * there is a good case for setting ap->cbl to the result when
+ * we are called with unknown cables (and figuring out if it
+ * impacts hotplug at all).
+ *
+ * Return 1 if the cable appears to be 40 wire.
+ */
+
+static int cable_is_40wire(struct ata_port *ap)
+{
+ struct ata_link *link;
+ struct ata_device *dev;
+
+ /* If the controller thinks we are 40 wire, we are */
+ if (ap->cbl == ATA_CBL_PATA40)
+ return 1;
+ /* If the controller thinks we are 80 wire, we are */
+ if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
+ return 0;
+ /* If the system is known to be a 40 wire short cable (e.g. a laptop),
+ then we allow 80 wire modes even if the drive isn't sure */
+ if (ap->cbl == ATA_CBL_PATA40_SHORT)
+ return 0;
+ /* If the controller doesn't know, we scan.
+
+ - Note: We look for all 40 wire detects at this point.
+ Any 80 wire detect is taken to mean an 80 wire cable
+ because
+ - In many setups only the one drive (slave if present)
+ will give a valid detect
+ - If you have a non-detect-capable drive you don't
+ want it to colour the choice
+ */
+ ata_port_for_each_link(link, ap) {
+ ata_link_for_each_dev(dev, link) {
+ if (!ata_is_40wire(dev))
+ return 0;
+ }
+ }
+ return 1;
+}
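
ata_is_40wire() is the per-device half of this decision; it is assumed to consult the identify-data cable-detect bits, with the IVB blacklist entries above selecting the relaxed check (a sketch; the real helper sits earlier in this file):

static int foo_is_40wire(struct ata_device *dev)
{
	if (dev->horkage & ATA_HORKAGE_IVB)	/* device sets IVB wrong */
		return ata_drive_40wire_relaxed(dev->id);
	return ata_drive_40wire(dev->id);
}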
+
+/**
* ata_dev_xfermask - Compute supported xfermask of the given device
* @dev: Device to compute xfermask for
*
*/
if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
/* UDMA/44 or higher would be available */
- if ((ap->cbl == ATA_CBL_PATA40) ||
- (ata_is_40wire(dev) &&
- (ap->cbl == ATA_CBL_PATA_UNK ||
- ap->cbl == ATA_CBL_PATA80))) {
+ if (cable_is_40wire(ap)) {
ata_dev_printk(dev, KERN_WARNING,
"limited to UDMA/33 due to 40-wire cable\n");
xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
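
For reference, the arithmetic behind the 0xF8 mask: UDMA modes occupy an 8-bit field starting at ATA_SHIFT_UDMA within xfer_mask, and only the lowest three modes are legal on a 40-wire cable:

/* UDMA bits counted from ATA_SHIFT_UDMA:
 *   bit 0  UDMA/16    bit 1  UDMA/25    bit 2  UDMA/33
 *   bit 3  UDMA/44    bit 4  UDMA/66    bit 5  UDMA/100
 *   bit 6  UDMA/133   bit 7  UDMA7
 * 0xF8 covers bits 3-7, i.e. every mode that needs an 80-wire cable.
 */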
tf.feature = SETFEATURES_XFER;
tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
tf.protocol = ATA_PROT_NODATA;
- tf.nsect = dev->xfer_mode;
+ /* If we are using IORDY we must send the mode setting command */
+ if (ata_pio_need_iordy(dev))
+ tf.nsect = dev->xfer_mode;
+ /* If the device has IORDY and the controller does not - turn it off */
+ else if (ata_id_has_iordy(dev->id))
+ tf.nsect = 0x01;
+ else /* In the ancient relic department - skip all of this */
+ return 0;
err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
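
The three-way split above leans on ata_pio_need_iordy(); a minimal sketch of the decision it is assumed to make (the real helper is defined elsewhere in this file):

static int foo_pio_need_iordy(const struct ata_device *adev)
{
	/* the controller can't do IORDY at all */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* IORDY is mandatory for PIO3 and above */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* otherwise use it whenever the drive advertises it */
	return ata_id_has_iordy(adev->id) ? 1 : 0;
}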
void ata_sg_clean(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct scatterlist *sg = qc->__sg;
+ struct scatterlist *sg = qc->sg;
int dir = qc->dma_dir;
- void *pad_buf = NULL;
- WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
WARN_ON(sg == NULL);
- if (qc->flags & ATA_QCFLAG_SINGLE)
- WARN_ON(qc->n_elem > 1);
-
VPRINTK("unmapping %u sg elements\n", qc->n_elem);
- /* if we padded the buffer out to 32-bit bound, and data
- * xfer direction is from-device, we must copy from the
- * pad buffer back into the supplied buffer
- */
- if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
- pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
-
- if (qc->flags & ATA_QCFLAG_SG) {
- if (qc->n_elem)
- dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
- /* restore last sg */
- sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
- if (pad_buf) {
- struct scatterlist *psg = &qc->pad_sgent;
- void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
- memcpy(addr + psg->offset, pad_buf, qc->pad_len);
- kunmap_atomic(addr, KM_IRQ0);
- }
- } else {
- if (qc->n_elem)
- dma_unmap_single(ap->dev,
- sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
- dir);
- /* restore sg */
- sg->length += qc->pad_len;
- if (pad_buf)
- memcpy(qc->buf_virt + sg->length - qc->pad_len,
- pad_buf, qc->pad_len);
- }
+ if (qc->n_elem)
+ dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
qc->flags &= ~ATA_QCFLAG_DMAMAP;
- qc->__sg = NULL;
-}
-
-/**
- * ata_fill_sg - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scatterlist *sg;
- unsigned int idx;
-
- WARN_ON(qc->__sg == NULL);
- WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
-
- idx = 0;
- ata_for_each_sg(sg, qc) {
- u32 addr, offset;
- u32 sg_len, len;
-
- /* determine if physical DMA addr spans 64K boundary.
- * Note h/w doesn't support 64-bit, so we unconditionally
- * truncate dma_addr_t to u32.
- */
- addr = (u32) sg_dma_address(sg);
- sg_len = sg_dma_len(sg);
-
- while (sg_len) {
- offset = addr & 0xffff;
- len = sg_len;
- if ((offset + sg_len) > 0x10000)
- len = 0x10000 - offset;
-
- ap->prd[idx].addr = cpu_to_le32(addr);
- ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
-
- idx++;
- sg_len -= len;
- addr += len;
- }
- }
-
- if (idx)
- ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
-}
-
-/**
- * ata_fill_sg_dumb - Fill PCI IDE PRD table
- * @qc: Metadata associated with taskfile to be transferred
- *
- * Fill PCI IDE PRD (scatter-gather) table with segments
- * associated with the current disk command. Perform the fill
- * so that we avoid writing any length 64K records for
- * controllers that don't follow the spec.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- */
-static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct scatterlist *sg;
- unsigned int idx;
-
- WARN_ON(qc->__sg == NULL);
- WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
-
- idx = 0;
- ata_for_each_sg(sg, qc) {
- u32 addr, offset;
- u32 sg_len, len, blen;
-
- /* determine if physical DMA addr spans 64K boundary.
- * Note h/w doesn't support 64-bit, so we unconditionally
- * truncate dma_addr_t to u32.
- */
- addr = (u32) sg_dma_address(sg);
- sg_len = sg_dma_len(sg);
-
- while (sg_len) {
- offset = addr & 0xffff;
- len = sg_len;
- if ((offset + sg_len) > 0x10000)
- len = 0x10000 - offset;
-
- blen = len & 0xffff;
- ap->prd[idx].addr = cpu_to_le32(addr);
- if (blen == 0) {
- /* Some PATA chipsets like the CS5530 can't
- cope with 0x0000 meaning 64K as the spec says */
- ap->prd[idx].flags_len = cpu_to_le32(0x8000);
- blen = 0x8000;
- ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
- }
- ap->prd[idx].flags_len = cpu_to_le32(blen);
- VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
-
- idx++;
- sg_len -= len;
- addr += len;
- }
- }
-
- if (idx)
- ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
+ qc->sg = NULL;
}
/**
return ATA_DEFER_LINK;
}
-/**
- * ata_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_qc_prep(struct ata_queued_cmd *qc)
-{
- if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
-
- ata_fill_sg(qc);
-}
-
-/**
- * ata_dumb_qc_prep - Prepare taskfile for submission
- * @qc: Metadata associated with taskfile to be prepared
- *
- * Prepare ATA taskfile for submission.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
-{
- if (!(qc->flags & ATA_QCFLAG_DMAMAP))
- return;
-
- ata_fill_sg_dumb(qc);
-}
-
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
/**
- * ata_sg_init_one - Associate command with memory buffer
- * @qc: Command to be associated
- * @buf: Memory buffer
- * @buflen: Length of memory buffer, in bytes.
- *
- * Initialize the data-related elements of queued_cmd @qc
- * to point to a single memory buffer, @buf of byte length @buflen.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- */
-
-void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
-{
- qc->flags |= ATA_QCFLAG_SINGLE;
-
- qc->__sg = &qc->sgent;
- qc->n_elem = 1;
- qc->orig_n_elem = 1;
- qc->buf_virt = buf;
- qc->nbytes = buflen;
- qc->cursg = qc->__sg;
-
- sg_init_one(&qc->sgent, buf, buflen);
-}
-
-/**
* ata_sg_init - Associate command with scatter-gather table.
* @qc: Command to be associated
* @sg: Scatter-gather table.
* LOCKING:
* spin_lock_irqsave(host lock)
*/
-
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem)
{
- qc->flags |= ATA_QCFLAG_SG;
- qc->__sg = sg;
+ qc->sg = sg;
qc->n_elem = n_elem;
- qc->orig_n_elem = n_elem;
- qc->cursg = qc->__sg;
-}
-
-/**
- * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
- * @qc: Command with memory buffer to be mapped.
- *
- * DMA-map the memory buffer associated with queued_cmd @qc.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * Zero on success, negative on error.
- */
-
-static int ata_sg_setup_one(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- int dir = qc->dma_dir;
- struct scatterlist *sg = qc->__sg;
- dma_addr_t dma_address;
- int trim_sg = 0;
-
- /* we must lengthen transfers to end on a 32-bit boundary */
- qc->pad_len = sg->length & 3;
- if (qc->pad_len) {
- void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
- struct scatterlist *psg = &qc->pad_sgent;
-
- WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
-
- memset(pad_buf, 0, ATA_DMA_PAD_SZ);
-
- if (qc->tf.flags & ATA_TFLAG_WRITE)
- memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
- qc->pad_len);
-
- sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
- sg_dma_len(psg) = ATA_DMA_PAD_SZ;
- /* trim sg */
- sg->length -= qc->pad_len;
- if (sg->length == 0)
- trim_sg = 1;
-
- DPRINTK("padding done, sg->length=%u pad_len=%u\n",
- sg->length, qc->pad_len);
- }
-
- if (trim_sg) {
- qc->n_elem--;
- goto skip_map;
- }
-
- dma_address = dma_map_single(ap->dev, qc->buf_virt,
- sg->length, dir);
- if (dma_mapping_error(dma_address)) {
- /* restore sg */
- sg->length += qc->pad_len;
- return -1;
- }
-
- sg_dma_address(sg) = dma_address;
- sg_dma_len(sg) = sg->length;
-
-skip_map:
- DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
- qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
- return 0;
+ qc->cursg = qc->sg;
}
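
With ata_sg_init_one() gone, a caller that has a single kernel buffer now builds the one-entry table itself and sets the byte count explicitly. A hedged sketch of the replacement pattern (the sg must stay valid until the command completes):

static void foo_map_one(struct ata_queued_cmd *qc, struct scatterlist *sg,
			void *buf, unsigned int buflen)
{
	sg_init_one(sg, buf, buflen);	/* one-entry table over buf */
	ata_sg_init(qc, sg, 1);
	qc->nbytes = buflen;		/* ata_sg_init_one() used to do this */
}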
/**
* Zero on success, negative on error.
*
*/
-
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct scatterlist *sg = qc->__sg;
- struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
- int n_elem, pre_n_elem, dir, trim_sg = 0;
+ unsigned int n_elem;
VPRINTK("ENTER, ata%u\n", ap->print_id);
- WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
-
- /* we must lengthen transfers to end on a 32-bit boundary */
- qc->pad_len = lsg->length & 3;
- if (qc->pad_len) {
- void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
- struct scatterlist *psg = &qc->pad_sgent;
- unsigned int offset;
-
- WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
-
- memset(pad_buf, 0, ATA_DMA_PAD_SZ);
-
- /*
- * psg->page/offset are used to copy to-be-written
- * data in this function or read data in ata_sg_clean.
- */
- offset = lsg->offset + lsg->length - qc->pad_len;
- sg_init_table(psg, 1);
- sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
- qc->pad_len, offset_in_page(offset));
-
- if (qc->tf.flags & ATA_TFLAG_WRITE) {
- void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
- memcpy(pad_buf, addr + psg->offset, qc->pad_len);
- kunmap_atomic(addr, KM_IRQ0);
- }
-
- sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
- sg_dma_len(psg) = ATA_DMA_PAD_SZ;
- /* trim last sg */
- lsg->length -= qc->pad_len;
- if (lsg->length == 0)
- trim_sg = 1;
-
- DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
- qc->n_elem - 1, lsg->length, qc->pad_len);
- }
-
- pre_n_elem = qc->n_elem;
- if (trim_sg && pre_n_elem)
- pre_n_elem--;
-
- if (!pre_n_elem) {
- n_elem = 0;
- goto skip_map;
- }
- dir = qc->dma_dir;
- n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
- if (n_elem < 1) {
- /* restore last sg */
- lsg->length += qc->pad_len;
+ n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
+ if (n_elem < 1)
return -1;
- }
DPRINTK("%d sg elements mapped\n", n_elem);
-skip_map:
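+ /* dma_map_sg() may coalesce entries, so record the count it
+ * returned: ata_sg_clean() must unmap what was actually mapped */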
qc->n_elem = n_elem;
+ qc->flags |= ATA_QCFLAG_DMAMAP;
return 0;
}
}
/**
- * ata_data_xfer - Transfer data by PIO
- * @adev: device to target
- * @buf: data buffer
- * @buflen: buffer length
- * @write_data: read/write
- *
- * Transfer data from/to the device data register by PIO.
+ * ata_qc_new - Request an available ATA command, for queueing
+ * @ap: Port associated with device @dev
+ * @dev: Device from whom we request an available command structure
*
* LOCKING:
- * Inherited from caller.
+ * None.
*/
-void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
- unsigned int buflen, int write_data)
-{
- struct ata_port *ap = adev->link->ap;
- unsigned int words = buflen >> 1;
- /* Transfer multiple of 2 bytes */
- if (write_data)
- iowrite16_rep(ap->ioaddr.data_addr, buf, words);
- else
- ioread16_rep(ap->ioaddr.data_addr, buf, words);
+static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
+{
+ struct ata_queued_cmd *qc = NULL;
+ unsigned int i;
- /* Transfer trailing 1 byte, if any. */
- if (unlikely(buflen & 0x01)) {
- u16 align_buf[1] = { 0 };
- unsigned char *trailing_buf = buf + buflen - 1;
+ /* no command while frozen */
+ if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
+ return NULL;
- if (write_data) {
- memcpy(align_buf, trailing_buf, 1);
- iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
- } else {
- align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
- memcpy(trailing_buf, align_buf, 1);
+ /* the last tag is reserved for internal command. */
+ for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
+ if (!test_and_set_bit(i, &ap->qc_allocated)) {
+ qc = __ata_qc_from_tag(ap, i);
+ break;
}
- }
-}
-
-/**
- * ata_data_xfer_noirq - Transfer data by PIO
- * @adev: device to target
- * @buf: data buffer
- * @buflen: buffer length
- * @write_data: read/write
- *
- * Transfer data from/to the device data register by PIO. Do the
- * transfer with interrupts disabled.
- *
- * LOCKING:
- * Inherited from caller.
- */
-void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
- unsigned int buflen, int write_data)
-{
- unsigned long flags;
- local_irq_save(flags);
- ata_data_xfer(adev, buf, buflen, write_data);
- local_irq_restore(flags);
-}
-
-
-/**
- * ata_pio_sector - Transfer a sector of data.
- * @qc: Command on going
- *
- * Transfer qc->sect_size bytes of data from/to the ATA device.
- *
- * LOCKING:
- * Inherited from caller.
- */
-
-static void ata_pio_sector(struct ata_queued_cmd *qc)
-{
- int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
- struct ata_port *ap = qc->ap;
- struct page *page;
- unsigned int offset;
- unsigned char *buf;
-
- if (qc->curbytes == qc->nbytes - qc->sect_size)
- ap->hsm_task_state = HSM_ST_LAST;
-
- page = sg_page(qc->cursg);
- offset = qc->cursg->offset + qc->cursg_ofs;
-
- /* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
- offset %= PAGE_SIZE;
-
- DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
- if (PageHighMem(page)) {
- unsigned long flags;
-
- /* FIXME: use a bounce buffer */
- local_irq_save(flags);
- buf = kmap_atomic(page, KM_IRQ0);
-
- /* do the actual data transfer */
- ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
-
- kunmap_atomic(buf, KM_IRQ0);
- local_irq_restore(flags);
- } else {
- buf = page_address(page);
- ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
- }
-
- qc->curbytes += qc->sect_size;
- qc->cursg_ofs += qc->sect_size;
-
- if (qc->cursg_ofs == qc->cursg->length) {
- qc->cursg = sg_next(qc->cursg);
- qc->cursg_ofs = 0;
- }
-}
-
-/**
- * ata_pio_sectors - Transfer one or many sectors.
- * @qc: Command on going
- *
- * Transfer one or many sectors of data from/to the
- * ATA device for the DRQ request.
- *
- * LOCKING:
- * Inherited from caller.
- */
-
-static void ata_pio_sectors(struct ata_queued_cmd *qc)
-{
- if (is_multi_taskfile(&qc->tf)) {
- /* READ/WRITE MULTIPLE */
- unsigned int nsect;
-
- WARN_ON(qc->dev->multi_count == 0);
-
- nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
- qc->dev->multi_count);
- while (nsect--)
- ata_pio_sector(qc);
- } else
- ata_pio_sector(qc);
-
- ata_altstatus(qc->ap); /* flush */
-}
-
-/**
- * atapi_send_cdb - Write CDB bytes to hardware
- * @ap: Port to which ATAPI device is attached.
- * @qc: Taskfile currently active
- *
- * When device has indicated its readiness to accept
- * a CDB, this function is called. Send the CDB.
- *
- * LOCKING:
- * caller.
- */
-
-static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
-{
- /* send SCSI cdb */
- DPRINTK("send cdb\n");
- WARN_ON(qc->dev->cdb_len < 12);
-
- ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
- ata_altstatus(ap); /* flush */
-
- switch (qc->tf.protocol) {
- case ATA_PROT_ATAPI:
- ap->hsm_task_state = HSM_ST;
- break;
- case ATA_PROT_ATAPI_NODATA:
- ap->hsm_task_state = HSM_ST_LAST;
- break;
- case ATA_PROT_ATAPI_DMA:
- ap->hsm_task_state = HSM_ST_LAST;
- /* initiate bmdma */
- ap->ops->bmdma_start(qc);
- break;
- }
-}
-
-/**
- * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
- * @qc: Command on going
- * @bytes: number of bytes
- *
- * Transfer Transfer data from/to the ATAPI device.
- *
- * LOCKING:
- * Inherited from caller.
- *
- */
-
-static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
-{
- int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
- struct scatterlist *sg = qc->__sg;
- struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
- struct ata_port *ap = qc->ap;
- struct page *page;
- unsigned char *buf;
- unsigned int offset, count;
- int no_more_sg = 0;
-
- if (qc->curbytes + bytes >= qc->nbytes)
- ap->hsm_task_state = HSM_ST_LAST;
-
-next_sg:
- if (unlikely(no_more_sg)) {
- /*
- * The end of qc->sg is reached and the device expects
- * more data to transfer. In order not to overrun qc->sg
- * and fulfill length specified in the byte count register,
- * - for read case, discard trailing data from the device
- * - for write case, padding zero data to the device
- */
- u16 pad_buf[1] = { 0 };
- unsigned int words = bytes >> 1;
- unsigned int i;
-
- if (words) /* warning if bytes > 1 */
- ata_dev_printk(qc->dev, KERN_WARNING,
- "%u bytes trailing data\n", bytes);
-
- for (i = 0; i < words; i++)
- ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);
-
- ap->hsm_task_state = HSM_ST_LAST;
- return;
- }
-
- sg = qc->cursg;
-
- page = sg_page(sg);
- offset = sg->offset + qc->cursg_ofs;
-
- /* get the current page and offset */
- page = nth_page(page, (offset >> PAGE_SHIFT));
- offset %= PAGE_SIZE;
-
- /* don't overrun current sg */
- count = min(sg->length - qc->cursg_ofs, bytes);
-
- /* don't cross page boundaries */
- count = min(count, (unsigned int)PAGE_SIZE - offset);
-
- DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
-
- if (PageHighMem(page)) {
- unsigned long flags;
-
- /* FIXME: use bounce buffer */
- local_irq_save(flags);
- buf = kmap_atomic(page, KM_IRQ0);
-
- /* do the actual data transfer */
- ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
-
- kunmap_atomic(buf, KM_IRQ0);
- local_irq_restore(flags);
- } else {
- buf = page_address(page);
- ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
- }
-
- bytes -= count;
- qc->curbytes += count;
- qc->cursg_ofs += count;
-
- if (qc->cursg_ofs == sg->length) {
- if (qc->cursg == lsg)
- no_more_sg = 1;
-
- qc->cursg = sg_next(qc->cursg);
- qc->cursg_ofs = 0;
- }
-
- if (bytes)
- goto next_sg;
-}
-
-/**
- * atapi_pio_bytes - Transfer data from/to the ATAPI device.
- * @qc: Command on going
- *
- * Transfer Transfer data from/to the ATAPI device.
- *
- * LOCKING:
- * Inherited from caller.
- */
-
-static void atapi_pio_bytes(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct ata_device *dev = qc->dev;
- unsigned int ireason, bc_lo, bc_hi, bytes;
- int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
-
- /* Abuse qc->result_tf for temp storage of intermediate TF
- * here to save some kernel stack usage.
- * For normal completion, qc->result_tf is not relevant. For
- * error, qc->result_tf is later overwritten by ata_qc_complete().
- * So, the correctness of qc->result_tf is not affected.
- */
- ap->ops->tf_read(ap, &qc->result_tf);
- ireason = qc->result_tf.nsect;
- bc_lo = qc->result_tf.lbam;
- bc_hi = qc->result_tf.lbah;
- bytes = (bc_hi << 8) | bc_lo;
-
- /* shall be cleared to zero, indicating xfer of data */
- if (ireason & (1 << 0))
- goto err_out;
-
- /* make sure transfer direction matches expected */
- i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
- if (do_write != i_write)
- goto err_out;
-
- VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
-
- __atapi_pio_bytes(qc, bytes);
- ata_altstatus(ap); /* flush */
-
- return;
-
-err_out:
- ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
- qc->err_mask |= AC_ERR_HSM;
- ap->hsm_task_state = HSM_ST_ERR;
-}
-
-/**
- * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
- * @ap: the target ata_port
- * @qc: qc on going
- *
- * RETURNS:
- * 1 if ok in workqueue, 0 otherwise.
- */
-
-static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
-{
- if (qc->tf.flags & ATA_TFLAG_POLLING)
- return 1;
-
- if (ap->hsm_task_state == HSM_ST_FIRST) {
- if (qc->tf.protocol == ATA_PROT_PIO &&
- (qc->tf.flags & ATA_TFLAG_WRITE))
- return 1;
-
- if (is_atapi_taskfile(&qc->tf) &&
- !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- return 1;
- }
-
- return 0;
-}
-
-/**
- * ata_hsm_qc_complete - finish a qc running on standard HSM
- * @qc: Command to complete
- * @in_wq: 1 if called from workqueue, 0 otherwise
- *
- * Finish @qc which is running on standard HSM.
- *
- * LOCKING:
- * If @in_wq is zero, spin_lock_irqsave(host lock).
- * Otherwise, none on entry and grabs host lock.
- */
-static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
-{
- struct ata_port *ap = qc->ap;
- unsigned long flags;
-
- if (ap->ops->error_handler) {
- if (in_wq) {
- spin_lock_irqsave(ap->lock, flags);
-
- /* EH might have kicked in while host lock is
- * released.
- */
- qc = ata_qc_from_tag(ap, qc->tag);
- if (qc) {
- if (likely(!(qc->err_mask & AC_ERR_HSM))) {
- ap->ops->irq_on(ap);
- ata_qc_complete(qc);
- } else
- ata_port_freeze(ap);
- }
-
- spin_unlock_irqrestore(ap->lock, flags);
- } else {
- if (likely(!(qc->err_mask & AC_ERR_HSM)))
- ata_qc_complete(qc);
- else
- ata_port_freeze(ap);
- }
- } else {
- if (in_wq) {
- spin_lock_irqsave(ap->lock, flags);
- ap->ops->irq_on(ap);
- ata_qc_complete(qc);
- spin_unlock_irqrestore(ap->lock, flags);
- } else
- ata_qc_complete(qc);
- }
-}
-
-/**
- * ata_hsm_move - move the HSM to the next state.
- * @ap: the target ata_port
- * @qc: qc on going
- * @status: current device status
- * @in_wq: 1 if called from workqueue, 0 otherwise
- *
- * RETURNS:
- * 1 when poll next status needed, 0 otherwise.
- */
-int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
- u8 status, int in_wq)
-{
- unsigned long flags = 0;
- int poll_next;
-
- WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
-
- /* Make sure ata_qc_issue_prot() does not throw things
- * like DMA polling into the workqueue. Notice that
- * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
- */
- WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
-
-fsm_start:
- DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
- ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
-
- switch (ap->hsm_task_state) {
- case HSM_ST_FIRST:
- /* Send first data block or PACKET CDB */
-
- /* If polling, we will stay in the work queue after
- * sending the data. Otherwise, interrupt handler
- * takes over after sending the data.
- */
- poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
-
- /* check device status */
- if (unlikely((status & ATA_DRQ) == 0)) {
- /* handle BSY=0, DRQ=0 as error */
- if (likely(status & (ATA_ERR | ATA_DF)))
- /* device stops HSM for abort/error */
- qc->err_mask |= AC_ERR_DEV;
- else
- /* HSM violation. Let EH handle this */
- qc->err_mask |= AC_ERR_HSM;
-
- ap->hsm_task_state = HSM_ST_ERR;
- goto fsm_start;
- }
-
- /* Device should not ask for data transfer (DRQ=1)
- * when it finds something wrong.
- * We ignore DRQ here and stop the HSM by
- * changing hsm_task_state to HSM_ST_ERR and
- * let the EH abort the command or reset the device.
- */
- if (unlikely(status & (ATA_ERR | ATA_DF))) {
- /* Some ATAPI tape drives forget to clear the ERR bit
- * when doing the next command (mostly request sense).
- * We ignore ERR here to workaround and proceed sending
- * the CDB.
- */
- if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
- ata_port_printk(ap, KERN_WARNING,
- "DRQ=1 with device error, "
- "dev_stat 0x%X\n", status);
- qc->err_mask |= AC_ERR_HSM;
- ap->hsm_task_state = HSM_ST_ERR;
- goto fsm_start;
- }
- }
-
- /* Send the CDB (atapi) or the first data block (ata pio out).
- * During the state transition, interrupt handler shouldn't
- * be invoked before the data transfer is complete and
- * hsm_task_state is changed. Hence, the following locking.
- */
- if (in_wq)
- spin_lock_irqsave(ap->lock, flags);
-
- if (qc->tf.protocol == ATA_PROT_PIO) {
- /* PIO data out protocol.
- * send first data block.
- */
-
- /* ata_pio_sectors() might change the state
- * to HSM_ST_LAST. so, the state is changed here
- * before ata_pio_sectors().
- */
- ap->hsm_task_state = HSM_ST;
- ata_pio_sectors(qc);
- } else
- /* send CDB */
- atapi_send_cdb(ap, qc);
-
- if (in_wq)
- spin_unlock_irqrestore(ap->lock, flags);
-
- /* if polling, ata_pio_task() handles the rest.
- * otherwise, interrupt handler takes over from here.
- */
- break;
-
- case HSM_ST:
- /* complete command or read/write the data register */
- if (qc->tf.protocol == ATA_PROT_ATAPI) {
- /* ATAPI PIO protocol */
- if ((status & ATA_DRQ) == 0) {
- /* No more data to transfer or device error.
- * Device error will be tagged in HSM_ST_LAST.
- */
- ap->hsm_task_state = HSM_ST_LAST;
- goto fsm_start;
- }
-
- /* Device should not ask for data transfer (DRQ=1)
- * when it finds something wrong.
- * We ignore DRQ here and stop the HSM by
- * changing hsm_task_state to HSM_ST_ERR and
- * let the EH abort the command or reset the device.
- */
- if (unlikely(status & (ATA_ERR | ATA_DF))) {
- ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
- "device error, dev_stat 0x%X\n",
- status);
- qc->err_mask |= AC_ERR_HSM;
- ap->hsm_task_state = HSM_ST_ERR;
- goto fsm_start;
- }
-
- atapi_pio_bytes(qc);
-
- if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
- /* bad ireason reported by device */
- goto fsm_start;
-
- } else {
- /* ATA PIO protocol */
- if (unlikely((status & ATA_DRQ) == 0)) {
- /* handle BSY=0, DRQ=0 as error */
- if (likely(status & (ATA_ERR | ATA_DF)))
- /* device stops HSM for abort/error */
- qc->err_mask |= AC_ERR_DEV;
- else
- /* HSM violation. Let EH handle this.
- * Phantom devices also trigger this
- * condition. Mark hint.
- */
- qc->err_mask |= AC_ERR_HSM |
- AC_ERR_NODEV_HINT;
-
- ap->hsm_task_state = HSM_ST_ERR;
- goto fsm_start;
- }
-
- /* For PIO reads, some devices may ask for
- * data transfer (DRQ=1) alone with ERR=1.
- * We respect DRQ here and transfer one
- * block of junk data before changing the
- * hsm_task_state to HSM_ST_ERR.
- *
- * For PIO writes, ERR=1 DRQ=1 doesn't make
- * sense since the data block has been
- * transferred to the device.
- */
- if (unlikely(status & (ATA_ERR | ATA_DF))) {
- /* data might be corrputed */
- qc->err_mask |= AC_ERR_DEV;
-
- if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
- ata_pio_sectors(qc);
- status = ata_wait_idle(ap);
- }
-
- if (status & (ATA_BUSY | ATA_DRQ))
- qc->err_mask |= AC_ERR_HSM;
-
- /* ata_pio_sectors() might change the
- * state to HSM_ST_LAST. so, the state
- * is changed after ata_pio_sectors().
- */
- ap->hsm_task_state = HSM_ST_ERR;
- goto fsm_start;
- }
-
- ata_pio_sectors(qc);
-
- if (ap->hsm_task_state == HSM_ST_LAST &&
- (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
- /* all data read */
- status = ata_wait_idle(ap);
- goto fsm_start;
- }
- }
-
- poll_next = 1;
- break;
-
- case HSM_ST_LAST:
- if (unlikely(!ata_ok(status))) {
- qc->err_mask |= __ac_err_mask(status);
- ap->hsm_task_state = HSM_ST_ERR;
- goto fsm_start;
- }
-
- /* no more data to transfer */
- DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
- ap->print_id, qc->dev->devno, status);
-
- WARN_ON(qc->err_mask);
-
- ap->hsm_task_state = HSM_ST_IDLE;
-
- /* complete taskfile transaction */
- ata_hsm_qc_complete(qc, in_wq);
-
- poll_next = 0;
- break;
-
- case HSM_ST_ERR:
- /* make sure qc->err_mask is available to
- * know what's wrong and recover
- */
- WARN_ON(qc->err_mask == 0);
-
- ap->hsm_task_state = HSM_ST_IDLE;
-
- /* complete taskfile transaction */
- ata_hsm_qc_complete(qc, in_wq);
-
- poll_next = 0;
- break;
- default:
- poll_next = 0;
- BUG();
- }
-
- return poll_next;
-}
-
-static void ata_pio_task(struct work_struct *work)
-{
- struct ata_port *ap =
- container_of(work, struct ata_port, port_task.work);
- struct ata_queued_cmd *qc = ap->port_task_data;
- u8 status;
- int poll_next;
-
-fsm_start:
- WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
-
- /*
- * This is purely heuristic. This is a fast path.
- * Sometimes when we enter, BSY will be cleared in
- * a chk-status or two. If not, the drive is probably seeking
- * or something. Snooze for a couple msecs, then
- * chk-status again. If still busy, queue delayed work.
- */
- status = ata_busy_wait(ap, ATA_BUSY, 5);
- if (status & ATA_BUSY) {
- msleep(2);
- status = ata_busy_wait(ap, ATA_BUSY, 10);
- if (status & ATA_BUSY) {
- ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
- return;
- }
- }
-
- /* move the HSM */
- poll_next = ata_hsm_move(ap, qc, status, 1);
-
- /* another command or interrupt handler
- * may be running at this point.
- */
- if (poll_next)
- goto fsm_start;
-}
-
-/**
- * ata_qc_new - Request an available ATA command, for queueing
- * @ap: Port associated with device @dev
- * @dev: Device from whom we request an available command structure
- *
- * LOCKING:
- * None.
- */
-
-static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
-{
- struct ata_queued_cmd *qc = NULL;
- unsigned int i;
-
- /* no command while frozen */
- if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
- return NULL;
-
- /* the last tag is reserved for internal command. */
- for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
- if (!test_and_set_bit(i, &ap->qc_allocated)) {
- qc = __ata_qc_from_tag(ap, i);
- break;
- }
-
- if (qc)
- qc->tag = i;
-
- return qc;
+
+ if (qc)
+ qc->tag = i;
+
+ return qc;
}
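
Tag allocation here is lock-free: test_and_set_bit() atomically claims bit i and returns its previous value, so exactly one caller can win a given tag. The release side is a plain clear_bit(), roughly what ata_qc_free() does:

/* sketch of the matching release path */
if (ata_tag_valid(tag)) {
	qc->tag = ATA_TAG_POISON;	/* catch use-after-free */
	clear_bit(tag, &ap->qc_allocated);
}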
/**
struct ata_port *ap = qc->ap;
qc->result_tf.flags = qc->tf.flags;
- ap->ops->tf_read(ap, &qc->result_tf);
+ ap->ops->qc_fill_rtf(qc);
+}
+
+static void ata_verify_xfer(struct ata_queued_cmd *qc)
+{
+ struct ata_device *dev = qc->dev;
+
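+ /* internal commands say nothing about the configured transfer mode */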
+ if (ata_tag_internal(qc->tag))
+ return;
+
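+ /* neither do commands that move no data */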
+ if (ata_is_nodata(qc->tf.protocol))
+ return;
+
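+ /* a PIO transfer can't validate a newly configured DMA mode */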
+ if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
+ return;
+
+ dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}
/**
break;
}
+ if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
+ ata_verify_xfer(qc);
+
__ata_qc_complete(qc);
} else {
if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
* ata_qc_complete_multiple - Complete multiple qcs successfully
* @ap: port in question
* @qc_active: new qc_active mask
- * @finish_qc: LLDD callback invoked before completing a qc
*
* Complete in-flight commands. This functions is meant to be
* called from low-level driver's interrupt routine to complete
* RETURNS:
* Number of completed commands on success, -errno otherwise.
*/
-int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
- void (*finish_qc)(struct ata_queued_cmd *))
+int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
{
int nr_done = 0;
u32 done_mask;
struct ata_queued_cmd *qc;
if (!(done_mask & (1 << i)))
- continue;
-
- if ((qc = ata_qc_from_tag(ap, i))) {
- if (finish_qc)
- finish_qc(qc);
- ata_qc_complete(qc);
- nr_done++;
- }
- }
-
- return nr_done;
-}
-
-static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
-
- switch (qc->tf.protocol) {
- case ATA_PROT_NCQ:
- case ATA_PROT_DMA:
- case ATA_PROT_ATAPI_DMA:
- return 1;
-
- case ATA_PROT_ATAPI:
- case ATA_PROT_PIO:
- if (ap->flags & ATA_FLAG_PIO_DMA)
- return 1;
-
- /* fall through */
+ continue;
- default:
- return 0;
+ if ((qc = ata_qc_from_tag(ap, i))) {
+ ata_qc_complete(qc);
+ nr_done++;
+ }
}
- /* never reached */
+ return nr_done;
}
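
A hedged sketch of how an NCQ-capable LLD's interrupt path might feed this with the new signature (FOO_ACTIVE_TAGS and the foo_* names are made up; a real driver reads its controller's equivalent of SActive):

#define FOO_ACTIVE_TAGS	0x40	/* hypothetical register offset */

static void foo_host_intr(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[0];	/* illustrative */
	u32 qc_active = readl(mmio + FOO_ACTIVE_TAGS);

	/* complete every command whose tag the hardware has retired */
	ata_qc_complete_multiple(ap, qc_active);
}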
/**
{
struct ata_port *ap = qc->ap;
struct ata_link *link = qc->dev->link;
+ u8 prot = qc->tf.protocol;
/* Make sure only one non-NCQ command is outstanding. The
* check is skipped for old EH because it reuses active qc to
*/
WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
- if (qc->tf.protocol == ATA_PROT_NCQ) {
+ if (ata_is_ncq(prot)) {
WARN_ON(link->sactive & (1 << qc->tag));
if (!link->sactive)
qc->flags |= ATA_QCFLAG_ACTIVE;
ap->qc_active |= 1 << qc->tag;
- if (ata_should_dma_map(qc)) {
- if (qc->flags & ATA_QCFLAG_SG) {
- if (ata_sg_setup(qc))
- goto sg_err;
- } else if (qc->flags & ATA_QCFLAG_SINGLE) {
- if (ata_sg_setup_one(qc))
- goto sg_err;
- }
- } else {
- qc->flags &= ~ATA_QCFLAG_DMAMAP;
- }
+ /* We guarantee to LLDs that they will have at least one
+ * non-zero sg if the command is a data command.
+ */
+ BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
+
+ if (ata_is_dma(prot) || (ata_is_pio(prot) &&
+ (ap->flags & ATA_FLAG_PIO_DMA)))
+ if (ata_sg_setup(qc))
+ goto sg_err;
- /* if device is sleeping, schedule softreset and abort the link */
+ /* if device is sleeping, schedule reset and abort the link */
if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
- link->eh_info.action |= ATA_EH_SOFTRESET;
+ link->eh_info.action |= ATA_EH_RESET;
ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
ata_link_abort(link);
return;
return;
sg_err:
- qc->flags &= ~ATA_QCFLAG_DMAMAP;
qc->err_mask |= AC_ERR_SYSTEM;
err:
ata_qc_complete(qc);
}
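
The ata_is_dma()/ata_is_pio()/ata_is_ncq()/ata_is_data() tests used above are assumed to reduce to per-protocol flag bits along these lines (the real definitions live in include/linux/libata.h):

enum {
	FOO_PROT_PIO	= (1 << 0),
	FOO_PROT_DMA	= (1 << 1),
	FOO_PROT_NCQ	= (1 << 2),
	FOO_PROT_DATA	= FOO_PROT_PIO | FOO_PROT_DMA,	/* moves data */
};

static inline int foo_is_data(unsigned int prot_flags)
{
	return prot_flags & FOO_PROT_DATA;
}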
/**
- * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
- * @qc: command to issue to device
- *
- * Using various libata functions and hooks, this function
- * starts an ATA command. ATA commands are grouped into
- * classes called "protocols", and issuing each type of protocol
- * is slightly different.
- *
- * May be used as the qc_issue() entry in ata_port_operations.
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * Zero on success, AC_ERR_* mask on failure
- */
-
-unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
-
- /* Use polling pio if the LLD doesn't handle
- * interrupt driven pio and atapi CDB interrupt.
- */
- if (ap->flags & ATA_FLAG_PIO_POLLING) {
- switch (qc->tf.protocol) {
- case ATA_PROT_PIO:
- case ATA_PROT_NODATA:
- case ATA_PROT_ATAPI:
- case ATA_PROT_ATAPI_NODATA:
- qc->tf.flags |= ATA_TFLAG_POLLING;
- break;
- case ATA_PROT_ATAPI_DMA:
- if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
- /* see ata_dma_blacklisted() */
- BUG();
- break;
- default:
- break;
- }
- }
-
- /* select the device */
- ata_dev_select(ap, qc->dev->devno, 1, 0);
-
- /* start the command */
- switch (qc->tf.protocol) {
- case ATA_PROT_NODATA:
- if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_qc_set_polling(qc);
-
- ata_tf_to_host(ap, &qc->tf);
- ap->hsm_task_state = HSM_ST_LAST;
-
- if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_port_queue_task(ap, ata_pio_task, qc, 0);
-
- break;
-
- case ATA_PROT_DMA:
- WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
-
- ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
- ap->ops->bmdma_setup(qc); /* set up bmdma */
- ap->ops->bmdma_start(qc); /* initiate bmdma */
- ap->hsm_task_state = HSM_ST_LAST;
- break;
-
- case ATA_PROT_PIO:
- if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_qc_set_polling(qc);
-
- ata_tf_to_host(ap, &qc->tf);
-
- if (qc->tf.flags & ATA_TFLAG_WRITE) {
- /* PIO data out protocol */
- ap->hsm_task_state = HSM_ST_FIRST;
- ata_port_queue_task(ap, ata_pio_task, qc, 0);
-
- /* always send first data block using
- * the ata_pio_task() codepath.
- */
- } else {
- /* PIO data in protocol */
- ap->hsm_task_state = HSM_ST;
-
- if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_port_queue_task(ap, ata_pio_task, qc, 0);
-
- /* if polling, ata_pio_task() handles the rest.
- * otherwise, interrupt handler takes over from here.
- */
- }
-
- break;
-
- case ATA_PROT_ATAPI:
- case ATA_PROT_ATAPI_NODATA:
- if (qc->tf.flags & ATA_TFLAG_POLLING)
- ata_qc_set_polling(qc);
-
- ata_tf_to_host(ap, &qc->tf);
-
- ap->hsm_task_state = HSM_ST_FIRST;
-
- /* send cdb by polling if no cdb interrupt */
- if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
- (qc->tf.flags & ATA_TFLAG_POLLING))
- ata_port_queue_task(ap, ata_pio_task, qc, 0);
- break;
-
- case ATA_PROT_ATAPI_DMA:
- WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
-
- ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
- ap->ops->bmdma_setup(qc); /* set up bmdma */
- ap->hsm_task_state = HSM_ST_FIRST;
-
- /* send cdb by polling if no cdb interrupt */
- if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- ata_port_queue_task(ap, ata_pio_task, qc, 0);
- break;
-
- default:
- WARN_ON(1);
- return AC_ERR_SYSTEM;
- }
-
- return 0;
-}
-
-/**
- * ata_host_intr - Handle host interrupt for given (port, task)
- * @ap: Port on which interrupt arrived (possibly...)
- * @qc: Taskfile currently active in engine
- *
- * Handle host interrupt for given queued command. Currently,
- * only DMA interrupts are handled. All other commands are
- * handled via polling with interrupts disabled (nIEN bit).
- *
- * LOCKING:
- * spin_lock_irqsave(host lock)
- *
- * RETURNS:
- * One if interrupt was handled, zero if not (shared irq).
- */
-
-inline unsigned int ata_host_intr(struct ata_port *ap,
- struct ata_queued_cmd *qc)
-{
- struct ata_eh_info *ehi = &ap->link.eh_info;
- u8 status, host_stat = 0;
-
- VPRINTK("ata%u: protocol %d task_state %d\n",
- ap->print_id, qc->tf.protocol, ap->hsm_task_state);
-
- /* Check whether we are expecting interrupt in this state */
- switch (ap->hsm_task_state) {
- case HSM_ST_FIRST:
- /* Some pre-ATAPI-4 devices assert INTRQ
- * at this state when ready to receive CDB.
- */
-
- /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
- * The flag was turned on only for atapi devices.
- * No need to check is_atapi_taskfile(&qc->tf) again.
- */
- if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
- goto idle_irq;
- break;
- case HSM_ST_LAST:
- if (qc->tf.protocol == ATA_PROT_DMA ||
- qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
- /* check status of DMA engine */
- host_stat = ap->ops->bmdma_status(ap);
- VPRINTK("ata%u: host_stat 0x%X\n",
- ap->print_id, host_stat);
-
- /* if it's not our irq... */
- if (!(host_stat & ATA_DMA_INTR))
- goto idle_irq;
-
- /* before we do anything else, clear DMA-Start bit */
- ap->ops->bmdma_stop(qc);
-
- if (unlikely(host_stat & ATA_DMA_ERR)) {
- /* error when transfering data to/from memory */
- qc->err_mask |= AC_ERR_HOST_BUS;
- ap->hsm_task_state = HSM_ST_ERR;
- }
- }
- break;
- case HSM_ST:
- break;
- default:
- goto idle_irq;
- }
-
- /* check altstatus */
- status = ata_altstatus(ap);
- if (status & ATA_BUSY)
- goto idle_irq;
-
- /* check main status, clearing INTRQ */
- status = ata_chk_status(ap);
- if (unlikely(status & ATA_BUSY))
- goto idle_irq;
-
- /* ack bmdma irq events */
- ap->ops->irq_clear(ap);
-
- ata_hsm_move(ap, qc, status, 0);
-
- if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
- qc->tf.protocol == ATA_PROT_ATAPI_DMA))
- ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
-
- return 1; /* irq handled */
-
-idle_irq:
- ap->stats.idle_irq++;
-
-#ifdef ATA_IRQ_TRAP
- if ((ap->stats.idle_irq % 1000) == 0) {
- ata_chk_status(ap);
- ap->ops->irq_clear(ap);
- ata_port_printk(ap, KERN_WARNING, "irq trap\n");
- return 1;
- }
-#endif
- return 0; /* irq not handled */
-}
-
-/**
- * ata_interrupt - Default ATA host interrupt handler
- * @irq: irq line (unused)
- * @dev_instance: pointer to our ata_host information structure
- *
- * Default interrupt handler for PCI IDE devices. Calls
- * ata_host_intr() for each port that is not disabled.
- *
- * LOCKING:
- * Obtains host lock during operation.
- *
- * RETURNS:
- * IRQ_NONE or IRQ_HANDLED.
- */
-
-irqreturn_t ata_interrupt(int irq, void *dev_instance)
-{
- struct ata_host *host = dev_instance;
- unsigned int i;
- unsigned int handled = 0;
- unsigned long flags;
-
- /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
- spin_lock_irqsave(&host->lock, flags);
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap;
-
- ap = host->ports[i];
- if (ap &&
- !(ap->flags & ATA_FLAG_DISABLED)) {
- struct ata_queued_cmd *qc;
-
- qc = ata_qc_from_tag(ap, ap->link.active_tag);
- if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
- (qc->flags & ATA_QCFLAG_ACTIVE))
- handled |= ata_host_intr(ap, qc);
- }
- }
-
- spin_unlock_irqrestore(&host->lock, flags);
-
- return IRQ_RETVAL(handled);
-}
-
-/**
* sata_scr_valid - test whether SCRs are accessible
* @link: ATA link to test SCR accessibility for
*
return 0;
}
-int ata_flush_cache(struct ata_device *dev)
-{
- unsigned int err_mask;
- u8 cmd;
-
- if (!ata_try_flush_cache(dev))
- return 0;
-
- if (dev->flags & ATA_DFLAG_FLUSH_EXT)
- cmd = ATA_CMD_FLUSH_EXT;
- else
- cmd = ATA_CMD_FLUSH;
-
- /* This is wrong. On a failed flush we get back the LBA of the lost
- sector and we should (assuming it wasn't aborted as unknown) issue
- a further flush command to continue the writeback until it
- does not error */
- err_mask = ata_do_simple_cmd(dev, cmd);
- if (err_mask) {
- ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
- return -EIO;
- }
-
- return 0;
-}
-
#ifdef CONFIG_PM
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
unsigned int action, unsigned int ehi_flags,
*/
void ata_host_resume(struct ata_host *host)
{
- ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
+ ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
host->dev->power.power_state = PMSG_ON;
int ata_port_start(struct ata_port *ap)
{
struct device *dev = ap->dev;
- int rc;
ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
GFP_KERNEL);
if (!ap->prd)
return -ENOMEM;
- rc = ata_pad_alloc(ap, dev);
- if (rc)
- return rc;
-
- DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
- (unsigned long long)ap->prd_dma);
return 0;
}
*/
int sata_link_init_spd(struct ata_link *link)
{
- u32 scontrol, spd;
+ u32 scontrol;
+ u8 spd;
int rc;
rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
if (spd)
link->hw_sata_spd_limit &= (1 << spd) - 1;
+ ata_force_spd_limit(link);
+
link->sata_spd_limit = link->hw_sata_spd_limit;
return 0;
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
- INIT_DELAYED_WORK(&ap->port_task, NULL);
+#ifdef CONFIG_ATA_SFF
+ INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
+#endif
INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
INIT_LIST_HEAD(&ap->eh_done_q);
if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
host->ops = pi->port_ops;
- if (!host->private_data && pi->private_data)
- host->private_data = pi->private_data;
}
return host;
}
/**
+ * ata_finalize_port_ops - finalize ata_port_operations
+ * @ops: ata_port_operations to finalize
+ *
+ * An ata_port_operations can inherit from another ops and that
+ * ops can again inherit from another. This can go on as many
+ * times as necessary as long as there is no loop in the
+ * inheritance chain.
+ *
+ * Ops tables are finalized when the host is started. NULL or
+ * unspecified entries are inherited from the closest ancestor
+ * which has the method and the entry is populated with it.
+ * After finalization, the ops table directly points to all the
+ * methods and ->inherits is no longer necessary and cleared.
+ *
+ * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
+ *
+ * LOCKING:
+ * None.
+ */
+static void ata_finalize_port_ops(struct ata_port_operations *ops)
+{
+ static DEFINE_SPINLOCK(lock);
+ const struct ata_port_operations *cur;
+ void **begin = (void **)ops;
+ void **end = (void **)&ops->inherits;
+ void **pp;
+
+ if (!ops || !ops->inherits)
+ return;
+
+ spin_lock(&lock);
+
+ for (cur = ops->inherits; cur; cur = cur->inherits) {
+ void **inherit = (void **)cur;
+
+ for (pp = begin; pp < end; pp++, inherit++)
+ if (!*pp)
+ *pp = *inherit;
+ }
+
+ for (pp = begin; pp < end; pp++)
+ if (IS_ERR(*pp))
+ *pp = NULL;
+
+ ops->inherits = NULL;
+
+ spin_unlock(&lock);
+}
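
In practice an LLD now declares only what it overrides; everything else is filled in from the ancestor chain at ata_host_start() time. A hypothetical example (foo_* names are illustrative):

static struct ata_port_operations foo_port_ops = {
	.inherits	= &sata_port_ops,
	.hardreset	= foo_hardreset,	/* override one method */
	.postreset	= ATA_OP_NULL,		/* force the slot to NULL */
};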
+
+/**
* ata_host_start - start and freeze ports of an ATA host
* @host: ATA host to start ports for
*
if (host->flags & ATA_HOST_STARTED)
return 0;
+ ata_finalize_port_ops(host->ops);
+
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
+ ata_finalize_port_ops(ap->ops);
+
if (!host->ops && !ata_port_is_dummy(ap))
host->ops = ap->ops;
if (ap->ops->port_start) {
rc = ap->ops->port_start(ap);
if (rc) {
- ata_port_printk(ap, KERN_ERR, "failed to "
- "start port (errno=%d)\n", rc);
+ if (rc != -ENODEV)
+ dev_printk(KERN_ERR, host->dev,
+ "failed to start port %d "
+ "(errno=%d)\n", i, rc);
goto err_out;
}
}
-
ata_eh_freeze_port(ap);
}
*/
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
- unsigned long flags, const struct ata_port_operations *ops)
+ unsigned long flags, struct ata_port_operations *ops)
{
spin_lock_init(&host->lock);
host->dev = dev;
DPRINTK("probe begin\n");
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- int rc;
/* probe */
if (ap->ops->error_handler) {
/* kick EH for boot probing */
spin_lock_irqsave(ap->lock, flags);
- ehi->probe_mask =
- (1 << ata_link_max_devices(&ap->link)) - 1;
- ehi->action |= ATA_EH_SOFTRESET;
+ ehi->probe_mask |= ATA_ALL_DEVICES;
+ ehi->action |= ATA_EH_RESET;
ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
ap->pflags &= ~ATA_PFLAG_INITIALIZING;
ata_port_wait_eh(ap);
- /* EH is now guaranteed to see UNLOADING, so no new device
- * will be attached. Disable all existing devices.
+ /* EH is now guaranteed to see UNLOADING - EH context belongs
+ * to us. Disable all existing devices.
*/
- spin_lock_irqsave(ap->lock, flags);
-
ata_port_for_each_link(link, ap) {
ata_link_for_each_dev(dev, link)
ata_dev_disable(dev);
}
- spin_unlock_irqrestore(ap->lock, flags);
-
/* Final freeze & EH. All in-flight commands are aborted. EH
* will be skipped and retries will be terminated with bad
* target.
for (i = 0; i < host->n_ports; i++)
ata_port_detach(host->ports[i]);
-}
-
-/**
- * ata_std_ports - initialize ioaddr with standard port offsets.
- * @ioaddr: IO address structure to be initialized
- *
- * Utility function which initializes data_addr, error_addr,
- * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
- * device_addr, status_addr, and command_addr to standard offsets
- * relative to cmd_addr.
- *
- * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
- */
-void ata_std_ports(struct ata_ioports *ioaddr)
-{
- ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
- ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
- ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
- ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
- ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
- ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
- ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
- ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
- ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
- ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
+ /* the host is dead now, dissociate ACPI */
+ ata_acpi_dissociate(host);
}
-
#ifdef CONFIG_PCI
/**
pci_save_state(pdev);
pci_disable_device(pdev);
- if (mesg.event == PM_EVENT_SUSPEND)
+ if (mesg.event & PM_EVENT_SLEEP)
pci_set_power_state(pdev, PCI_D3hot);
}
#endif /* CONFIG_PCI */
+static int __init ata_parse_force_one(char **cur,
+ struct ata_force_ent *force_ent,
+ const char **reason)
+{
+ /* FIXME: Currently, there's no way to tag init const data and
+ * using __initdata causes build failure on some versions of
+ * gcc. Once __initdataconst is implemented, add const to the
+ * following structure.
+ */
+ static struct ata_force_param force_tbl[] __initdata = {
+ { "40c", .cbl = ATA_CBL_PATA40 },
+ { "80c", .cbl = ATA_CBL_PATA80 },
+ { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
+ { "unk", .cbl = ATA_CBL_PATA_UNK },
+ { "ign", .cbl = ATA_CBL_PATA_IGN },
+ { "sata", .cbl = ATA_CBL_SATA },
+ { "1.5Gbps", .spd_limit = 1 },
+ { "3.0Gbps", .spd_limit = 2 },
+ { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
+ { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
+ { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
+ { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
+ { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
+ { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
+ { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
+ { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
+ { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
+ { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
+ { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
+ { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
+ { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
+ { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
+ { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
+ { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
+ { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
+ { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
+ { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
+ { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
+ { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
+ { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
+ { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
+ { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
+ { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
+ { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
+ { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
+ { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
+ { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
+ { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
+ { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
+ { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
+ { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
+ { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
+ { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
+ { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
+ };
+ char *start = *cur, *p = *cur;
+ char *id, *val, *endp;
+ const struct ata_force_param *match_fp = NULL;
+ int nr_matches = 0, i;
+
+ /* find where this param ends and update *cur */
+ while (*p != '\0' && *p != ',')
+ p++;
+
+ if (*p == '\0')
+ *cur = p;
+ else
+ *cur = p + 1;
+
+ *p = '\0';
+
+ /* parse */
+ p = strchr(start, ':');
+ if (!p) {
+ val = strstrip(start);
+ goto parse_val;
+ }
+ *p = '\0';
+
+ id = strstrip(start);
+ val = strstrip(p + 1);
+
+ /* parse id */
+ p = strchr(id, '.');
+ if (p) {
+ *p++ = '\0';
+ force_ent->device = simple_strtoul(p, &endp, 10);
+ if (p == endp || *endp != '\0') {
+ *reason = "invalid device";
+ return -EINVAL;
+ }
+ }
+
+ force_ent->port = simple_strtoul(id, &endp, 10);
+ if (id == endp || *endp != '\0') {
+ *reason = "invalid port/link";
+ return -EINVAL;
+ }
+
+ parse_val:
+ /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
+ for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
+ const struct ata_force_param *fp = &force_tbl[i];
+
+ if (strncasecmp(val, fp->name, strlen(val)))
+ continue;
+
+ nr_matches++;
+ match_fp = fp;
+
+ if (strcasecmp(val, fp->name) == 0) {
+ nr_matches = 1;
+ break;
+ }
+ }
+
+ if (!nr_matches) {
+ *reason = "unknown value";
+ return -EINVAL;
+ }
+ if (nr_matches > 1) {
+ *reason = "ambigious value";
+ return -EINVAL;
+ }
+
+ force_ent->param = *match_fp;
+
+ return 0;
+}
+
+static void __init ata_parse_force_param(void)
+{
+ int idx = 0, size = 1;
+ int last_port = -1, last_device = -1;
+ char *p, *cur, *next;
+
+ /* calculate maximum number of params and allocate force_tbl */
+ for (p = ata_force_param_buf; *p; p++)
+ if (*p == ',')
+ size++;
+
+ ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
+ if (!ata_force_tbl) {
+ printk(KERN_WARNING "ata: failed to extend force table, "
+ "libata.force ignored\n");
+ return;
+ }
+
+ /* parse and populate the table */
+ for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
+ const char *reason = "";
+ struct ata_force_ent te = { .port = -1, .device = -1 };
+
+ next = cur;
+ if (ata_parse_force_one(&next, &te, &reason)) {
+ printk(KERN_WARNING "ata: failed to parse force "
+ "parameter \"%s\" (%s)\n",
+ cur, reason);
+ continue;
+ }
+
+ if (te.port == -1) {
+ te.port = last_port;
+ te.device = last_device;
+ }
+
+ ata_force_tbl[idx++] = te;
+
+ last_port = te.port;
+ last_device = te.device;
+ }
+
+ ata_force_tbl_size = idx;
+}
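
Putting the parser together, the boot parameter takes comma-separated <port>[.<device>]:<value> entries, with values drawn from force_tbl above; for example:

	libata.force=80c			all ports: assume 80-wire cable
	libata.force=1:1.5Gbps			port 1: cap link speed
	libata.force=1.00:udma4,2:noncq		device 1.00: UDMA/66; port 2: NCQ off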
static int __init ata_init(void)
{
ata_probe_timeout *= HZ;
+
+ ata_parse_force_param();
+
ata_wq = create_workqueue("ata");
if (!ata_wq)
return -ENOMEM;
static void __exit ata_exit(void)
{
+ kfree(ata_force_tbl);
destroy_workqueue(ata_wq);
destroy_workqueue(ata_aux_wq);
}
/*
* Dummy port_ops
*/
-static void ata_dummy_noret(struct ata_port *ap) { }
-static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
-static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
-
-static u8 ata_dummy_check_status(struct ata_port *ap)
+static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
- return ATA_DRDY;
+ return AC_ERR_SYSTEM;
}
-static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
+static void ata_dummy_error_handler(struct ata_port *ap)
{
- return AC_ERR_SYSTEM;
+ /* truly dummy */
}
-const struct ata_port_operations ata_dummy_port_ops = {
- .check_status = ata_dummy_check_status,
- .check_altstatus = ata_dummy_check_status,
- .dev_select = ata_noop_dev_select,
+struct ata_port_operations ata_dummy_port_ops = {
.qc_prep = ata_noop_qc_prep,
.qc_issue = ata_dummy_qc_issue,
- .freeze = ata_dummy_noret,
- .thaw = ata_dummy_noret,
- .error_handler = ata_dummy_noret,
- .post_internal_cmd = ata_dummy_qc_noret,
- .irq_clear = ata_dummy_noret,
- .port_start = ata_dummy_ret0,
- .port_stop = ata_dummy_noret,
+ .error_handler = ata_dummy_error_handler,
};
const struct ata_port_info ata_dummy_port_info = {
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
+EXPORT_SYMBOL_GPL(ata_base_port_ops);
+EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
-EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
-EXPORT_SYMBOL_GPL(ata_sg_init_one);
-EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
-EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
-EXPORT_SYMBOL_GPL(ata_tf_load);
-EXPORT_SYMBOL_GPL(ata_tf_read);
-EXPORT_SYMBOL_GPL(ata_noop_dev_select);
-EXPORT_SYMBOL_GPL(ata_std_dev_select);
-EXPORT_SYMBOL_GPL(sata_print_link_status);
+EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
-EXPORT_SYMBOL_GPL(ata_check_status);
-EXPORT_SYMBOL_GPL(ata_altstatus);
-EXPORT_SYMBOL_GPL(ata_exec_command);
+EXPORT_SYMBOL_GPL(ata_pack_xfermask);
+EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
+EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
+EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
+EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
+EXPORT_SYMBOL_GPL(ata_mode_string);
+EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_port_start);
-EXPORT_SYMBOL_GPL(ata_sff_port_start);
-EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
-EXPORT_SYMBOL_GPL(ata_data_xfer);
-EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
-EXPORT_SYMBOL_GPL(ata_qc_prep);
-EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
-EXPORT_SYMBOL_GPL(ata_bmdma_setup);
-EXPORT_SYMBOL_GPL(ata_bmdma_start);
-EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
-EXPORT_SYMBOL_GPL(ata_bmdma_status);
-EXPORT_SYMBOL_GPL(ata_bmdma_stop);
-EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
-EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
-EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
-EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
-EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
+EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
-EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
-EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
-EXPORT_SYMBOL_GPL(ata_busy_sleep);
-EXPORT_SYMBOL_GPL(ata_wait_after_reset);
-EXPORT_SYMBOL_GPL(ata_wait_ready);
-EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
-EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
-EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
+EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
+EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
-EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
-EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
-EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
-EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
-EXPORT_SYMBOL_GPL(ata_pci_default_filter);
-EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */
-EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
-EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
-EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
-EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
-EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
-
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
-EXPORT_SYMBOL_GPL(ata_irq_on);
-EXPORT_SYMBOL_GPL(ata_dev_try_classify);
+EXPORT_SYMBOL_GPL(ata_std_error_handler);
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
+EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);