X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=drivers%2Fata%2Flibata-core.c;h=06b7e49e039c1e0a2e5b630fe92a1d5d33143b8a;hb=43c9c59185eec7caaff6e9dd8d4c93a4d9836a86;hp=6eed58e35e129616b694ab2251c62576ddc6708f;hpb=b1c72916abbdd0a55015c87358536ca0ebaf6735;p=safe%2Fjmp%2Flinux-2.6 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 6eed58e..06b7e49 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -56,12 +56,16 @@ #include #include #include +#include +#include +#include #include #include #include #include #include #include +#include #include "libata.h" @@ -93,7 +97,6 @@ static void ata_dev_xfermask(struct ata_device *dev); static unsigned long ata_dev_blacklisted(const struct ata_device *dev); unsigned int ata_print_id = 1; -static struct workqueue_struct *ata_wq; struct workqueue_struct *ata_aux_wq; @@ -123,19 +126,19 @@ MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link spe static int atapi_enabled = 1; module_param(atapi_enabled, int, 0444); -MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); +MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])"); static int atapi_dmadir = 0; module_param(atapi_dmadir, int, 0444); -MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); +MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)"); int atapi_passthru16 = 1; module_param(atapi_passthru16, int, 0444); -MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)"); +MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])"); int libata_fua = 0; module_param_named(fua, libata_fua, int, 0444); -MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); +MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)"); static int ata_ignore_hpa; module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); @@ -151,11 +154,15 @@ MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); int libata_noacpi = 0; module_param_named(noacpi, libata_noacpi, int, 0444); -MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set"); +MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)"); int libata_allow_tpm = 0; module_param_named(allow_tpm, libata_allow_tpm, int, 0444); -MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands"); +MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); + +static int atapi_an; +module_param(atapi_an, int, 0444); +MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=0ff [default], 1=on)"); MODULE_AUTHOR("Jeff Garzik"); MODULE_DESCRIPTION("Library module for ATA devices"); @@ -163,43 +170,124 @@ MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); -/* - * Iterator helpers. Don't use directly. +static bool ata_sstatus_online(u32 sstatus) +{ + return (sstatus & 0xf) == 0x3; +} + +/** + * ata_link_next - link iteration helper + * @link: the previous link, NULL to start + * @ap: ATA port containing links to iterate + * @mode: iteration mode, one of ATA_LITER_* * - * LOCKING: - * Host lock or EH context. + * LOCKING: + * Host lock or EH context. + * + * RETURNS: + * Pointer to the next link. 
*/ -struct ata_link *__ata_port_next_link(struct ata_port *ap, - struct ata_link *link, bool dev_only) +struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, + enum ata_link_iter_mode mode) { + BUG_ON(mode != ATA_LITER_EDGE && + mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST); + /* NULL link indicates start of iteration */ - if (!link) { - if (dev_only && sata_pmp_attached(ap)) - return ap->pmp_link; - return &ap->link; - } + if (!link) + switch (mode) { + case ATA_LITER_EDGE: + case ATA_LITER_PMP_FIRST: + if (sata_pmp_attached(ap)) + return ap->pmp_link; + /* fall through */ + case ATA_LITER_HOST_FIRST: + return &ap->link; + } - /* we just iterated over the host master link, what's next? */ - if (link == &ap->link) { - if (!sata_pmp_attached(ap)) { - if (unlikely(ap->slave_link) && !dev_only) + /* we just iterated over the host link, what's next? */ + if (link == &ap->link) + switch (mode) { + case ATA_LITER_HOST_FIRST: + if (sata_pmp_attached(ap)) + return ap->pmp_link; + /* fall through */ + case ATA_LITER_PMP_FIRST: + if (unlikely(ap->slave_link)) return ap->slave_link; + /* fall through */ + case ATA_LITER_EDGE: return NULL; } - return ap->pmp_link; - } /* slave_link excludes PMP */ if (unlikely(link == ap->slave_link)) return NULL; - /* iterate to the next PMP link */ + /* we were over a PMP link */ if (++link < ap->pmp_link + ap->nr_pmp_links) return link; + + if (mode == ATA_LITER_PMP_FIRST) + return &ap->link; + return NULL; } /** + * ata_dev_next - device iteration helper + * @dev: the previous device, NULL to start + * @link: ATA link containing devices to iterate + * @mode: iteration mode, one of ATA_DITER_* + * + * LOCKING: + * Host lock or EH context. + * + * RETURNS: + * Pointer to the next device. + */ +struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link, + enum ata_dev_iter_mode mode) +{ + BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE && + mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE); + + /* NULL dev indicates start of iteration */ + if (!dev) + switch (mode) { + case ATA_DITER_ENABLED: + case ATA_DITER_ALL: + dev = link->device; + goto check; + case ATA_DITER_ENABLED_REVERSE: + case ATA_DITER_ALL_REVERSE: + dev = link->device + ata_link_max_devices(link) - 1; + goto check; + } + + next: + /* move to the next one */ + switch (mode) { + case ATA_DITER_ENABLED: + case ATA_DITER_ALL: + if (++dev < link->device + ata_link_max_devices(link)) + goto check; + return NULL; + case ATA_DITER_ENABLED_REVERSE: + case ATA_DITER_ALL_REVERSE: + if (--dev >= link->device) + goto check; + return NULL; + } + + check: + if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) && + !ata_dev_enabled(dev)) + goto next; + return dev; +} + +/** * ata_dev_phys_link - find physical link for a device * @dev: ATA device to look up physical link for * @@ -612,7 +700,7 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) if (tf->flags & ATA_TFLAG_LBA48) { block |= (u64)tf->hob_lbah << 40; block |= (u64)tf->hob_lbam << 32; - block |= tf->hob_lbal << 24; + block |= (u64)tf->hob_lbal << 24; } else block |= (tf->device & 0xf) << 24; @@ -626,7 +714,13 @@ u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) head = tf->device & 0xf; sect = tf->lbal; - block = (cyl * dev->heads + head) * dev->sectors + sect; + if (!sect) { + ata_dev_printk(dev, KERN_WARNING, "device reported " + "invalid CHS sector 0\n"); + sect = 1; /* oh well */ + } + + block = (cyl * dev->heads + 
head) * dev->sectors + sect - 1; } return block; @@ -930,6 +1024,7 @@ static const char *sata_spd_string(unsigned int spd) static const char * const spd_str[] = { "1.5 Gbps", "3.0 Gbps", + "6.0 Gbps", }; if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) @@ -937,18 +1032,6 @@ static const char *sata_spd_string(unsigned int spd) return spd_str[spd - 1]; } -void ata_dev_disable(struct ata_device *dev) -{ - if (ata_dev_enabled(dev)) { - if (ata_msg_drv(dev->link->ap)) - ata_dev_printk(dev, KERN_WARNING, "disabled\n"); - ata_acpi_on_disable(dev); - ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | - ATA_DNXFER_QUIET); - dev->class++; - } -} - static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy) { struct ata_link *link = dev->link; @@ -1107,8 +1190,8 @@ static void ata_lpm_enable(struct ata_host *host) for (i = 0; i < host->n_ports; i++) { ap = host->ports[i]; - ata_port_for_each_link(link, ap) { - ata_link_for_each_dev(dev, link) + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, link, ALL) ata_dev_disable_pm(dev); } } @@ -1159,6 +1242,9 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf) * * We follow the current spec and consider that 0x69/0x96 * identifies a port multiplier and 0x3c/0xc3 a SEMB device. + * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports + * SEMB signature. This is worked around in + * ata_dev_read_id(). */ if ((tf->lbam == 0) && (tf->lbah == 0)) { DPRINTK("found ATA device by sig\n"); @@ -1176,8 +1262,8 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf) } if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { - printk(KERN_INFO "ata: SEMB device ignored\n"); - return ATA_DEV_SEMB_UNSUP; /* not yet */ + DPRINTK("found SEMB device by sig (could be ATA device)\n"); + return ATA_DEV_SEMB; } DPRINTK("unknown device\n"); @@ -1251,14 +1337,16 @@ static u64 ata_id_n_sectors(const u16 *id) { if (ata_id_has_lba(id)) { if (ata_id_has_lba48(id)) - return ata_id_u64(id, 100); + return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); else - return ata_id_u32(id, 60); + return ata_id_u32(id, ATA_ID_LBA_CAPACITY); } else { if (ata_id_current_chs_valid(id)) - return ata_id_u32(id, 57); + return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] * + id[ATA_ID_CUR_SECTORS]; else - return id[1] * id[3] * id[6]; + return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * + id[ATA_ID_SECTORS]; } } @@ -1268,7 +1356,7 @@ u64 ata_tf_to_lba48(const struct ata_taskfile *tf) sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; - sectors |= (tf->hob_lbal & 0xff) << 24; + sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24; sectors |= (tf->lbah & 0xff) << 16; sectors |= (tf->lbam & 0xff) << 8; sectors |= (tf->lbal & 0xff); @@ -1410,6 +1498,7 @@ static int ata_hpa_resize(struct ata_device *dev) { struct ata_eh_context *ehc = &dev->link->eh_context; int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; + bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA; u64 sectors = ata_id_n_sectors(dev->id); u64 native_sectors; int rc; @@ -1426,7 +1515,7 @@ static int ata_hpa_resize(struct ata_device *dev) /* If device aborted the command or HPA isn't going to * be unlocked, skip HPA resizing. 
*/ - if (rc == -EACCES || !ata_ignore_hpa) { + if (rc == -EACCES || !unlock_hpa) { ata_dev_printk(dev, KERN_WARNING, "HPA support seems " "broken, skipping HPA handling\n"); dev->horkage |= ATA_HORKAGE_BROKEN_HPA; @@ -1438,9 +1527,10 @@ static int ata_hpa_resize(struct ata_device *dev) return rc; } + dev->n_native_sectors = native_sectors; /* nothing to do? */ - if (native_sectors <= sectors || !ata_ignore_hpa) { + if (native_sectors <= sectors || !unlock_hpa) { if (!print_info || native_sectors == sectors) return 0; @@ -1579,8 +1669,8 @@ unsigned long ata_id_xfermask(const u16 *id) /* * Process compact flash extended modes */ - int pio = id[163] & 0x7; - int dma = (id[163] >> 3) & 7; + int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7; + int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7; if (pio) pio_mask |= (1 << 5); @@ -1599,53 +1689,6 @@ unsigned long ata_id_xfermask(const u16 *id) return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); } -/** - * ata_pio_queue_task - Queue port_task - * @ap: The ata_port to queue port_task for - * @fn: workqueue function to be scheduled - * @data: data for @fn to use - * @delay: delay time in msecs for workqueue function - * - * Schedule @fn(@data) for execution after @delay jiffies using - * port_task. There is one port_task per port and it's the - * user(low level driver)'s responsibility to make sure that only - * one task is active at any given time. - * - * libata core layer takes care of synchronization between - * port_task and EH. ata_pio_queue_task() may be ignored for EH - * synchronization. - * - * LOCKING: - * Inherited from caller. - */ -void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay) -{ - ap->port_task_data = data; - - /* may fail if ata_port_flush_task() in progress */ - queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay)); -} - -/** - * ata_port_flush_task - Flush port_task - * @ap: The ata_port to flush port_task for - * - * After this function completes, port_task is guranteed not to - * be running or scheduled. - * - * LOCKING: - * Kernel thread context (may sleep) - */ -void ata_port_flush_task(struct ata_port *ap) -{ - DPRINTK("ENTER\n"); - - cancel_rearming_delayed_work(&ap->port_task); - - if (ata_msg_ctl(ap)) - ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__); -} - static void ata_qc_complete_internal(struct ata_queued_cmd *qc) { struct completion *waiting = qc->private_data; @@ -1767,7 +1810,7 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); - ata_port_flush_task(ap); + ata_sff_flush_pio_task(ap); if (!rc) { spin_lock_irqsave(ap->lock, flags); @@ -1821,22 +1864,6 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, ap->qc_active = preempted_qc_active; ap->nr_active_links = preempted_nr_active_links; - /* XXX - Some LLDDs (sata_mv) disable port on command failure. - * Until those drivers are fixed, we detect the condition - * here, fail the command with AC_ERR_SYSTEM and reenable the - * port. - * - * Note that this doesn't change any behavior as internal - * command failure results in disabling the device in the - * higher layer for LLDDs without new reset/EH callbacks. - * - * Kill the following code as soon as those drivers are fixed. 
- */ - if (ap->flags & ATA_FLAG_DISABLED) { - err_mask |= AC_ERR_SYSTEM; - ata_port_probe(ap); - } - spin_unlock_irqrestore(ap->lock, flags); if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout) @@ -1917,13 +1944,23 @@ unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) * Check if the current speed of the device requires IORDY. Used * by various controllers for chip configuration. */ - unsigned int ata_pio_need_iordy(const struct ata_device *adev) { - /* Controller doesn't support IORDY. Probably a pointless check - as the caller should know this */ + /* Don't set IORDY if we're preparing for reset. IORDY may + * lead to controller lock up on certain controllers if the + * port is not occupied. See bko#11703 for details. + */ + if (adev->link->ap->pflags & ATA_PFLAG_RESETTING) + return 0; + /* Controller doesn't support IORDY. Probably a pointless + * check as the caller should know this. + */ if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) return 0; + /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */ + if (ata_id_is_cfa(adev->id) + && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6)) + return 0; /* PIO3 and higher it is mandatory */ if (adev->pio_mode > XFER_PIO_2) return 1; @@ -1940,7 +1977,6 @@ unsigned int ata_pio_need_iordy(const struct ata_device *adev) * Compute the highest mode possible if we are not using iordy. Return * -1 if no iordy mode is available. */ - static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) { /* If we have no drive specific rule, then PIO 2 is non IORDY */ @@ -2003,6 +2039,7 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, struct ata_taskfile tf; unsigned int err_mask = 0; const char *reason; + bool is_semb = class == ATA_DEV_SEMB; int may_fallback = 1, tried_spinup = 0; int rc; @@ -2013,6 +2050,8 @@ retry: ata_tf_init(dev, &tf); switch (class) { + case ATA_DEV_SEMB: + class = ATA_DEV_ATA; /* some hard drives report SEMB sig */ case ATA_DEV_ATA: tf.command = ATA_CMD_ID_ATA; break; @@ -2049,6 +2088,14 @@ retry: return -ENOENT; } + if (is_semb) { + ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on " + "device w/ SEMB sig, disabled\n"); + /* SEMB is not supported yet */ + *p_class = ATA_DEV_SEMB_UNSUP; + return 0; + } + if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { /* Device or controller might have reported * the wrong device class. Give a shot at the @@ -2079,6 +2126,14 @@ retry: goto err_out; } + if (dev->horkage & ATA_HORKAGE_DUMP_ID) { + ata_dev_printk(dev, KERN_DEBUG, "dumping IDENTIFY data, " + "class=%d may_fallback=%d tried_spinup=%d\n", + class, may_fallback, tried_spinup); + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, + 16, 2, id, ATA_ID_WORDS * sizeof(*id), true); + } + /* Falling back doesn't make sense if ID data was read * successfully at least once. */ @@ -2129,7 +2184,7 @@ retry: * Some drives were very specific about that exact sequence. * * Note that ATA4 says lba is mandatory so the second check - * shoud never trigger. + * should never trigger. 
*/ if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { err_mask = ata_dev_init_params(dev, id[3], id[6]); @@ -2158,35 +2213,93 @@ retry: return rc; } +static int ata_do_link_spd_horkage(struct ata_device *dev) +{ + struct ata_link *plink = ata_dev_phys_link(dev); + u32 target, target_limit; + + if (!sata_scr_valid(plink)) + return 0; + + if (dev->horkage & ATA_HORKAGE_1_5_GBPS) + target = 1; + else + return 0; + + target_limit = (1 << target) - 1; + + /* if already on stricter limit, no need to push further */ + if (plink->sata_spd_limit <= target_limit) + return 0; + + plink->sata_spd_limit = target_limit; + + /* Request another EH round by returning -EAGAIN if link is + * going faster than the target speed. Forward progress is + * guaranteed by setting sata_spd_limit to target_limit above. + */ + if (plink->sata_spd > target) { + ata_dev_printk(dev, KERN_INFO, + "applying link speed limit horkage to %s\n", + sata_spd_string(target)); + return -EAGAIN; + } + return 0; +} + static inline u8 ata_dev_knobble(struct ata_device *dev) { struct ata_port *ap = dev->link->ap; + + if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) + return 0; + return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); } -static void ata_dev_config_ncq(struct ata_device *dev, +static int ata_dev_config_ncq(struct ata_device *dev, char *desc, size_t desc_sz) { struct ata_port *ap = dev->link->ap; int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); + unsigned int err_mask; + char *aa_desc = ""; if (!ata_id_has_ncq(dev->id)) { desc[0] = '\0'; - return; + return 0; } if (dev->horkage & ATA_HORKAGE_NONCQ) { snprintf(desc, desc_sz, "NCQ (not used)"); - return; + return 0; } if (ap->flags & ATA_FLAG_NCQ) { hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); dev->flags |= ATA_DFLAG_NCQ; } + if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && + (ap->flags & ATA_FLAG_FPDMA_AA) && + ata_id_has_fpdma_aa(dev->id)) { + err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, + SATA_FPDMA_AA); + if (err_mask) { + ata_dev_printk(dev, KERN_ERR, "failed to enable AA" + "(error_mask=0x%x)\n", err_mask); + if (err_mask != AC_ERR_DEV) { + dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; + return -EIO; + } + } else + aa_desc = ", AA"; + } + if (hdepth >= ddepth) - snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth); + snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); else - snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth); + snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, + ddepth, aa_desc); + return 0; } /** @@ -2244,6 +2357,10 @@ int ata_dev_configure(struct ata_device *dev) return 0; } + rc = ata_do_link_spd_horkage(dev); + if (rc) + return rc; + /* let ACPI work its magic */ rc = ata_acpi_on_devcfg(dev); if (rc) @@ -2271,6 +2388,7 @@ int ata_dev_configure(struct ata_device *dev) dev->cylinders = 0; dev->heads = 0; dev->sectors = 0; + dev->multi_count = 0; /* * common ATA, ATAPI feature tests @@ -2292,7 +2410,8 @@ int ata_dev_configure(struct ata_device *dev) /* ATA-specific feature tests */ if (dev->class == ATA_DEV_ATA) { if (ata_id_is_cfa(id)) { - if (id[162] & 1) /* CPRM may make this media unusable */ + /* CPRM may make this media unusable */ + if (id[ATA_ID_CFA_KEY_MGMT] & 1) ata_dev_printk(dev, KERN_WARNING, "supports DRM functions and may " "not be fully accessable.\n"); @@ -2308,12 +2427,19 @@ int ata_dev_configure(struct ata_device *dev) dev->n_sectors = ata_id_n_sectors(id); - if (dev->id[59] & 0x100) - dev->multi_count = dev->id[59] & 0xff; + /* get current R/W 
Multiple count setting */ + if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { + unsigned int max = dev->id[47] & 0xff; + unsigned int cnt = dev->id[59] & 0xff; + /* only recognize/allow powers of two here */ + if (is_power_of_2(max) && is_power_of_2(cnt)) + if (cnt <= max) + dev->multi_count = cnt; + } if (ata_id_has_lba(id)) { const char *lba_desc; - char ncq_desc[20]; + char ncq_desc[24]; lba_desc = "LBA"; dev->flags |= ATA_DFLAG_LBA; @@ -2327,7 +2453,9 @@ int ata_dev_configure(struct ata_device *dev) } /* config NCQ */ - ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); + rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); + if (rc) + return rc; /* print device info to dmesg */ if (ata_msg_drv(ap) && print_info) { @@ -2394,7 +2522,8 @@ int ata_dev_configure(struct ata_device *dev) * to enable ATAPI AN to discern between PHY status * changed notifications and ATAPI ANs. */ - if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && + if (atapi_an && + (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && (!sata_pmp_attached(ap) || sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { unsigned int err_mask; @@ -2489,6 +2618,13 @@ int ata_dev_configure(struct ata_device *dev) } } + if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { + ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires " + "firmware update to be fully functional.\n"); + ata_dev_printk(dev, KERN_WARNING, " contact the vendor " + "or visit http://ata.wiki.kernel.org.\n"); + } + return 0; err_out_nosup: @@ -2582,13 +2718,11 @@ int ata_bus_probe(struct ata_port *ap) int rc; struct ata_device *dev; - ata_port_probe(ap); - - ata_link_for_each_dev(dev, &ap->link) + ata_for_each_dev(dev, &ap->link, ALL) tries[dev->devno] = ATA_PROBE_MAX_TRIES; retry: - ata_link_for_each_dev(dev, &ap->link) { + ata_for_each_dev(dev, &ap->link, ALL) { /* If we issue an SRST then an ATA drive (not ATAPI) * may change configuration and be in PIO0 timing. If * we do a hard reset (or are coming from power on) @@ -2610,9 +2744,8 @@ int ata_bus_probe(struct ata_port *ap) /* reset and determine device classes */ ap->ops->phy_reset(ap); - ata_link_for_each_dev(dev, &ap->link) { - if (!(ap->flags & ATA_FLAG_DISABLED) && - dev->class != ATA_DEV_UNKNOWN) + ata_for_each_dev(dev, &ap->link, ALL) { + if (dev->class != ATA_DEV_UNKNOWN) classes[dev->devno] = dev->class; else classes[dev->devno] = ATA_DEV_NONE; @@ -2620,13 +2753,11 @@ int ata_bus_probe(struct ata_port *ap) dev->class = ATA_DEV_UNKNOWN; } - ata_port_probe(ap); - /* read IDENTIFY page and configure devices. We have to do the identify specific sequence bass-ackwards so that PDIAG- is released by the slave device */ - ata_link_for_each_dev_reverse(dev, &ap->link) { + ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { if (tries[dev->devno]) dev->class = classes[dev->devno]; @@ -2643,24 +2774,19 @@ int ata_bus_probe(struct ata_port *ap) if (ap->ops->cable_detect) ap->cbl = ap->ops->cable_detect(ap); - /* We may have SATA bridge glue hiding here irrespective of the - reported cable types and sensed types */ - ata_link_for_each_dev(dev, &ap->link) { - if (!ata_dev_enabled(dev)) - continue; - /* SATA drives indicate we have a bridge. We don't know which - end of the link the bridge is which is a problem */ + /* We may have SATA bridge glue hiding here irrespective of + * the reported cable types and sensed types. When SATA + * drives indicate we have a bridge, we don't know which end + * of the link the bridge is which is a problem. 
+ */ + ata_for_each_dev(dev, &ap->link, ENABLED) if (ata_id_is_sata(dev->id)) ap->cbl = ATA_CBL_SATA; - } /* After the identify sequence we can now set up the devices. We do this in the normal order so that the user doesn't get confused */ - ata_link_for_each_dev(dev, &ap->link) { - if (!ata_dev_enabled(dev)) - continue; - + ata_for_each_dev(dev, &ap->link, ENABLED) { ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; rc = ata_dev_configure(dev); ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; @@ -2673,12 +2799,9 @@ int ata_bus_probe(struct ata_port *ap) if (rc) goto fail; - ata_link_for_each_dev(dev, &ap->link) - if (ata_dev_enabled(dev)) - return 0; + ata_for_each_dev(dev, &ap->link, ENABLED) + return 0; - /* no device present, disable port */ - ata_port_disable(ap); return -ENODEV; fail: @@ -2698,7 +2821,7 @@ int ata_bus_probe(struct ata_port *ap) /* This is the last chance, better to slow * down than lose it. */ - sata_down_spd_limit(&ap->link); + sata_down_spd_limit(&ap->link, 0); ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); } } @@ -2710,22 +2833,6 @@ int ata_bus_probe(struct ata_port *ap) } /** - * ata_port_probe - Mark port as enabled - * @ap: Port for which we indicate enablement - * - * Modify @ap data structure such that the system - * thinks that the entire port is enabled. - * - * LOCKING: host lock, or some other form of - * serialization. - */ - -void ata_port_probe(struct ata_port *ap) -{ - ap->flags &= ~ATA_FLAG_DISABLED; -} - -/** * sata_print_link_status - Print SATA link status * @link: SATA link to printk link status about * @@ -2772,43 +2879,29 @@ struct ata_device *ata_dev_pair(struct ata_device *adev) } /** - * ata_port_disable - Disable port. - * @ap: Port to be disabled. - * - * Modify @ap data structure such that the system - * thinks that the entire port is disabled, and should - * never attempt to probe or communicate with devices - * on this port. - * - * LOCKING: host lock, or some other form of - * serialization. - */ - -void ata_port_disable(struct ata_port *ap) -{ - ap->link.device[0].class = ATA_DEV_NONE; - ap->link.device[1].class = ATA_DEV_NONE; - ap->flags |= ATA_FLAG_DISABLED; -} - -/** * sata_down_spd_limit - adjust SATA spd limit downward * @link: Link to adjust SATA spd limit for + * @spd_limit: Additional limit * * Adjust SATA spd limit of @link downward. Note that this * function only adjusts the limit. The change must be applied * using sata_set_spd(). * + * If @spd_limit is non-zero, the speed is limited to equal to or + * lower than @spd_limit if such speed is supported. If + * @spd_limit is slower than any supported speed, only the lowest + * supported speed is allowed. + * * LOCKING: * Inherited from caller. * * RETURNS: * 0 on success, negative errno on failure */ -int sata_down_spd_limit(struct ata_link *link) +int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) { u32 sstatus, spd, mask; - int rc, highbit; + int rc, bit; if (!sata_scr_valid(link)) return -EOPNOTSUPP; @@ -2817,7 +2910,7 @@ int sata_down_spd_limit(struct ata_link *link) * If not, use cached value in link->sata_spd. 
*/ rc = sata_scr_read(link, SCR_STATUS, &sstatus); - if (rc == 0) + if (rc == 0 && ata_sstatus_online(sstatus)) spd = (sstatus >> 4) & 0xf; else spd = link->sata_spd; @@ -2827,8 +2920,8 @@ int sata_down_spd_limit(struct ata_link *link) return -EINVAL; /* unconditionally mask off the highest bit */ - highbit = fls(mask) - 1; - mask &= ~(1 << highbit); + bit = fls(mask) - 1; + mask &= ~(1 << bit); /* Mask off all speeds higher than or equal to the current * one. Force 1.5Gbps if current SPD is not available. @@ -2842,6 +2935,15 @@ int sata_down_spd_limit(struct ata_link *link) if (!mask) return -EINVAL; + if (spd_limit) { + if (mask & ((1 << spd_limit) - 1)) + mask &= (1 << spd_limit) - 1; + else { + bit = ffs(mask) - 1; + mask = 1 << bit; + } + } + link->sata_spd_limit = mask; ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n", @@ -2943,33 +3045,33 @@ int sata_set_spd(struct ata_link *link) */ static const struct ata_timing ata_timing[] = { -/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */ - { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 }, - { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 }, - { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 }, - { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 }, - { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 }, - { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 }, - { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 }, - - { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, - { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, - { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, - - { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, - { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, - { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, - { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 }, - { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 }, - -/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ - { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, - { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, - { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, - { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 }, - { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, - { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 }, - { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 }, +/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ + { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, + { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, + { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, + { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, + { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, + { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, + { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, + + { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, + { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, + { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, + + { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, + { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, + { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, + { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, + { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, + +/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ + { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, + { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, + { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, + { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, + { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, + { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, + { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, { 0xFF } }; @@ -2979,14 +3081,15 @@ static const struct ata_timing ata_timing[] = { static void 
ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) { - q->setup = EZ(t->setup * 1000, T); - q->act8b = EZ(t->act8b * 1000, T); - q->rec8b = EZ(t->rec8b * 1000, T); - q->cyc8b = EZ(t->cyc8b * 1000, T); - q->active = EZ(t->active * 1000, T); - q->recover = EZ(t->recover * 1000, T); - q->cycle = EZ(t->cycle * 1000, T); - q->udma = EZ(t->udma * 1000, UT); + q->setup = EZ(t->setup * 1000, T); + q->act8b = EZ(t->act8b * 1000, T); + q->rec8b = EZ(t->rec8b * 1000, T); + q->cyc8b = EZ(t->cyc8b * 1000, T); + q->active = EZ(t->active * 1000, T); + q->recover = EZ(t->recover * 1000, T); + q->dmack_hold = EZ(t->dmack_hold * 1000, T); + q->cycle = EZ(t->cycle * 1000, T); + q->udma = EZ(t->udma * 1000, UT); } void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, @@ -2998,6 +3101,7 @@ void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); + if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); } @@ -3017,6 +3121,7 @@ const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) int ata_timing_compute(struct ata_device *adev, unsigned short speed, struct ata_timing *t, int T, int UT) { + const u16 *id = adev->id; const struct ata_timing *s; struct ata_timing p; @@ -3034,14 +3139,18 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed, * PIO/MW_DMA cycle timing. */ - if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ + if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ memset(&p, 0, sizeof(p)); + if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { - if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; - else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; - } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { - p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; - } + if (speed <= XFER_PIO_2) + p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; + else if ((speed <= XFER_PIO_4) || + (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) + p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; + } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) + p.cycle = id[ATA_ID_EIDE_DMA_MIN]; + ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); } @@ -3227,17 +3336,27 @@ int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) static int ata_dev_set_mode(struct ata_device *dev) { + struct ata_port *ap = dev->link->ap; struct ata_eh_context *ehc = &dev->link->eh_context; + const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; const char *dev_err_whine = ""; int ign_dev_err = 0; - unsigned int err_mask; + unsigned int err_mask = 0; int rc; dev->flags &= ~ATA_DFLAG_PIO; if (dev->xfer_shift == ATA_SHIFT_PIO) dev->flags |= ATA_DFLAG_PIO; - err_mask = ata_dev_set_xfermode(dev); + if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) + dev_err_whine = " (SET_XFERMODE skipped)"; + else { + if (nosetxfer) + ata_dev_printk(dev, KERN_WARNING, + "NOSETXFER but PATA detected - can't " + "skip SETXFER, might malfunction\n"); + err_mask = ata_dev_set_xfermode(dev); + } if (err_mask & ~AC_ERR_DEV) goto fail; @@ -3321,13 +3440,10 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) int rc = 
0, used_dma = 0, found = 0; /* step 1: calculate xfer_mask */ - ata_link_for_each_dev(dev, link) { + ata_for_each_dev(dev, link, ENABLED) { unsigned long pio_mask, dma_mask; unsigned int mode_mask; - if (!ata_dev_enabled(dev)) - continue; - mode_mask = ATA_DMA_MASK_ATA; if (dev->class == ATA_DEV_ATAPI) mode_mask = ATA_DMA_MASK_ATAPI; @@ -3356,10 +3472,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) goto out; /* step 2: always set host PIO timings */ - ata_link_for_each_dev(dev, link) { - if (!ata_dev_enabled(dev)) - continue; - + ata_for_each_dev(dev, link, ENABLED) { if (dev->pio_mode == 0xff) { ata_dev_printk(dev, KERN_WARNING, "no PIO support\n"); rc = -EINVAL; @@ -3373,8 +3486,8 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) } /* step 3: set host DMA timings */ - ata_link_for_each_dev(dev, link) { - if (!ata_dev_enabled(dev) || !ata_dma_enabled(dev)) + ata_for_each_dev(dev, link, ENABLED) { + if (!ata_dma_enabled(dev)) continue; dev->xfer_mode = dev->dma_mode; @@ -3384,11 +3497,7 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) } /* step 4: update devices' xfer mode */ - ata_link_for_each_dev(dev, link) { - /* don't update suspended devices' xfer mode */ - if (!ata_dev_enabled(dev)) - continue; - + ata_for_each_dev(dev, link, ENABLED) { rc = ata_dev_set_mode(dev); if (rc) goto out; @@ -3430,9 +3539,15 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)) { unsigned long start = jiffies; - unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); + unsigned long nodev_deadline; int warned = 0; + /* choose which 0xff timeout to use, read comment in libata.h */ + if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) + nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); + else + nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); + /* Slave readiness can't be tested separately from master. On * M/S emulation configuration, this function should be called * only on the master and it will handle both master and slave. @@ -3450,12 +3565,12 @@ int ata_wait_ready(struct ata_link *link, unsigned long deadline, if (ready > 0) return 0; - /* -ENODEV could be transient. Ignore -ENODEV if link + /* + * -ENODEV could be transient. Ignore -ENODEV if link * is online. Also, some SATA devices take a long - * time to clear 0xff after reset. For example, - * HHD424020F7SV00 iVDR needs >= 800ms while Quantum - * GoVault needs even more than that. Wait for - * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline. + * time to clear 0xff after reset. Wait for + * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't + * offline. * * Note that some PATA controllers (pata_ali) explode * if status register is read more than once when @@ -3596,21 +3711,45 @@ int sata_link_debounce(struct ata_link *link, const unsigned long *params, int sata_link_resume(struct ata_link *link, const unsigned long *params, unsigned long deadline) { + int tries = ATA_LINK_RESUME_TRIES; u32 scontrol, serror; int rc; if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) return rc; - scontrol = (scontrol & 0x0f0) | 0x300; + /* + * Writes to SControl sometimes get ignored under certain + * controllers (ata_piix SIDPR). Make sure DET actually is + * cleared. + */ + do { + scontrol = (scontrol & 0x0f0) | 0x300; + if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) + return rc; + /* + * Some PHYs react badly if SStatus is pounded + * immediately after resuming. 
Delay 200ms before + * debouncing. + */ + msleep(200); - if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) - return rc; + /* is SControl restored correctly? */ + if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) + return rc; + } while ((scontrol & 0xf0f) != 0x300 && --tries); - /* Some PHYs react badly if SStatus is pounded immediately - * after resuming. Delay 200ms before debouncing. - */ - msleep(200); + if ((scontrol & 0xf0f) != 0x300) { + ata_link_printk(link, KERN_ERR, + "failed to resume link (SControl %X)\n", + scontrol); + return 0; + } + + if (tries < ATA_LINK_RESUME_TRIES) + ata_link_printk(link, KERN_WARNING, + "link resume succeeded after %d retries\n", + ATA_LINK_RESUME_TRIES - tries); if ((rc = sata_link_debounce(link, params, deadline))) return rc; @@ -3934,6 +4073,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, unsigned int readid_flags) { u64 n_sectors = dev->n_sectors; + u64 n_native_sectors = dev->n_native_sectors; int rc; if (!ata_dev_enabled(dev)) @@ -3941,7 +4081,9 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ if (ata_class_enabled(new_class) && - new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) { + new_class != ATA_DEV_ATA && + new_class != ATA_DEV_ATAPI && + new_class != ATA_DEV_SEMB) { ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n", dev->class, new_class); rc = -ENODEV; @@ -3959,22 +4101,51 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, goto fail; /* verify n_sectors hasn't changed */ - if (dev->class == ATA_DEV_ATA && n_sectors && - dev->n_sectors != n_sectors) { - ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch " - "%llu != %llu\n", - (unsigned long long)n_sectors, - (unsigned long long)dev->n_sectors); - - /* restore original n_sectors */ - dev->n_sectors = n_sectors; + if (dev->class != ATA_DEV_ATA || !n_sectors || + dev->n_sectors == n_sectors) + return 0; - rc = -ENODEV; - goto fail; + /* n_sectors has changed */ + ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n", + (unsigned long long)n_sectors, + (unsigned long long)dev->n_sectors); + + /* + * Something could have caused HPA to be unlocked + * involuntarily. If n_native_sectors hasn't changed and the + * new size matches it, keep the device. + */ + if (dev->n_native_sectors == n_native_sectors && + dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { + ata_dev_printk(dev, KERN_WARNING, + "new n_sectors matches native, probably " + "late HPA unlock, continuing\n"); + /* keep using the old n_sectors */ + dev->n_sectors = n_sectors; + return 0; } - return 0; + /* + * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try + * unlocking HPA in those cases. 
+ * + * https://bugzilla.kernel.org/show_bug.cgi?id=15396 + */ + if (dev->n_native_sectors == n_native_sectors && + dev->n_sectors < n_sectors && n_sectors == n_native_sectors && + !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { + ata_dev_printk(dev, KERN_WARNING, + "old n_sectors matches native, probably " + "late HPA lock, will try to unlock HPA\n"); + /* try unlocking HPA */ + dev->flags |= ATA_DFLAG_UNLOCK_HPA; + rc = -EIO; + } else + rc = -ENODEV; + /* restore original n_[native_]sectors and fail */ + dev->n_native_sectors = n_native_sectors; + dev->n_sectors = n_sectors; fail: ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); return rc; @@ -4023,6 +4194,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Weird ATAPI devices */ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, + { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, /* Devices we expect to fail diagnostics */ @@ -4037,6 +4209,74 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, + { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, + + /* Seagate NCQ + FLUSH CACHE firmware bug */ + { "ST31500341AS", "SD15", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31500341AS", "SD16", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31500341AS", "SD17", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31500341AS", "SD18", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31500341AS", "SD19", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + + { "ST31000333AS", "SD15", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31000333AS", "SD16", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31000333AS", "SD17", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31000333AS", "SD18", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST31000333AS", "SD19", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + + { "ST3640623AS", "SD15", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640623AS", "SD16", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640623AS", "SD17", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640623AS", "SD18", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640623AS", "SD19", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + + { "ST3640323AS", "SD15", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640323AS", "SD16", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640323AS", "SD17", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640323AS", "SD18", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3640323AS", "SD19", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + + { "ST3320813AS", "SD15", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320813AS", "SD16", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320813AS", "SD17", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320813AS", "SD18", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320813AS", "SD19", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + + { "ST3320613AS", "SD15", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320613AS", "SD16", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320613AS", "SD17", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320613AS", "SD18", ATA_HORKAGE_NONCQ | + 
ATA_HORKAGE_FIRMWARE_WARN }, + { "ST3320613AS", "SD19", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, /* Blacklist entries taken from Silicon Image 3124/3132 Windows driver .inf file - also several Linux problem reports */ @@ -4044,12 +4284,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ + { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, + /* devices which puke on READ_NATIVE_MAX */ { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, + /* this one allows HPA unlocking but fails IOs on the area */ + { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, + /* Devices which report 1 sector over size HPA */ { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, @@ -4065,6 +4311,18 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, }, { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, }, + /* Devices that do not need bridging limits applied */ + { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, + + /* Devices which aren't very happy with higher link speeds */ + { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, + + /* + * Devices which choke on SETXFER. Applies only if both the + * device and controller are SATA. + */ + { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, + /* End Marker */ { } }; @@ -4158,28 +4416,32 @@ static int cable_is_40wire(struct ata_port *ap) struct ata_link *link; struct ata_device *dev; - /* If the controller thinks we are 40 wire, we are */ + /* If the controller thinks we are 40 wire, we are. */ if (ap->cbl == ATA_CBL_PATA40) return 1; - /* If the controller thinks we are 80 wire, we are */ + + /* If the controller thinks we are 80 wire, we are. */ if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) return 0; - /* If the system is known to be 40 wire short cable (eg laptop), - then we allow 80 wire modes even if the drive isn't sure */ + + /* If the system is known to be 40 wire short cable (eg + * laptop), then we allow 80 wire modes even if the drive + * isn't sure. + */ if (ap->cbl == ATA_CBL_PATA40_SHORT) return 0; - /* If the controller doesn't know we scan - - - Note: We look for all 40 wire detects at this point. - Any 80 wire detect is taken to be 80 wire cable - because - - In many setups only the one drive (slave if present) - will give a valid detect - - If you have a non detect capable drive you don't - want it to colour the choice - */ - ata_port_for_each_link(link, ap) { - ata_link_for_each_dev(dev, link) { + + /* If the controller doesn't know, we scan. + * + * Note: We look for all 40 wire detects at this point. 
Any + * 80 wire detect is taken to be 80 wire cable because + * - in many setups only the one drive (slave if present) will + * give a valid detect + * - if you have a non detect capable drive you don't want it + * to colour the choice + */ + ata_for_each_link(link, ap, EDGE) { + ata_for_each_dev(dev, link, ENABLED) { if (!ata_is_40wire(dev)) return 0; } @@ -4404,12 +4666,12 @@ void ata_sg_clean(struct ata_queued_cmd *qc) struct scatterlist *sg = qc->sg; int dir = qc->dma_dir; - WARN_ON(sg == NULL); + WARN_ON_ONCE(sg == NULL); VPRINTK("unmapping %u sg elements\n", qc->n_elem); if (qc->n_elem) - dma_unmap_sg(ap->dev, sg, qc->n_elem, dir); + dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); qc->flags &= ~ATA_QCFLAG_DMAMAP; qc->sg = NULL; @@ -4436,7 +4698,8 @@ int atapi_check_dma(struct ata_queued_cmd *qc) /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a * few ATAPI devices choke on such DMA requests. */ - if (unlikely(qc->nbytes & 15)) + if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && + unlikely(qc->nbytes & 15)) return 1; if (ap->ops->check_atapi_dma) @@ -4523,7 +4786,7 @@ static int ata_sg_setup(struct ata_queued_cmd *qc) return -1; DPRINTK("%d sg elements mapped\n", n_elem); - + qc->orig_n_elem = qc->n_elem; qc->n_elem = n_elem; qc->flags |= ATA_QCFLAG_DMAMAP; @@ -4554,8 +4817,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words) /** * ata_qc_new - Request an available ATA command, for queueing - * @ap: Port associated with device @dev - * @dev: Device from whom we request an available command structure + * @ap: target port * * LOCKING: * None. @@ -4620,10 +4882,11 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) */ void ata_qc_free(struct ata_queued_cmd *qc) { - struct ata_port *ap = qc->ap; + struct ata_port *ap; unsigned int tag; - WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + ap = qc->ap; qc->flags = 0; tag = qc->tag; @@ -4635,11 +4898,13 @@ void ata_qc_free(struct ata_queued_cmd *qc) void __ata_qc_complete(struct ata_queued_cmd *qc) { - struct ata_port *ap = qc->ap; - struct ata_link *link = qc->dev->link; + struct ata_port *ap; + struct ata_link *link; - WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ - WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); + WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ + WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); + ap = qc->ap; + link = qc->dev->link; if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) ata_sg_clean(qc); @@ -4697,7 +4962,6 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc) /** * ata_qc_complete - Complete an active ATA command * @qc: Command to complete - * @err_mask: ATA Status register contents * * Indicate to the mid and upper layers that an ATA * command has completed, with either an ok or not-ok status. 
@@ -4726,20 +4990,22 @@ void ata_qc_complete(struct ata_queued_cmd *qc) struct ata_device *dev = qc->dev; struct ata_eh_info *ehi = &dev->link->eh_info; - WARN_ON(ap->pflags & ATA_PFLAG_FROZEN); - if (unlikely(qc->err_mask)) qc->flags |= ATA_QCFLAG_FAILED; if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { - if (!ata_tag_internal(qc->tag)) { - /* always fill result TF for failed qc */ - fill_result_tf(qc); + /* always fill result TF for failed qc */ + fill_result_tf(qc); + + if (!ata_tag_internal(qc->tag)) ata_qc_schedule_eh(qc); - return; - } + else + __ata_qc_complete(qc); + return; } + WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); + /* read result TF if requested */ if (qc->flags & ATA_QCFLAG_RESULT_TF) fill_result_tf(qc); @@ -4801,7 +5067,6 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) { int nr_done = 0; u32 done_mask; - int i; done_mask = ap->qc_active ^ qc_active; @@ -4811,16 +5076,16 @@ int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) return -EINVAL; } - for (i = 0; i < ATA_MAX_QUEUE; i++) { + while (done_mask) { struct ata_queued_cmd *qc; + unsigned int tag = __ffs(done_mask); - if (!(done_mask & (1 << i))) - continue; - - if ((qc = ata_qc_from_tag(ap, i))) { + qc = ata_qc_from_tag(ap, tag); + if (qc) { ata_qc_complete(qc); nr_done++; } + done_mask &= ~(1 << tag); } return nr_done; @@ -4848,16 +5113,16 @@ void ata_qc_issue(struct ata_queued_cmd *qc) * check is skipped for old EH because it reuses active qc to * request ATAPI sense. */ - WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag)); + WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); if (ata_is_ncq(prot)) { - WARN_ON(link->sactive & (1 << qc->tag)); + WARN_ON_ONCE(link->sactive & (1 << qc->tag)); if (!link->sactive) ap->nr_active_links++; link->sactive |= 1 << qc->tag; } else { - WARN_ON(link->sactive); + WARN_ON_ONCE(link->sactive); ap->nr_active_links++; link->active_tag = qc->tag; @@ -5021,7 +5286,7 @@ bool ata_phys_link_online(struct ata_link *link) u32 sstatus; if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && - (sstatus & 0xf) == 0x3) + ata_sstatus_online(sstatus)) return true; return false; } @@ -5045,7 +5310,7 @@ bool ata_phys_link_offline(struct ata_link *link) u32 sstatus; if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && - (sstatus & 0xf) != 0x3) + !ata_sstatus_online(sstatus)) return true; return false; } @@ -5132,7 +5397,7 @@ static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, } ap->pflags |= ATA_PFLAG_PM_PENDING; - __ata_port_for_each_link(link, ap) { + ata_for_each_link(link, ap, HOST_FIRST) { link->eh_info.action |= action; link->eh_info.flags |= ehi_flags; } @@ -5207,30 +5472,6 @@ void ata_host_resume(struct ata_host *host) #endif /** - * ata_port_start - Set port up for dma. - * @ap: Port to initialize - * - * Called just after data structures for each port are - * initialized. Allocates space for PRD table. - * - * May be used as the port_start() entry in ata_port_operations. - * - * LOCKING: - * Inherited from caller. 
- */ -int ata_port_start(struct ata_port *ap) -{ - struct device *dev = ap->dev; - - ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, - GFP_KERNEL); - if (!ap->prd) - return -ENOMEM; - - return 0; -} - -/** * ata_dev_init - Initialize an ata_device structure * @dev: Device structure to initialize * @@ -5258,8 +5499,8 @@ void ata_dev_init(struct ata_device *dev) dev->horkage = 0; spin_unlock_irqrestore(ap->lock, flags); - memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0, - sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET); + memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, + ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); dev->pio_mask = UINT_MAX; dev->mwdma_mask = UINT_MAX; dev->udma_mask = UINT_MAX; @@ -5294,6 +5535,9 @@ void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) dev->link = link; dev->devno = dev - link->device; +#ifdef CONFIG_ATA_ACPI + dev->gtf_filter = ata_acpi_gtf_filter; +#endif ata_dev_init(dev); } } @@ -5355,12 +5599,9 @@ struct ata_port *ata_port_alloc(struct ata_host *host) ap->pflags |= ATA_PFLAG_INITIALIZING; ap->lock = &host->lock; - ap->flags = ATA_FLAG_DISABLED; ap->print_id = -1; - ap->ctl = ATA_DEVCTL_OBS; ap->host = host; ap->dev = host->dev; - ap->last_ctl = 0xFF; #if defined(ATA_VERBOSE_DEBUG) /* turn on all debugging levels */ @@ -5371,13 +5612,11 @@ struct ata_port *ata_port_alloc(struct ata_host *host) ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; #endif -#ifdef CONFIG_ATA_SFF - INIT_DELAYED_WORK(&ap->port_task, ata_pio_task); -#endif INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); INIT_LIST_HEAD(&ap->eh_done_q); init_waitqueue_head(&ap->eh_wait_q); + init_completion(&ap->park_req_pending); init_timer_deferrable(&ap->fastdrain_timer); ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; ap->fastdrain_timer.data = (unsigned long)ap; @@ -5390,6 +5629,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host) ap->stats.unhandled_irq = 1; ap->stats.idle_irq = 1; #endif + ata_sff_port_init(ap); + return ap; } @@ -5760,6 +6001,63 @@ void ata_host_init(struct ata_host *host, struct device *dev, host->ops = ops; } + +static void async_port_probe(void *data, async_cookie_t cookie) +{ + int rc; + struct ata_port *ap = data; + + /* + * If we're not allowed to scan this host in parallel, + * we need to wait until all previous scans have completed + * before going further. + * Jeff Garzik says this is only within a controller, so we + * don't need to wait for port 0, only for later ports. + */ + if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) + async_synchronize_cookie(cookie); + + /* probe */ + if (ap->ops->error_handler) { + struct ata_eh_info *ehi = &ap->link.eh_info; + unsigned long flags; + + /* kick EH for boot probing */ + spin_lock_irqsave(ap->lock, flags); + + ehi->probe_mask |= ATA_ALL_DEVICES; + ehi->action |= ATA_EH_RESET | ATA_EH_LPM; + ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; + + ap->pflags &= ~ATA_PFLAG_INITIALIZING; + ap->pflags |= ATA_PFLAG_LOADING; + ata_port_schedule_eh(ap); + + spin_unlock_irqrestore(ap->lock, flags); + + /* wait for EH to finish */ + ata_port_wait_eh(ap); + } else { + DPRINTK("ata%u: bus probe begin\n", ap->print_id); + rc = ata_bus_probe(ap); + DPRINTK("ata%u: bus probe end\n", ap->print_id); + + if (rc) { + /* FIXME: do something useful here? + * Current libata behavior will + * tear down everything when + * the module is removed + * or the h/w is unplugged. 
+ */ + } + } + + /* in order to keep device order, we need to synchronize at this point */ + async_synchronize_cookie(cookie); + + ata_scsi_scan_host(ap, 1); + +} /** * ata_host_register - register initialized ATA host * @host: ATA host to register @@ -5835,55 +6133,10 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) ata_port_printk(ap, KERN_INFO, "DUMMY\n"); } - /* perform each probe synchronously */ - DPRINTK("probe begin\n"); - for (i = 0; i < host->n_ports; i++) { - struct ata_port *ap = host->ports[i]; - - /* probe */ - if (ap->ops->error_handler) { - struct ata_eh_info *ehi = &ap->link.eh_info; - unsigned long flags; - - ata_port_probe(ap); - - /* kick EH for boot probing */ - spin_lock_irqsave(ap->lock, flags); - - ehi->probe_mask |= ATA_ALL_DEVICES; - ehi->action |= ATA_EH_RESET | ATA_EH_LPM; - ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; - - ap->pflags &= ~ATA_PFLAG_INITIALIZING; - ap->pflags |= ATA_PFLAG_LOADING; - ata_port_schedule_eh(ap); - - spin_unlock_irqrestore(ap->lock, flags); - - /* wait for EH to finish */ - ata_port_wait_eh(ap); - } else { - DPRINTK("ata%u: bus probe begin\n", ap->print_id); - rc = ata_bus_probe(ap); - DPRINTK("ata%u: bus probe end\n", ap->print_id); - - if (rc) { - /* FIXME: do something useful here? - * Current libata behavior will - * tear down everything when - * the module is removed - * or the h/w is unplugged. - */ - } - } - } - - /* probes are done, now scan each port's disk(s) */ - DPRINTK("host probe begin\n"); + /* perform each probe asynchronously */ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; - - ata_scsi_scan_host(ap, 1); + async_schedule(async_port_probe, ap); } return 0; @@ -5958,8 +6211,6 @@ int ata_host_activate(struct ata_host *host, int irq, static void ata_port_detach(struct ata_port *ap) { unsigned long flags; - struct ata_link *link; - struct ata_device *dev; if (!ap->ops->error_handler) goto skip_eh; @@ -5967,28 +6218,15 @@ static void ata_port_detach(struct ata_port *ap) /* tell EH we're leaving & flush EH */ spin_lock_irqsave(ap->lock, flags); ap->pflags |= ATA_PFLAG_UNLOADING; + ata_port_schedule_eh(ap); spin_unlock_irqrestore(ap->lock, flags); + /* wait till EH commits suicide */ ata_port_wait_eh(ap); - /* EH is now guaranteed to see UNLOADING - EH context belongs - * to us. Restore SControl and disable all existing devices. - */ - __ata_port_for_each_link(link, ap) { - sata_scr_write(link, SCR_CONTROL, link->saved_scontrol); - ata_link_for_each_dev(dev, link) - ata_dev_disable(dev); - } - - /* Final freeze & EH. All in-flight commands are aborted. EH - * will be skipped and retrials will be terminated with bad - * target. 
- */ - spin_lock_irqsave(ap->lock, flags); - ata_port_freeze(ap); /* won't be thawed */ - spin_unlock_irqrestore(ap->lock, flags); + /* it better be dead now */ + WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); - ata_port_wait_eh(ap); cancel_rearming_delayed_work(&ap->hotplug_task); skip_eh: @@ -6147,6 +6385,7 @@ static int __init ata_parse_force_one(char **cur, { "3.0Gbps", .spd_limit = 2 }, { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, + { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, @@ -6307,55 +6546,43 @@ static void __init ata_parse_force_param(void) static int __init ata_init(void) { - ata_parse_force_param(); + int rc = -ENOMEM; - ata_wq = create_workqueue("ata"); - if (!ata_wq) - goto free_force_tbl; + ata_parse_force_param(); ata_aux_wq = create_singlethread_workqueue("ata_aux"); if (!ata_aux_wq) - goto free_wq; + goto fail; + + rc = ata_sff_init(); + if (rc) + goto fail; printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); return 0; -free_wq: - destroy_workqueue(ata_wq); -free_force_tbl: +fail: kfree(ata_force_tbl); - return -ENOMEM; + if (ata_aux_wq) + destroy_workqueue(ata_aux_wq); + return rc; } static void __exit ata_exit(void) { + ata_sff_exit(); kfree(ata_force_tbl); - destroy_workqueue(ata_wq); destroy_workqueue(ata_aux_wq); } subsys_initcall(ata_init); module_exit(ata_exit); -static unsigned long ratelimit_time; -static DEFINE_SPINLOCK(ata_ratelimit_lock); +static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1); int ata_ratelimit(void) { - int rc; - unsigned long flags; - - spin_lock_irqsave(&ata_ratelimit_lock, flags); - - if (time_after(jiffies, ratelimit_time)) { - rc = 1; - ratelimit_time = jiffies + (HZ/5); - } else - rc = 0; - - spin_unlock_irqrestore(&ata_ratelimit_lock, flags); - - return rc; + return __ratelimit(&ratelimit); } /** @@ -6439,7 +6666,8 @@ EXPORT_SYMBOL_GPL(ata_base_port_ops); EXPORT_SYMBOL_GPL(sata_port_ops); EXPORT_SYMBOL_GPL(ata_dummy_port_ops); EXPORT_SYMBOL_GPL(ata_dummy_port_info); -EXPORT_SYMBOL_GPL(__ata_port_next_link); +EXPORT_SYMBOL_GPL(ata_link_next); +EXPORT_SYMBOL_GPL(ata_dev_next); EXPORT_SYMBOL_GPL(ata_std_bios_param); EXPORT_SYMBOL_GPL(ata_host_init); EXPORT_SYMBOL_GPL(ata_host_alloc); @@ -6462,11 +6690,9 @@ EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); EXPORT_SYMBOL_GPL(ata_mode_string); EXPORT_SYMBOL_GPL(ata_id_xfermask); -EXPORT_SYMBOL_GPL(ata_port_start); EXPORT_SYMBOL_GPL(ata_do_set_mode); EXPORT_SYMBOL_GPL(ata_std_qc_defer); EXPORT_SYMBOL_GPL(ata_noop_qc_prep); -EXPORT_SYMBOL_GPL(ata_port_probe); EXPORT_SYMBOL_GPL(ata_dev_disable); EXPORT_SYMBOL_GPL(sata_set_spd); EXPORT_SYMBOL_GPL(ata_wait_after_reset); @@ -6478,10 +6704,8 @@ EXPORT_SYMBOL_GPL(sata_std_hardreset); EXPORT_SYMBOL_GPL(ata_std_postreset); EXPORT_SYMBOL_GPL(ata_dev_classify); EXPORT_SYMBOL_GPL(ata_dev_pair); -EXPORT_SYMBOL_GPL(ata_port_disable); EXPORT_SYMBOL_GPL(ata_ratelimit); EXPORT_SYMBOL_GPL(ata_wait_register); -EXPORT_SYMBOL_GPL(ata_scsi_ioctl); EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); EXPORT_SYMBOL_GPL(ata_scsi_slave_config); EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
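For reference, the iteration hunks in this patch replace the old __ata_port_next_link() / ata_port_for_each_link() / ata_link_for_each_dev() scheme with ata_link_next() and ata_dev_next() plus the ata_for_each_link() / ata_for_each_dev() wrappers used at the converted call sites (ata_lpm_enable(), ata_bus_probe(), cable_is_40wire(), ata_do_set_mode()). The sketch below is a minimal usage example modelled on those call sites; it is not part of the patch, the wrapper macros are assumed to be defined alongside these helpers in <linux/libata.h>, and example_walk_port() is an illustrative name only.

#include <linux/libata.h>

/* Illustrative only: walk every enabled device on a port and disable it.
 * Per the kerneldoc added above, the iterators must be called under the
 * host lock or from EH context.
 */
static void example_walk_port(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/*
	 * EDGE mode: iterate the host link when no port multiplier is
	 * attached, otherwise the PMP links (see ata_link_next() above).
	 */
	ata_for_each_link(link, ap, EDGE) {
		/*
		 * ENABLED mode: ata_dev_next() skips devices for which
		 * ata_dev_enabled() returns false.
		 */
		ata_for_each_dev(dev, link, ENABLED)
			ata_dev_disable(dev);
	}
}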