2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
41 #include <linux/highmem.h>
42 #include <linux/spinlock.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/completion.h>
48 #include <linux/suspend.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/scatterlist.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_host.h>
55 #include <linux/libata.h>
57 #include <asm/semaphore.h>
58 #include <asm/byteorder.h>
62 #define DRV_VERSION "2.21" /* must be exactly four chars */
65 /* debounce timing parameters in msecs { interval, duration, timeout } */
66 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
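/*
 * Illustrative sketch (not part of the original file): each table above is
 * { interval, duration, timeout } in milliseconds.  A reset path would hand
 * one of them to the debounce helper, e.g. (assuming a sata_link_resume()
 * style helper is available in this tree):
 *
 *	rc = sata_link_resume(link, sata_deb_timing_normal, deadline);
 *
 * meaning: sample SStatus every 5 ms until it has been stable for 100 ms,
 * giving up after 2000 ms.
 */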
70 static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73 static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
74 static void ata_dev_xfermask(struct ata_device *dev);
75 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
77 unsigned int ata_print_id = 1;
78 static struct workqueue_struct *ata_wq;
80 struct workqueue_struct *ata_aux_wq;
82 int atapi_enabled = 1;
83 module_param(atapi_enabled, int, 0444);
84 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
87 module_param(atapi_dmadir, int, 0444);
88 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
90 int atapi_passthru16 = 1;
91 module_param(atapi_passthru16, int, 0444);
92 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
95 module_param_named(fua, libata_fua, int, 0444);
96 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
98 static int ata_ignore_hpa = 0;
99 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
100 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
102 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
103 module_param(ata_probe_timeout, int, 0444);
104 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
106 int libata_noacpi = 1;
107 module_param_named(noacpi, libata_noacpi, int, 0444);
108 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
110 MODULE_AUTHOR("Jeff Garzik");
111 MODULE_DESCRIPTION("Library module for ATA devices");
112 MODULE_LICENSE("GPL");
113 MODULE_VERSION(DRV_VERSION);
117 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
118 * @tf: Taskfile to convert
119 * @pmp: Port multiplier port
120 * @is_cmd: This FIS is for command
121 * @fis: Buffer into which data will be output
123 * Converts a standard ATA taskfile to a Serial ATA
124 * FIS structure (Register - Host to Device).
127 * Inherited from caller.
129 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
131 fis[0] = 0x27; /* Register - Host to Device FIS */
132 fis[1] = pmp & 0xf; /* Port multiplier number */
134 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
136 fis[2] = tf->command;
137 fis[3] = tf->feature;
144 fis[8] = tf->hob_lbal;
145 fis[9] = tf->hob_lbam;
146 fis[10] = tf->hob_lbah;
147 fis[11] = tf->hob_feature;
150 fis[13] = tf->hob_nsect;
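/*
 * Illustrative example (not from the original source): for a READ DMA EXT
 * command with is_cmd set and pmp 0, the first bytes of the resulting
 * Register - Host to Device FIS come out as
 *
 *	fis[0] == 0x27,  fis[1] == 0x80               (C bit set, PMP port 0)
 *	fis[2] == ATA_CMD_READ_EXT,  fis[3] == 0x00   (command, feature)
 *
 * with the LBA, device and count bytes following in fis[4..13] exactly as
 * assigned above.
 */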
161 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
162 * @fis: Buffer from which data will be input
163 * @tf: Taskfile to output
165 * Converts a serial ATA FIS structure to a standard ATA taskfile.
168 * Inherited from caller.
171 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
173 tf->command = fis[2]; /* status */
174 tf->feature = fis[3]; /* error */
181 tf->hob_lbal = fis[8];
182 tf->hob_lbam = fis[9];
183 tf->hob_lbah = fis[10];
186 tf->hob_nsect = fis[13];
189 static const u8 ata_rw_cmds[] = {
193 ATA_CMD_READ_MULTI_EXT,
194 ATA_CMD_WRITE_MULTI_EXT,
198 ATA_CMD_WRITE_MULTI_FUA_EXT,
202 ATA_CMD_PIO_READ_EXT,
203 ATA_CMD_PIO_WRITE_EXT,
216 ATA_CMD_WRITE_FUA_EXT
220 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
221 * @tf: command to examine and configure
222 * @dev: device tf belongs to
224 * Examine the device configuration and tf->flags to calculate
225 * the proper read/write commands and protocol to use.
230 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
234 int index, fua, lba48, write;
236 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
237 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
238 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
240 if (dev->flags & ATA_DFLAG_PIO) {
241 tf->protocol = ATA_PROT_PIO;
242 index = dev->multi_count ? 0 : 8;
243 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
244 /* Unable to use DMA due to host limitation */
245 tf->protocol = ATA_PROT_PIO;
246 index = dev->multi_count ? 0 : 8;
248 tf->protocol = ATA_PROT_DMA;
252 cmd = ata_rw_cmds[index + fua + lba48 + write];
261 * ata_tf_read_block - Read block address from ATA taskfile
262 * @tf: ATA taskfile of interest
263 * @dev: ATA device @tf belongs to
268 * Read block address from @tf. This function can handle all
269 * three address formats - LBA, LBA48 and CHS. tf->protocol and
270 * flags select the address format to use.
273 * Block address read from @tf.
275 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
279 if (tf->flags & ATA_TFLAG_LBA) {
280 if (tf->flags & ATA_TFLAG_LBA48) {
281 block |= (u64)tf->hob_lbah << 40;
282 block |= (u64)tf->hob_lbam << 32;
283 block |= tf->hob_lbal << 24;
285 block |= (tf->device & 0xf) << 24;
287 block |= tf->lbah << 16;
288 block |= tf->lbam << 8;
293 cyl = tf->lbam | (tf->lbah << 8);
294 head = tf->device & 0xf;
297 block = (cyl * dev->heads + head) * dev->sectors + sect;
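/*
 * Worked example (illustrative only): with dev->heads == 16 and
 * dev->sectors == 63, a taskfile carrying cyl 2, head 3, sect 5 gives
 *
 *	block = (2 * 16 + 3) * 63 + 5 = 2210
 *
 * i.e. the usual CHS-to-LBA arithmetic applied to the values read back
 * from the shadow registers.
 */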
304 * ata_build_rw_tf - Build ATA taskfile for given read/write request
305 * @tf: Target ATA taskfile
306 * @dev: ATA device @tf belongs to
307 * @block: Block address
308 * @n_block: Number of blocks
309 * @tf_flags: RW/FUA etc...
315 * Build ATA taskfile @tf for read/write request described by
316 * @block, @n_block, @tf_flags and @tag on @dev.
320 * 0 on success, -ERANGE if the request is too large for @dev,
321 * -EINVAL if the request is invalid.
323 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
324 u64 block, u32 n_block, unsigned int tf_flags,
327 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
328 tf->flags |= tf_flags;
330 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
332 if (!lba_48_ok(block, n_block))
335 tf->protocol = ATA_PROT_NCQ;
336 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
338 if (tf->flags & ATA_TFLAG_WRITE)
339 tf->command = ATA_CMD_FPDMA_WRITE;
341 tf->command = ATA_CMD_FPDMA_READ;
343 tf->nsect = tag << 3;
344 tf->hob_feature = (n_block >> 8) & 0xff;
345 tf->feature = n_block & 0xff;
347 tf->hob_lbah = (block >> 40) & 0xff;
348 tf->hob_lbam = (block >> 32) & 0xff;
349 tf->hob_lbal = (block >> 24) & 0xff;
350 tf->lbah = (block >> 16) & 0xff;
351 tf->lbam = (block >> 8) & 0xff;
352 tf->lbal = block & 0xff;
355 if (tf->flags & ATA_TFLAG_FUA)
356 tf->device |= 1 << 7;
357 } else if (dev->flags & ATA_DFLAG_LBA) {
358 tf->flags |= ATA_TFLAG_LBA;
360 if (lba_28_ok(block, n_block)) {
362 tf->device |= (block >> 24) & 0xf;
363 } else if (lba_48_ok(block, n_block)) {
364 if (!(dev->flags & ATA_DFLAG_LBA48))
368 tf->flags |= ATA_TFLAG_LBA48;
370 tf->hob_nsect = (n_block >> 8) & 0xff;
372 tf->hob_lbah = (block >> 40) & 0xff;
373 tf->hob_lbam = (block >> 32) & 0xff;
374 tf->hob_lbal = (block >> 24) & 0xff;
376 /* request too large even for LBA48 */
379 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
382 tf->nsect = n_block & 0xff;
384 tf->lbah = (block >> 16) & 0xff;
385 tf->lbam = (block >> 8) & 0xff;
386 tf->lbal = block & 0xff;
388 tf->device |= ATA_LBA;
391 u32 sect, head, cyl, track;
393 /* The request -may- be too large for CHS addressing. */
394 if (!lba_28_ok(block, n_block))
397 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
400 /* Convert LBA to CHS */
401 track = (u32)block / dev->sectors;
402 cyl = track / dev->heads;
403 head = track % dev->heads;
404 sect = (u32)block % dev->sectors + 1;
406 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
407 (u32)block, track, cyl, head, sect);
409 /* Check whether the converted CHS can fit. */
413 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
416 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
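/*
 * Worked example (illustrative only): with dev->heads == 16 and
 * dev->sectors == 63, block 4000 converts as
 *
 *	track = 4000 / 63 = 63,   cyl  = 63 / 16 = 3,
 *	head  = 63 % 16   = 15,   sect = 4000 % 63 + 1 = 32
 *
 * which then still has to pass the cyl/head/sect range checks above.
 */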
427 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
428 * @pio_mask: pio_mask
429 * @mwdma_mask: mwdma_mask
430 * @udma_mask: udma_mask
432 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
433 * unsigned int xfer_mask.
441 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
442 unsigned int mwdma_mask,
443 unsigned int udma_mask)
445 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
446 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
447 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
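/*
 * Illustrative example (not from the original source): packing PIO 0-4,
 * MWDMA 0-2 and UDMA 0-5 support into one word and taking it apart again
 * with ata_unpack_xfermask() below:
 *
 *	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	unsigned int pio, mwdma, udma;
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	(pio == 0x1f, mwdma == 0x07 and udma == 0x3f again)
 *
 * The ATA_SHIFT_x / ATA_MASK_x constants keep the three bit-fields disjoint.
 */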
451 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
452 * @xfer_mask: xfer_mask to unpack
453 * @pio_mask: resulting pio_mask
454 * @mwdma_mask: resulting mwdma_mask
455 * @udma_mask: resulting udma_mask
457 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
458 * Any NULL destination masks will be ignored.
460 static void ata_unpack_xfermask(unsigned int xfer_mask,
461 unsigned int *pio_mask,
462 unsigned int *mwdma_mask,
463 unsigned int *udma_mask)
466 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
468 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
470 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
473 static const struct ata_xfer_ent {
477 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
478 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
479 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
484 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
485 * @xfer_mask: xfer_mask of interest
487 * Return matching XFER_* value for @xfer_mask. Only the highest
488 * bit of @xfer_mask is considered.
494 * Matching XFER_* value, 0 if no match found.
496 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
498 int highbit = fls(xfer_mask) - 1;
499 const struct ata_xfer_ent *ent;
501 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
502 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
503 return ent->base + highbit - ent->shift;
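/*
 * Illustrative example (not from the original source): only the highest set
 * bit matters here.  A mask with UDMA modes 0-2 set, i.e.
 * ata_pack_xfermask(0, 0, 0x07), has its top bit at ATA_SHIFT_UDMA + 2 and
 * therefore maps to XFER_UDMA_2; ata_xfer_mode2mask(XFER_UDMA_2) below hands
 * the single bit for that mode back.
 */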
508 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
509 * @xfer_mode: XFER_* of interest
511 * Return matching xfer_mask for @xfer_mode.
517 * Matching xfer_mask, 0 if no match found.
519 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
521 const struct ata_xfer_ent *ent;
523 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
524 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
525 return 1 << (ent->shift + xfer_mode - ent->base);
530 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
531 * @xfer_mode: XFER_* of interest
533 * Return matching xfer_shift for @xfer_mode.
539 * Matching xfer_shift, -1 if no match found.
541 static int ata_xfer_mode2shift(unsigned int xfer_mode)
543 const struct ata_xfer_ent *ent;
545 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
546 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
552 * ata_mode_string - convert xfer_mask to string
553 * @xfer_mask: mask of bits supported; only highest bit counts.
555 * Determine string which represents the highest speed
556 * (highest bit in @xfer_mask).
562 * Constant C string representing highest speed listed in
563 * @xfer_mask, or the constant C string "<n/a>".
565 static const char *ata_mode_string(unsigned int xfer_mask)
567 static const char * const xfer_mode_str[] = {
591 highbit = fls(xfer_mask) - 1;
592 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
593 return xfer_mode_str[highbit];
597 static const char *sata_spd_string(unsigned int spd)
599 static const char * const spd_str[] = {
604 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
606 return spd_str[spd - 1];
609 void ata_dev_disable(struct ata_device *dev)
611 if (ata_dev_enabled(dev)) {
612 if (ata_msg_drv(dev->link->ap))
613 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
614 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
621 * ata_devchk - PATA device presence detection
622 * @ap: ATA channel to examine
623 * @device: Device to examine (starting at zero)
625 * This technique was originally described in
626 * Hale Landis's ATADRVR (www.ata-atapi.com), and
627 * later found its way into the ATA/ATAPI spec.
629 * Write a pattern to the ATA shadow registers,
630 * and if a device is present, it will respond by
631 * correctly storing and echoing back the
632 * ATA shadow register contents.
638 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
640 struct ata_ioports *ioaddr = &ap->ioaddr;
643 ap->ops->dev_select(ap, device);
645 iowrite8(0x55, ioaddr->nsect_addr);
646 iowrite8(0xaa, ioaddr->lbal_addr);
648 iowrite8(0xaa, ioaddr->nsect_addr);
649 iowrite8(0x55, ioaddr->lbal_addr);
651 iowrite8(0x55, ioaddr->nsect_addr);
652 iowrite8(0xaa, ioaddr->lbal_addr);
654 nsect = ioread8(ioaddr->nsect_addr);
655 lbal = ioread8(ioaddr->lbal_addr);
657 if ((nsect == 0x55) && (lbal == 0xaa))
658 return 1; /* we found a device */
660 return 0; /* nothing found */
664 * ata_dev_classify - determine device type based on ATA-spec signature
665 * @tf: ATA taskfile register set for device to be identified
667 * Determine from taskfile register contents whether a device is
668 * ATA or ATAPI, as per "Signature and persistence" section
669 * of ATA/PI spec (volume 1, sect 5.14).
675 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
676 * in the event of failure.
679 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
681 /* Apple's open source Darwin code hints that some devices only
682 * put a proper signature into the LBA mid/high registers,
683 * so we only check those. It's sufficient for uniqueness. */
686 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
687 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
688 DPRINTK("found ATA device by sig\n");
692 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
693 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
694 DPRINTK("found ATAPI device by sig\n");
695 return ATA_DEV_ATAPI;
698 DPRINTK("unknown device\n");
699 return ATA_DEV_UNKNOWN;
703 * ata_dev_try_classify - Parse returned ATA device signature
704 * @dev: ATA device to classify (starting at zero)
705 * @present: device seems present
706 * @r_err: Value of error register on completion
708 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
709 * an ATA/ATAPI-defined set of values is placed in the ATA
710 * shadow registers, indicating the results of device detection
713 * Select the ATA device, and read the values from the ATA shadow
714 * registers. Then parse according to the Error register value,
715 * and the spec-defined values examined by ata_dev_classify().
721 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
723 unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
726 struct ata_port *ap = dev->link->ap;
727 struct ata_taskfile tf;
731 ap->ops->dev_select(ap, dev->devno);
733 memset(&tf, 0, sizeof(tf));
735 ap->ops->tf_read(ap, &tf);
740 /* see if device passed diags: if master then continue and warn later */
741 if (err == 0 && dev->devno == 0)
742 /* diagnostic fail : do nothing _YET_ */
743 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
746 else if ((dev->devno == 0) && (err == 0x81))
751 /* determine if device is ATA or ATAPI */
752 class = ata_dev_classify(&tf);
754 if (class == ATA_DEV_UNKNOWN)
756 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
762 * ata_id_string - Convert IDENTIFY DEVICE page into string
763 * @id: IDENTIFY DEVICE results we will examine
764 * @s: string into which data is output
765 * @ofs: offset into identify device page
766 * @len: length of string to return. must be an even number.
768 * The strings in the IDENTIFY DEVICE page are broken up into
769 * 16-bit chunks. Run through the string, and output each
770 * 8-bit chunk linearly, regardless of platform.
776 void ata_id_string(const u16 *id, unsigned char *s,
777 unsigned int ofs, unsigned int len)
796 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
797 * @id: IDENTIFY DEVICE results we will examine
798 * @s: string into which data is output
799 * @ofs: offset into identify device page
800 * @len: length of string to return. must be an odd number.
802 * This function is identical to ata_id_string except that it
803 * trims trailing spaces and terminates the resulting string with
804 * null. @len must be actual maximum length (even number) + 1.
809 void ata_id_c_string(const u16 *id, unsigned char *s,
810 unsigned int ofs, unsigned int len)
816 ata_id_string(id, s, ofs, len - 1);
818 p = s + strnlen(s, len - 1);
819 while (p > s && p[-1] == ' ')
824 static u64 ata_id_n_sectors(const u16 *id)
826 if (ata_id_has_lba(id)) {
827 if (ata_id_has_lba48(id))
828 return ata_id_u64(id, 100);
830 return ata_id_u32(id, 60);
832 if (ata_id_current_chs_valid(id))
833 return ata_id_u32(id, 57);
835 return id[1] * id[3] * id[6];
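/*
 * Illustrative note (not from the original source): the IDENTIFY words used
 * above are the spec-defined capacity fields -- words 100-103 for LBA48,
 * words 60-61 for 28-bit LBA, words 57-58 for the current CHS capacity and
 * words 1/3/6 (default cylinders/heads/sectors) as the last resort, e.g.
 *
 *	16383 * 16 * 63 = 16514064 sectors
 *
 * for the common 16383/16/63 default geometry.
 */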
839 static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
843 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
844 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
845 sectors |= (tf->hob_lbal & 0xff) << 24;
846 sectors |= (tf->lbah & 0xff) << 16;
847 sectors |= (tf->lbam & 0xff) << 8;
848 sectors |= (tf->lbal & 0xff);
853 static u64 ata_tf_to_lba(struct ata_taskfile *tf)
857 sectors |= (tf->device & 0x0f) << 24;
858 sectors |= (tf->lbah & 0xff) << 16;
859 sectors |= (tf->lbam & 0xff) << 8;
860 sectors |= (tf->lbal & 0xff);
866 * ata_read_native_max_address - Read native max address
867 * @dev: target device
868 * @max_sectors: out parameter for the result native max address
870 * Perform an LBA48 or LBA28 native size query upon the device in question.
874 * 0 on success, -EACCES if command is aborted by the drive.
875 * -EIO on other errors.
877 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
879 unsigned int err_mask;
880 struct ata_taskfile tf;
881 int lba48 = ata_id_has_lba48(dev->id);
883 ata_tf_init(dev, &tf);
885 /* always clear all address registers */
886 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
889 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
890 tf.flags |= ATA_TFLAG_LBA48;
892 tf.command = ATA_CMD_READ_NATIVE_MAX;
894 tf.protocol |= ATA_PROT_NODATA;
895 tf.device |= ATA_LBA;
897 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
899 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
900 "max address (err_mask=0x%x)\n", err_mask);
901 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
907 *max_sectors = ata_tf_to_lba48(&tf);
909 *max_sectors = ata_tf_to_lba(&tf);
915 * ata_set_max_sectors - Set max sectors
916 * @dev: target device
917 * @new_sectors: new max sectors value to set for the device
919 * Set max sectors of @dev to @new_sectors.
922 * 0 on success, -EACCES if command is aborted or denied (due to
923 * previous non-volatile SET_MAX) by the drive. -EIO on other
926 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
928 unsigned int err_mask;
929 struct ata_taskfile tf;
930 int lba48 = ata_id_has_lba48(dev->id);
934 ata_tf_init(dev, &tf);
936 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
939 tf.command = ATA_CMD_SET_MAX_EXT;
940 tf.flags |= ATA_TFLAG_LBA48;
942 tf.hob_lbal = (new_sectors >> 24) & 0xff;
943 tf.hob_lbam = (new_sectors >> 32) & 0xff;
944 tf.hob_lbah = (new_sectors >> 40) & 0xff;
946 tf.command = ATA_CMD_SET_MAX;
948 tf.protocol |= ATA_PROT_NODATA;
949 tf.device |= ATA_LBA;
951 tf.lbal = (new_sectors >> 0) & 0xff;
952 tf.lbam = (new_sectors >> 8) & 0xff;
953 tf.lbah = (new_sectors >> 16) & 0xff;
955 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
957 ata_dev_printk(dev, KERN_WARNING, "failed to set "
958 "max address (err_mask=0x%x)\n", err_mask);
959 if (err_mask == AC_ERR_DEV &&
960 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
969 * ata_hpa_resize - Resize a device with an HPA set
970 * @dev: Device to resize
972 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
973 * it if required to the full size of the media. The caller must check
974 * the drive has the HPA feature set enabled.
977 * 0 on success, -errno on failure.
979 static int ata_hpa_resize(struct ata_device *dev)
981 struct ata_eh_context *ehc = &dev->link->eh_context;
982 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
983 u64 sectors = ata_id_n_sectors(dev->id);
987 /* do we need to do it? */
988 if (dev->class != ATA_DEV_ATA ||
989 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
990 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
993 /* read native max address */
994 rc = ata_read_native_max_address(dev, &native_sectors);
996 /* If HPA isn't going to be unlocked, skip HPA
997 * resizing from the next try.
999 if (!ata_ignore_hpa) {
1000 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1001 "broken, will skip HPA handling\n");
1002 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1004 /* we can continue if device aborted the command */
1012 /* nothing to do? */
1013 if (native_sectors <= sectors || !ata_ignore_hpa) {
1014 if (!print_info || native_sectors == sectors)
1017 if (native_sectors > sectors)
1018 ata_dev_printk(dev, KERN_INFO,
1019 "HPA detected: current %llu, native %llu\n",
1020 (unsigned long long)sectors,
1021 (unsigned long long)native_sectors);
1022 else if (native_sectors < sectors)
1023 ata_dev_printk(dev, KERN_WARNING,
1024 "native sectors (%llu) is smaller than "
1026 (unsigned long long)native_sectors,
1027 (unsigned long long)sectors);
1031 /* let's unlock HPA */
1032 rc = ata_set_max_sectors(dev, native_sectors);
1033 if (rc == -EACCES) {
1034 /* if device aborted the command, skip HPA resizing */
1035 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1036 "(%llu -> %llu), skipping HPA handling\n",
1037 (unsigned long long)sectors,
1038 (unsigned long long)native_sectors);
1039 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1044 /* re-read IDENTIFY data */
1045 rc = ata_dev_reread_id(dev, 0);
1047 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1048 "data after HPA resizing\n");
1053 u64 new_sectors = ata_id_n_sectors(dev->id);
1054 ata_dev_printk(dev, KERN_INFO,
1055 "HPA unlocked: %llu -> %llu, native %llu\n",
1056 (unsigned long long)sectors,
1057 (unsigned long long)new_sectors,
1058 (unsigned long long)native_sectors);
1065 * ata_id_to_dma_mode - Identify DMA mode from id block
1066 * @dev: device to identify
1067 * @unknown: mode to assume if we cannot tell
1069 * Set up the timing values for the device based upon the identify
1070 * reported values for the DMA mode. This function is used by drivers
1071 * which rely upon firmware configured modes, but wish to report the
1072 * mode correctly when possible.
1074 * In addition we emit similarly formatted messages to the default
1075 * ata_dev_set_mode handler, in order to provide consistency of presentation.
1079 void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1084 /* Pack the DMA modes */
1085 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1086 if (dev->id[53] & 0x04)
1087 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1089 /* Select the mode in use */
1090 mode = ata_xfer_mask2mode(mask);
1093 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1094 ata_mode_string(mask));
1096 /* SWDMA perhaps ? */
1098 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1101 /* Configure the device reporting */
1102 dev->xfer_mode = mode;
1103 dev->xfer_shift = ata_xfer_mode2shift(mode);
1107 * ata_noop_dev_select - Select device 0/1 on ATA bus
1108 * @ap: ATA channel to manipulate
1109 * @device: ATA device (numbered from zero) to select
1111 * This function performs no actual function.
1113 * May be used as the dev_select() entry in ata_port_operations.
1118 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
1124 * ata_std_dev_select - Select device 0/1 on ATA bus
1125 * @ap: ATA channel to manipulate
1126 * @device: ATA device (numbered from zero) to select
1128 * Use the method defined in the ATA specification to
1129 * make either device 0, or device 1, active on the
1130 * ATA channel. Works with both PIO and MMIO.
1132 * May be used as the dev_select() entry in ata_port_operations.
1138 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1143 tmp = ATA_DEVICE_OBS;
1145 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1147 iowrite8(tmp, ap->ioaddr.device_addr);
1148 ata_pause(ap); /* needed; also flushes, for mmio */
1152 * ata_dev_select - Select device 0/1 on ATA bus
1153 * @ap: ATA channel to manipulate
1154 * @device: ATA device (numbered from zero) to select
1155 * @wait: non-zero to wait for Status register BSY bit to clear
1156 * @can_sleep: non-zero if context allows sleeping
1158 * Use the method defined in the ATA specification to
1159 * make either device 0, or device 1, active on the
1162 * This is a high-level version of ata_std_dev_select(),
1163 * which additionally provides the services of inserting
1164 * the proper pauses and status polling, where needed.
1170 void ata_dev_select(struct ata_port *ap, unsigned int device,
1171 unsigned int wait, unsigned int can_sleep)
1173 if (ata_msg_probe(ap))
1174 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1175 "device %u, wait %u\n", device, wait);
1180 ap->ops->dev_select(ap, device);
1183 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1190 * ata_dump_id - IDENTIFY DEVICE info debugging output
1191 * @id: IDENTIFY DEVICE page to dump
1193 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1200 static inline void ata_dump_id(const u16 *id)
1202 DPRINTK("49==0x%04x "
1212 DPRINTK("80==0x%04x "
1222 DPRINTK("88==0x%04x "
1229 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1230 * @id: IDENTIFY data to compute xfer mask from
1232 * Compute the xfermask for this device. This is not as trivial
1233 * as it seems if we must consider early devices correctly.
1235 * FIXME: pre IDE drive timing (do we care ?).
1243 static unsigned int ata_id_xfermask(const u16 *id)
1245 unsigned int pio_mask, mwdma_mask, udma_mask;
1247 /* Usual case. Word 53 indicates word 64 is valid */
1248 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1249 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1253 /* If word 64 isn't valid then Word 51 high byte holds
1254 * the PIO timing number for the maximum. Turn it into a mask. */
1257 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1258 if (mode < 5) /* Valid PIO range */
1259 pio_mask = (2 << mode) - 1;
1263 /* But wait.. there's more. Design your standards by
1264 * committee and you too can get a free iordy field to
1265 * process. However it's the speeds, not the modes, that
1266 * are supported... Note drivers using the timing API
1267 * will get this right anyway
1271 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1273 if (ata_id_is_cfa(id)) {
1275 /* Process compact flash extended modes */
1277 int pio = id[163] & 0x7;
1278 int dma = (id[163] >> 3) & 7;
1281 pio_mask |= (1 << 5);
1283 pio_mask |= (1 << 6);
1285 mwdma_mask |= (1 << 3);
1287 mwdma_mask |= (1 << 4);
1291 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1292 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1294 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
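/*
 * Illustrative example (not from the original source): a typical UDMA/100
 * drive reports PIO modes 3-4 in word 64, 0x0007 in word 63 and 0x003f in
 * the low byte of word 88, so the code above would hand back roughly
 *
 *	ata_pack_xfermask(0x1f, 0x07, 0x3f)
 *
 * (PIO 0-4, MWDMA 0-2, UDMA 0-5); the exact PIO bits depend on the word 64
 * validity and CFA handling above.
 */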
1298 * ata_port_queue_task - Queue port_task
1299 * @ap: The ata_port to queue port_task for
1300 * @fn: workqueue function to be scheduled
1301 * @data: data for @fn to use
1302 * @delay: delay time for workqueue function
1304 * Schedule @fn(@data) for execution after @delay jiffies using
1305 * port_task. There is one port_task per port and it's the
1306 * user(low level driver)'s responsibility to make sure that only
1307 * one task is active at any given time.
1309 * libata core layer takes care of synchronization between
1310 * port_task and EH. ata_port_queue_task() may be ignored for the EH path.
1314 * Inherited from caller.
1316 void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1317 unsigned long delay)
1319 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1320 ap->port_task_data = data;
1322 /* may fail if ata_port_flush_task() in progress */
1323 queue_delayed_work(ata_wq, &ap->port_task, delay);
1327 * ata_port_flush_task - Flush port_task
1328 * @ap: The ata_port to flush port_task for
1330 * After this function completes, port_task is guaranteed not to
1331 * be running or scheduled.
1334 * Kernel thread context (may sleep)
1336 void ata_port_flush_task(struct ata_port *ap)
1340 cancel_rearming_delayed_work(&ap->port_task);
1342 if (ata_msg_ctl(ap))
1343 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1346 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1348 struct completion *waiting = qc->private_data;
1354 * ata_exec_internal_sg - execute libata internal command
1355 * @dev: Device to which the command is sent
1356 * @tf: Taskfile registers for the command and the result
1357 * @cdb: CDB for packet command
1358 * @dma_dir: Data transfer direction of the command
1359 * @sg: sg list for the data buffer of the command
1360 * @n_elem: Number of sg entries
1362 * Executes libata internal command with timeout. @tf contains
1363 * command on entry and result on return. Timeout and error
1364 * conditions are reported via return value. No recovery action
1365 * is taken after a command times out. It's the caller's duty to
1366 * clean up after timeout.
1369 * None. Should be called with kernel context, might sleep.
1372 * Zero on success, AC_ERR_* mask on failure
1374 unsigned ata_exec_internal_sg(struct ata_device *dev,
1375 struct ata_taskfile *tf, const u8 *cdb,
1376 int dma_dir, struct scatterlist *sg,
1377 unsigned int n_elem)
1379 struct ata_link *link = dev->link;
1380 struct ata_port *ap = link->ap;
1381 u8 command = tf->command;
1382 struct ata_queued_cmd *qc;
1383 unsigned int tag, preempted_tag;
1384 u32 preempted_sactive, preempted_qc_active;
1385 DECLARE_COMPLETION_ONSTACK(wait);
1386 unsigned long flags;
1387 unsigned int err_mask;
1390 spin_lock_irqsave(ap->lock, flags);
1392 /* no internal command while frozen */
1393 if (ap->pflags & ATA_PFLAG_FROZEN) {
1394 spin_unlock_irqrestore(ap->lock, flags);
1395 return AC_ERR_SYSTEM;
1398 /* initialize internal qc */
1400 /* XXX: Tag 0 is used for drivers with legacy EH as some
1401 * drivers choke if any other tag is given. This breaks
1402 * ata_tag_internal() test for those drivers. Don't use new
1403 * EH stuff without converting to it.
1405 if (ap->ops->error_handler)
1406 tag = ATA_TAG_INTERNAL;
1410 if (test_and_set_bit(tag, &ap->qc_allocated))
1412 qc = __ata_qc_from_tag(ap, tag);
1420 preempted_tag = link->active_tag;
1421 preempted_sactive = link->sactive;
1422 preempted_qc_active = ap->qc_active;
1423 link->active_tag = ATA_TAG_POISON;
1427 /* prepare & issue qc */
1430 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1431 qc->flags |= ATA_QCFLAG_RESULT_TF;
1432 qc->dma_dir = dma_dir;
1433 if (dma_dir != DMA_NONE) {
1434 unsigned int i, buflen = 0;
1436 for (i = 0; i < n_elem; i++)
1437 buflen += sg[i].length;
1439 ata_sg_init(qc, sg, n_elem);
1440 qc->nbytes = buflen;
1443 qc->private_data = &wait;
1444 qc->complete_fn = ata_qc_complete_internal;
1448 spin_unlock_irqrestore(ap->lock, flags);
1450 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1452 ata_port_flush_task(ap);
1455 spin_lock_irqsave(ap->lock, flags);
1457 /* We're racing with irq here. If we lose, the
1458 * following test prevents us from completing the qc
1459 * twice. If we win, the port is frozen and will be
1460 * cleaned up by ->post_internal_cmd().
1462 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1463 qc->err_mask |= AC_ERR_TIMEOUT;
1465 if (ap->ops->error_handler)
1466 ata_port_freeze(ap);
1468 ata_qc_complete(qc);
1470 if (ata_msg_warn(ap))
1471 ata_dev_printk(dev, KERN_WARNING,
1472 "qc timeout (cmd 0x%x)\n", command);
1475 spin_unlock_irqrestore(ap->lock, flags);
1478 /* do post_internal_cmd */
1479 if (ap->ops->post_internal_cmd)
1480 ap->ops->post_internal_cmd(qc);
1482 /* perform minimal error analysis */
1483 if (qc->flags & ATA_QCFLAG_FAILED) {
1484 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1485 qc->err_mask |= AC_ERR_DEV;
1488 qc->err_mask |= AC_ERR_OTHER;
1490 if (qc->err_mask & ~AC_ERR_OTHER)
1491 qc->err_mask &= ~AC_ERR_OTHER;
1495 spin_lock_irqsave(ap->lock, flags);
1497 *tf = qc->result_tf;
1498 err_mask = qc->err_mask;
1501 link->active_tag = preempted_tag;
1502 link->sactive = preempted_sactive;
1503 ap->qc_active = preempted_qc_active;
1505 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1506 * Until those drivers are fixed, we detect the condition
1507 * here, fail the command with AC_ERR_SYSTEM and reenable the port.
1510 * Note that this doesn't change any behavior as internal
1511 * command failure results in disabling the device in the
1512 * higher layer for LLDDs without new reset/EH callbacks.
1514 * Kill the following code as soon as those drivers are fixed.
1516 if (ap->flags & ATA_FLAG_DISABLED) {
1517 err_mask |= AC_ERR_SYSTEM;
1521 spin_unlock_irqrestore(ap->lock, flags);
1527 * ata_exec_internal - execute libata internal command
1528 * @dev: Device to which the command is sent
1529 * @tf: Taskfile registers for the command and the result
1530 * @cdb: CDB for packet command
1531 * @dma_dir: Data transfer direction of the command
1532 * @buf: Data buffer of the command
1533 * @buflen: Length of data buffer
1535 * Wrapper around ata_exec_internal_sg() which takes simple
1536 * buffer instead of sg list.
1539 * None. Should be called with kernel context, might sleep.
1542 * Zero on success, AC_ERR_* mask on failure
1544 unsigned ata_exec_internal(struct ata_device *dev,
1545 struct ata_taskfile *tf, const u8 *cdb,
1546 int dma_dir, void *buf, unsigned int buflen)
1548 struct scatterlist *psg = NULL, sg;
1549 unsigned int n_elem = 0;
1551 if (dma_dir != DMA_NONE) {
1553 sg_init_one(&sg, buf, buflen);
1558 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
1562 * ata_do_simple_cmd - execute simple internal command
1563 * @dev: Device to which the command is sent
1564 * @cmd: Opcode to execute
1566 * Execute a 'simple' command, that only consists of the opcode
1567 * 'cmd' itself, without filling any other registers
1570 * Kernel thread context (may sleep).
1573 * Zero on success, AC_ERR_* mask on failure
1575 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1577 struct ata_taskfile tf;
1579 ata_tf_init(dev, &tf);
1582 tf.flags |= ATA_TFLAG_DEVICE;
1583 tf.protocol = ATA_PROT_NODATA;
1585 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
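/*
 * Illustrative usage sketch (not from the original source): this is meant
 * for register-only commands, e.g. spinning a disk down:
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR, "STANDBY IMMEDIATE failed\n");
 *
 * ATA_CMD_STANDBYNOW1 is the STANDBY IMMEDIATE opcode from <linux/ata.h>.
 */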
1589 * ata_pio_need_iordy - check if iordy needed
1592 * Check if the current speed of the device requires IORDY. Used
1593 * by various controllers for chip configuration.
1596 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1598 /* Controller doesn't support IORDY. Probably a pointless check
1599 as the caller should know this */
1600 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1602 /* PIO3 and higher it is mandatory */
1603 if (adev->pio_mode > XFER_PIO_2)
1605 /* We turn it on when possible */
1606 if (ata_id_has_iordy(adev->id))
1612 * ata_pio_mask_no_iordy - Return the non IORDY mask
1615 * Compute the highest mode possible if we are not using iordy. Return
1616 * -1 if no iordy mode is available.
1619 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1621 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1622 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1623 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1624 /* Is the speed faster than the drive allows non IORDY ? */
1626 /* This is cycle times not frequency - watch the logic! */
1627 if (pio > 240) /* PIO2 is 240nS per cycle */
1628 return 3 << ATA_SHIFT_PIO;
1629 return 7 << ATA_SHIFT_PIO;
1632 return 3 << ATA_SHIFT_PIO;
1636 * ata_dev_read_id - Read ID data from the specified device
1637 * @dev: target device
1638 * @p_class: pointer to class of the target device (may be changed)
1639 * @flags: ATA_READID_* flags
1640 * @id: buffer to read IDENTIFY data into
1642 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1643 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1644 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1645 * for pre-ATA4 drives.
1647 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1648 * now we abort if we hit that case.
1651 * Kernel thread context (may sleep)
1654 * 0 on success, -errno otherwise.
1656 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1657 unsigned int flags, u16 *id)
1659 struct ata_port *ap = dev->link->ap;
1660 unsigned int class = *p_class;
1661 struct ata_taskfile tf;
1662 unsigned int err_mask = 0;
1664 int may_fallback = 1, tried_spinup = 0;
1667 if (ata_msg_ctl(ap))
1668 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1670 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1672 ata_tf_init(dev, &tf);
1676 tf.command = ATA_CMD_ID_ATA;
1679 tf.command = ATA_CMD_ID_ATAPI;
1683 reason = "unsupported class";
1687 tf.protocol = ATA_PROT_PIO;
1689 /* Some devices choke if TF registers contain garbage. Make
1690 * sure those are properly initialized.
1692 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1694 /* Device presence detection is unreliable on some
1695 * controllers. Always poll IDENTIFY if available.
1697 tf.flags |= ATA_TFLAG_POLLING;
1699 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1700 id, sizeof(id[0]) * ATA_ID_WORDS);
1702 if (err_mask & AC_ERR_NODEV_HINT) {
1703 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1704 ap->print_id, dev->devno);
1708 /* Device or controller might have reported the wrong
1709 * device class. Give a shot at the other IDENTIFY if
1710 * the current one is aborted by the device.
1713 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1716 if (class == ATA_DEV_ATA)
1717 class = ATA_DEV_ATAPI;
1719 class = ATA_DEV_ATA;
1724 reason = "I/O error";
1728 /* Falling back doesn't make sense if ID data was read
1729 * successfully at least once.
1733 swap_buf_le16(id, ATA_ID_WORDS);
1737 reason = "device reports invalid type";
1739 if (class == ATA_DEV_ATA) {
1740 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1743 if (ata_id_is_ata(id))
1747 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1750 * Drive powered-up in standby mode, and requires a specific
1751 * SET_FEATURES spin-up subcommand before it will accept
1752 * anything other than the original IDENTIFY command.
1754 ata_tf_init(dev, &tf);
1755 tf.command = ATA_CMD_SET_FEATURES;
1756 tf.feature = SETFEATURES_SPINUP;
1757 tf.protocol = ATA_PROT_NODATA;
1758 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1759 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1760 if (err_mask && id[2] != 0x738c) {
1762 reason = "SPINUP failed";
1766 * If the drive initially returned incomplete IDENTIFY info,
1767 * we now must reissue the IDENTIFY command.
1769 if (id[2] == 0x37c8)
1773 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1775 * The exact sequence expected by certain pre-ATA4 drives is:
1777 * IDENTIFY (optional in early ATA)
1778 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
1780 * Some drives were very specific about that exact sequence.
1782 * Note that ATA4 says lba is mandatory so the second check
1783 * should never trigger. */
1785 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1786 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1789 reason = "INIT_DEV_PARAMS failed";
1793 /* current CHS translation info (id[53-58]) might be
1794 * changed. Reread the identify device info. */
1796 flags &= ~ATA_READID_POSTRESET;
1806 if (ata_msg_warn(ap))
1807 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1808 "(%s, err_mask=0x%x)\n", reason, err_mask);
1812 static inline u8 ata_dev_knobble(struct ata_device *dev)
1814 struct ata_port *ap = dev->link->ap;
1815 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1818 static void ata_dev_config_ncq(struct ata_device *dev,
1819 char *desc, size_t desc_sz)
1821 struct ata_port *ap = dev->link->ap;
1822 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1824 if (!ata_id_has_ncq(dev->id)) {
1828 if (dev->horkage & ATA_HORKAGE_NONCQ) {
1829 snprintf(desc, desc_sz, "NCQ (not used)");
1832 if (ap->flags & ATA_FLAG_NCQ) {
1833 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1834 dev->flags |= ATA_DFLAG_NCQ;
1837 if (hdepth >= ddepth)
1838 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1840 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1844 * ata_dev_configure - Configure the specified ATA/ATAPI device
1845 * @dev: Target device to configure
1847 * Configure @dev according to @dev->id. Generic and low-level
1848 * driver specific fixups are also applied.
1851 * Kernel thread context (may sleep)
1854 * 0 on success, -errno otherwise
1856 int ata_dev_configure(struct ata_device *dev)
1858 struct ata_port *ap = dev->link->ap;
1859 struct ata_eh_context *ehc = &dev->link->eh_context;
1860 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1861 const u16 *id = dev->id;
1862 unsigned int xfer_mask;
1863 char revbuf[7]; /* XYZ-99\0 */
1864 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1865 char modelbuf[ATA_ID_PROD_LEN+1];
1868 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1869 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1874 if (ata_msg_probe(ap))
1875 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1878 dev->horkage |= ata_dev_blacklisted(dev);
1880 /* let ACPI work its magic */
1881 rc = ata_acpi_on_devcfg(dev);
1885 /* massage HPA, do it early as it might change IDENTIFY data */
1886 rc = ata_hpa_resize(dev);
1890 /* print device capabilities */
1891 if (ata_msg_probe(ap))
1892 ata_dev_printk(dev, KERN_DEBUG,
1893 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1894 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1896 id[49], id[82], id[83], id[84],
1897 id[85], id[86], id[87], id[88]);
1899 /* initialize to-be-configured parameters */
1900 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1901 dev->max_sectors = 0;
1909 * common ATA, ATAPI feature tests
1912 /* find max transfer mode; for printk only */
1913 xfer_mask = ata_id_xfermask(id);
1915 if (ata_msg_probe(ap))
1918 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1919 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1922 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1925 /* ATA-specific feature tests */
1926 if (dev->class == ATA_DEV_ATA) {
1927 if (ata_id_is_cfa(id)) {
1928 if (id[162] & 1) /* CPRM may make this media unusable */
1929 ata_dev_printk(dev, KERN_WARNING,
1930 "supports DRM functions and may "
1931 "not be fully accessible.\n");
1932 snprintf(revbuf, 7, "CFA");
1935 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1937 dev->n_sectors = ata_id_n_sectors(id);
1939 if (dev->id[59] & 0x100)
1940 dev->multi_count = dev->id[59] & 0xff;
1942 if (ata_id_has_lba(id)) {
1943 const char *lba_desc;
1947 dev->flags |= ATA_DFLAG_LBA;
1948 if (ata_id_has_lba48(id)) {
1949 dev->flags |= ATA_DFLAG_LBA48;
1952 if (dev->n_sectors >= (1UL << 28) &&
1953 ata_id_has_flush_ext(id))
1954 dev->flags |= ATA_DFLAG_FLUSH_EXT;
1958 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1960 /* print device info to dmesg */
1961 if (ata_msg_drv(ap) && print_info) {
1962 ata_dev_printk(dev, KERN_INFO,
1963 "%s: %s, %s, max %s\n",
1964 revbuf, modelbuf, fwrevbuf,
1965 ata_mode_string(xfer_mask));
1966 ata_dev_printk(dev, KERN_INFO,
1967 "%Lu sectors, multi %u: %s %s\n",
1968 (unsigned long long)dev->n_sectors,
1969 dev->multi_count, lba_desc, ncq_desc);
1974 /* Default translation */
1975 dev->cylinders = id[1];
1977 dev->sectors = id[6];
1979 if (ata_id_current_chs_valid(id)) {
1980 /* Current CHS translation is valid. */
1981 dev->cylinders = id[54];
1982 dev->heads = id[55];
1983 dev->sectors = id[56];
1986 /* print device info to dmesg */
1987 if (ata_msg_drv(ap) && print_info) {
1988 ata_dev_printk(dev, KERN_INFO,
1989 "%s: %s, %s, max %s\n",
1990 revbuf, modelbuf, fwrevbuf,
1991 ata_mode_string(xfer_mask));
1992 ata_dev_printk(dev, KERN_INFO,
1993 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
1994 (unsigned long long)dev->n_sectors,
1995 dev->multi_count, dev->cylinders,
1996 dev->heads, dev->sectors);
2003 /* ATAPI-specific feature tests */
2004 else if (dev->class == ATA_DEV_ATAPI) {
2005 char *cdb_intr_string = "";
2007 rc = atapi_cdb_len(id);
2008 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
2009 if (ata_msg_warn(ap))
2010 ata_dev_printk(dev, KERN_WARNING,
2011 "unsupported CDB len\n");
2015 dev->cdb_len = (unsigned int) rc;
2018 * check to see if this ATAPI device supports
2019 * Asynchronous Notification
2021 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_AN(id)) {
2023 /* issue SET feature command to turn this on */
2024 err = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2026 ata_dev_printk(dev, KERN_ERR,
2027 "unable to set AN, err %x\n",
2030 dev->flags |= ATA_DFLAG_AN;
2033 if (ata_id_cdb_intr(dev->id)) {
2034 dev->flags |= ATA_DFLAG_CDB_INTR;
2035 cdb_intr_string = ", CDB intr";
2038 /* print device info to dmesg */
2039 if (ata_msg_drv(ap) && print_info)
2040 ata_dev_printk(dev, KERN_INFO,
2041 "ATAPI: %s, %s, max %s%s\n",
2043 ata_mode_string(xfer_mask),
2047 /* determine max_sectors */
2048 dev->max_sectors = ATA_MAX_SECTORS;
2049 if (dev->flags & ATA_DFLAG_LBA48)
2050 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2052 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2053 /* Let the user know. We don't want to disallow opens for
2054 rescue purposes, or in case the vendor is just a blithering idiot. */
2057 ata_dev_printk(dev, KERN_WARNING,
2058 "Drive reports diagnostics failure. This may indicate a drive\n");
2059 ata_dev_printk(dev, KERN_WARNING,
2060 "fault or invalid emulation. Contact drive vendor for information.\n");
2064 /* limit bridge transfers to udma5, 200 sectors */
2065 if (ata_dev_knobble(dev)) {
2066 if (ata_msg_drv(ap) && print_info)
2067 ata_dev_printk(dev, KERN_INFO,
2068 "applying bridge limits\n");
2069 dev->udma_mask &= ATA_UDMA5;
2070 dev->max_sectors = ATA_MAX_SECTORS;
2073 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2074 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2077 if (ap->ops->dev_config)
2078 ap->ops->dev_config(dev);
2080 if (ata_msg_probe(ap))
2081 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2082 __FUNCTION__, ata_chk_status(ap));
2086 if (ata_msg_probe(ap))
2087 ata_dev_printk(dev, KERN_DEBUG,
2088 "%s: EXIT, err\n", __FUNCTION__);
2093 * ata_cable_40wire - return 40 wire cable type
2096 * Helper method for drivers which want to hardwire 40 wire cable
2100 int ata_cable_40wire(struct ata_port *ap)
2102 return ATA_CBL_PATA40;
2106 * ata_cable_80wire - return 80 wire cable type
2109 * Helper method for drivers which want to hardwire 80 wire cable
2113 int ata_cable_80wire(struct ata_port *ap)
2115 return ATA_CBL_PATA80;
2119 * ata_cable_unknown - return unknown PATA cable.
2122 * Helper method for drivers which have no PATA cable detection.
2125 int ata_cable_unknown(struct ata_port *ap)
2127 return ATA_CBL_PATA_UNK;
2131 * ata_cable_sata - return SATA cable type
2134 * Helper method for drivers which have SATA cables
2137 int ata_cable_sata(struct ata_port *ap)
2139 return ATA_CBL_SATA;
2143 * ata_bus_probe - Reset and probe ATA bus
2146 * Master ATA bus probing function. Initiates a hardware-dependent
2147 * bus reset, then attempts to identify any devices found on
2151 * PCI/etc. bus probe sem.
2154 * Zero on success, negative errno otherwise.
2157 int ata_bus_probe(struct ata_port *ap)
2159 unsigned int classes[ATA_MAX_DEVICES];
2160 int tries[ATA_MAX_DEVICES];
2162 struct ata_device *dev;
2166 ata_link_for_each_dev(dev, &ap->link)
2167 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
2170 /* reset and determine device classes */
2171 ap->ops->phy_reset(ap);
2173 ata_link_for_each_dev(dev, &ap->link) {
2174 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2175 dev->class != ATA_DEV_UNKNOWN)
2176 classes[dev->devno] = dev->class;
2178 classes[dev->devno] = ATA_DEV_NONE;
2180 dev->class = ATA_DEV_UNKNOWN;
2185 /* after the reset the device state is PIO 0 and the controller
2186 state is undefined. Record the mode */
2188 ata_link_for_each_dev(dev, &ap->link)
2189 dev->pio_mode = XFER_PIO_0;
2191 /* read IDENTIFY page and configure devices. We have to do the identify
2192 specific sequence bass-ackwards so that PDIAG- is released by the slave device. */
2195 ata_link_for_each_dev(dev, &ap->link) {
2196 if (tries[dev->devno])
2197 dev->class = classes[dev->devno];
2199 if (!ata_dev_enabled(dev))
2202 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2208 /* Now ask for the cable type as PDIAG- should have been released */
2209 if (ap->ops->cable_detect)
2210 ap->cbl = ap->ops->cable_detect(ap);
2212 /* We may have SATA bridge glue hiding here irrespective of the
2213 reported cable types and sensed types */
2214 ata_link_for_each_dev(dev, &ap->link) {
2215 if (!ata_dev_enabled(dev))
2217 /* SATA drives indicate we have a bridge. We don't know which
2218 end of the link the bridge is, which is a problem. */
2219 if (ata_id_is_sata(dev->id))
2220 ap->cbl = ATA_CBL_SATA;
2223 /* After the identify sequence we can now set up the devices. We do
2224 this in the normal order so that the user doesn't get confused */
2226 ata_link_for_each_dev(dev, &ap->link) {
2227 if (!ata_dev_enabled(dev))
2230 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2231 rc = ata_dev_configure(dev);
2232 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2237 /* configure transfer mode */
2238 rc = ata_set_mode(&ap->link, &dev);
2242 ata_link_for_each_dev(dev, &ap->link)
2243 if (ata_dev_enabled(dev))
2246 /* no device present, disable port */
2247 ata_port_disable(ap);
2251 tries[dev->devno]--;
2255 /* eeek, something went very wrong, give up */
2256 tries[dev->devno] = 0;
2260 /* give it just one more chance */
2261 tries[dev->devno] = min(tries[dev->devno], 1);
2263 if (tries[dev->devno] == 1) {
2264 /* This is the last chance, better to slow
2265 * down than lose it.
2267 sata_down_spd_limit(&ap->link);
2268 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2272 if (!tries[dev->devno])
2273 ata_dev_disable(dev);
2279 * ata_port_probe - Mark port as enabled
2280 * @ap: Port for which we indicate enablement
2282 * Modify @ap data structure such that the system
2283 * thinks that the entire port is enabled.
2285 * LOCKING: host lock, or some other form of serialization.
2289 void ata_port_probe(struct ata_port *ap)
2291 ap->flags &= ~ATA_FLAG_DISABLED;
2295 * sata_print_link_status - Print SATA link status
2296 * @link: SATA link to printk link status about
2298 * This function prints link speed and status of a SATA link.
2303 void sata_print_link_status(struct ata_link *link)
2305 u32 sstatus, scontrol, tmp;
2307 if (sata_scr_read(link, SCR_STATUS, &sstatus))
2309 sata_scr_read(link, SCR_CONTROL, &scontrol);
2311 if (ata_link_online(link)) {
2312 tmp = (sstatus >> 4) & 0xf;
2313 ata_link_printk(link, KERN_INFO,
2314 "SATA link up %s (SStatus %X SControl %X)\n",
2315 sata_spd_string(tmp), sstatus, scontrol);
2317 ata_link_printk(link, KERN_INFO,
2318 "SATA link down (SStatus %X SControl %X)\n",
2324 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2325 * @ap: SATA port associated with target SATA PHY.
2327 * This function issues commands to standard SATA Sxxx
2328 * PHY registers, to wake up the phy (and device), and
2329 * clear any reset condition.
2332 * PCI/etc. bus probe sem.
2335 void __sata_phy_reset(struct ata_port *ap)
2337 struct ata_link *link = &ap->link;
2338 unsigned long timeout = jiffies + (HZ * 5);
2341 if (ap->flags & ATA_FLAG_SATA_RESET) {
2342 /* issue phy wake/reset */
2343 sata_scr_write_flush(link, SCR_CONTROL, 0x301);
2344 /* Couldn't find anything in SATA I/II specs, but
2345 * AHCI-1.1 10.4.2 says at least 1 ms. */
2348 /* phy wake/clear reset */
2349 sata_scr_write_flush(link, SCR_CONTROL, 0x300);
2351 /* wait for phy to become ready, if necessary */
2354 sata_scr_read(link, SCR_STATUS, &sstatus);
2355 if ((sstatus & 0xf) != 1)
2357 } while (time_before(jiffies, timeout));
2359 /* print link status */
2360 sata_print_link_status(link);
2362 /* TODO: phy layer with polling, timeouts, etc. */
2363 if (!ata_link_offline(link))
2366 ata_port_disable(ap);
2368 if (ap->flags & ATA_FLAG_DISABLED)
2371 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2372 ata_port_disable(ap);
2376 ap->cbl = ATA_CBL_SATA;
2380 * sata_phy_reset - Reset SATA bus.
2381 * @ap: SATA port associated with target SATA PHY.
2383 * This function resets the SATA bus, and then probes
2384 * the bus for devices.
2387 * PCI/etc. bus probe sem.
2390 void sata_phy_reset(struct ata_port *ap)
2392 __sata_phy_reset(ap);
2393 if (ap->flags & ATA_FLAG_DISABLED)
2399 * ata_dev_pair - return other device on cable
2402 * Obtain the other device on the same cable, or if none is
2403 * present, NULL is returned.
2406 struct ata_device *ata_dev_pair(struct ata_device *adev)
2408 struct ata_link *link = adev->link;
2409 struct ata_device *pair = &link->device[1 - adev->devno];
2410 if (!ata_dev_enabled(pair))
2416 * ata_port_disable - Disable port.
2417 * @ap: Port to be disabled.
2419 * Modify @ap data structure such that the system
2420 * thinks that the entire port is disabled, and should
2421 * never attempt to probe or communicate with devices
2424 * LOCKING: host lock, or some other form of serialization.
2428 void ata_port_disable(struct ata_port *ap)
2430 ap->link.device[0].class = ATA_DEV_NONE;
2431 ap->link.device[1].class = ATA_DEV_NONE;
2432 ap->flags |= ATA_FLAG_DISABLED;
2436 * sata_down_spd_limit - adjust SATA spd limit downward
2437 * @link: Link to adjust SATA spd limit for
2439 * Adjust SATA spd limit of @link downward. Note that this
2440 * function only adjusts the limit. The change must be applied
2441 * using sata_set_spd().
2444 * Inherited from caller.
2447 * 0 on success, negative errno on failure
2449 int sata_down_spd_limit(struct ata_link *link)
2451 u32 sstatus, spd, mask;
2454 if (!sata_scr_valid(link))
2457 /* If SCR can be read, use it to determine the current SPD.
2458 * If not, use cached value in link->sata_spd.
2460 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
2462 spd = (sstatus >> 4) & 0xf;
2464 spd = link->sata_spd;
2466 mask = link->sata_spd_limit;
2470 /* unconditionally mask off the highest bit */
2471 highbit = fls(mask) - 1;
2472 mask &= ~(1 << highbit);
2474 /* Mask off all speeds higher than or equal to the current
2475 * one. Force 1.5Gbps if current SPD is not available.
2478 mask &= (1 << (spd - 1)) - 1;
2482 /* were we already at the bottom? */
2486 link->sata_spd_limit = mask;
2488 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
2489 sata_spd_string(fls(mask)));
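/*
 * Worked example (illustrative only): on a 3.0 Gbps link (spd == 2) with
 * sata_spd_limit == 0x3 (1.5 and 3.0 Gbps allowed), the code above first
 * clears the highest bit (0x3 -> 0x1) and then masks off everything at or
 * above the current speed ((1 << (2 - 1)) - 1 == 0x1), leaving 0x1: the
 * link will be limited to 1.5 Gbps by the next sata_set_spd().
 */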
2494 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
2498 if (link->sata_spd_limit == UINT_MAX)
2501 limit = fls(link->sata_spd_limit);
2503 spd = (*scontrol >> 4) & 0xf;
2504 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2506 return spd != limit;
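/*
 * Illustrative example (not from the original source): SControl carries the
 * speed limit in bits 7:4.  With sata_spd_limit == 0x1 (1.5 Gbps only),
 * limit == fls(0x1) == 1, so an SControl value of 0x300 is rewritten to
 * 0x310 and the function reports that a change (and hence a hardreset) is
 * needed.
 */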
2510 * sata_set_spd_needed - is SATA spd configuration needed
2511 * @link: Link in question
2513 * Test whether the spd limit in SControl matches
2514 * @link->sata_spd_limit. This function is used to determine
2515 * whether hardreset is necessary to apply SATA spd configuration.
2519 * Inherited from caller.
2522 * 1 if SATA spd configuration is needed, 0 otherwise.
2524 int sata_set_spd_needed(struct ata_link *link)
2528 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
2531 return __sata_set_spd_needed(link, &scontrol);
2535 * sata_set_spd - set SATA spd according to spd limit
2536 * @link: Link to set SATA spd for
2538 * Set SATA spd of @link according to sata_spd_limit.
2541 * Inherited from caller.
2544 * 0 if spd doesn't need to be changed, 1 if spd has been
2545 * changed. Negative errno if SCR registers are inaccessible.
2547 int sata_set_spd(struct ata_link *link)
2552 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
2555 if (!__sata_set_spd_needed(link, &scontrol))
2558 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
2565 * This mode timing computation functionality is ported over from
2566 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2569 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2570 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2571 * for UDMA6, which is currently supported only by Maxtor drives.
2573 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2576 static const struct ata_timing ata_timing[] = {
2578 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2579 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2580 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2581 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2583 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2584 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2585 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2586 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2587 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2589 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2591 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2592 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2593 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2595 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2596 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2597 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2599 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2600 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2601 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2602 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2604 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2605 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2606 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2608 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2613 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2614 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2616 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2618 q->setup = EZ(t->setup * 1000, T);
2619 q->act8b = EZ(t->act8b * 1000, T);
2620 q->rec8b = EZ(t->rec8b * 1000, T);
2621 q->cyc8b = EZ(t->cyc8b * 1000, T);
2622 q->active = EZ(t->active * 1000, T);
2623 q->recover = EZ(t->recover * 1000, T);
2624 q->cycle = EZ(t->cycle * 1000, T);
2625 q->udma = EZ(t->udma * 1000, UT);
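/*
 * Worked example (illustrative, values made up): ENOUGH() is a
 * round-up division and EZ() additionally keeps a zero field at
 * zero.  With t->setup = 30 and a quantum T = 25000, the setup line
 * above computes EZ(30 * 1000, 25000) = (30000 - 1) / 25000 + 1 = 2,
 * i.e. two bus clocks; a zero entry in the table stays zero.
 */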
2628 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2629 struct ata_timing *m, unsigned int what)
2631 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2632 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2633 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2634 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2635 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2636 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2637 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2638 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2641 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2643 const struct ata_timing *t;
2645 for (t = ata_timing; t->mode != speed; t++)
2646 if (t->mode == 0xFF)
2651 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2652 struct ata_timing *t, int T, int UT)
2654 const struct ata_timing *s;
2655 struct ata_timing p;
2661 if (!(s = ata_timing_find_mode(speed)))
2664 memcpy(t, s, sizeof(*s));
2667 * If the drive is an EIDE drive, it can tell us it needs extended
2668 * PIO/MW_DMA cycle timing.
2671 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2672 memset(&p, 0, sizeof(p));
2673 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2674 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2675 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2676 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2677 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2679 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2683 * Convert the timing to bus clock counts.
2686 ata_timing_quantize(t, t, T, UT);
2689 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2690 * S.M.A.R.T. and some other commands. We have to ensure that the
2691 * DMA cycle timing is no faster than the fastest PIO timing.
2694 if (speed > XFER_PIO_6) {
2695 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2696 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2700 * Lengthen active & recovery time so that cycle time is correct.
2703 if (t->act8b + t->rec8b < t->cyc8b) {
2704 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2705 t->rec8b = t->cyc8b - t->act8b;
2708 if (t->active + t->recover < t->cycle) {
2709 t->active += (t->cycle - (t->active + t->recover)) / 2;
2710 t->recover = t->cycle - t->active;
2713 /* In a few cases quantisation may produce enough rounding error to
2714 leave t->cycle lower than the sum of active and recovery time;
2715 if so, correct it here. */
2716 if (t->active + t->recover > t->cycle)
2717 t->cycle = t->active + t->recover;
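/*
 * Worked example (illustrative): if quantization leaves act8b = 3,
 * rec8b = 2 and cyc8b = 7 clocks, the code above stretches the
 * counts: act8b += (7 - 5) / 2 -> 4, rec8b = 7 - 4 -> 3, so
 * act8b + rec8b once again fills the whole 8-bit cycle time.
 */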
2723 * ata_down_xfermask_limit - adjust dev xfer masks downward
2724 * @dev: Device to adjust xfer masks
2725 * @sel: ATA_DNXFER_* selector
2727 * Adjust xfer masks of @dev downward. Note that this function
2728 * does not apply the change. Invoking ata_set_mode() afterwards
2729 * will apply the limit.
2732 * Inherited from caller.
2735 * 0 on success, negative errno on failure
2737 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2740 unsigned int orig_mask, xfer_mask;
2741 unsigned int pio_mask, mwdma_mask, udma_mask;
2744 quiet = !!(sel & ATA_DNXFER_QUIET);
2745 sel &= ~ATA_DNXFER_QUIET;
2747 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2750 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2753 case ATA_DNXFER_PIO:
2754 highbit = fls(pio_mask) - 1;
2755 pio_mask &= ~(1 << highbit);
2758 case ATA_DNXFER_DMA:
2760 highbit = fls(udma_mask) - 1;
2761 udma_mask &= ~(1 << highbit);
2764 } else if (mwdma_mask) {
2765 highbit = fls(mwdma_mask) - 1;
2766 mwdma_mask &= ~(1 << highbit);
2772 case ATA_DNXFER_40C:
2773 udma_mask &= ATA_UDMA_MASK_40C;
2776 case ATA_DNXFER_FORCE_PIO0:
2778 case ATA_DNXFER_FORCE_PIO:
2787 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2789 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2793 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2794 snprintf(buf, sizeof(buf), "%s:%s",
2795 ata_mode_string(xfer_mask),
2796 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2798 snprintf(buf, sizeof(buf), "%s",
2799 ata_mode_string(xfer_mask));
2801 ata_dev_printk(dev, KERN_WARNING,
2802 "limiting speed to %s\n", buf);
2805 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2811 static int ata_dev_set_mode(struct ata_device *dev)
2813 struct ata_eh_context *ehc = &dev->link->eh_context;
2814 unsigned int err_mask;
2817 dev->flags &= ~ATA_DFLAG_PIO;
2818 if (dev->xfer_shift == ATA_SHIFT_PIO)
2819 dev->flags |= ATA_DFLAG_PIO;
2821 err_mask = ata_dev_set_xfermode(dev);
2822 /* Old CFA may refuse this command, which is just fine */
2823 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2824 err_mask &= ~AC_ERR_DEV;
2825 /* Some very old devices and some bad newer ones fail any kind of
2826 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2827 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
2828 dev->pio_mode <= XFER_PIO_2)
2829 err_mask &= ~AC_ERR_DEV;
2831 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2832 "(err_mask=0x%x)\n", err_mask);
2836 ehc->i.flags |= ATA_EHI_POST_SETMODE;
2837 rc = ata_dev_revalidate(dev, 0);
2838 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2842 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2843 dev->xfer_shift, (int)dev->xfer_mode);
2845 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2846 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2851 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2852 * @link: link on which timings will be programmed
2853 * @r_failed_dev: out parameter for failed device
2855 * Standard implementation of the function used to tune and set
2856 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2857 * ata_dev_set_mode() fails, pointer to the failing device is
2858 * returned in @r_failed_dev.
2861 * PCI/etc. bus probe sem.
2864 * 0 on success, negative errno otherwise
2867 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2869 struct ata_port *ap = link->ap;
2870 struct ata_device *dev;
2871 int rc = 0, used_dma = 0, found = 0;
2873 /* step 1: calculate xfer_mask */
2874 ata_link_for_each_dev(dev, link) {
2875 unsigned int pio_mask, dma_mask;
2877 if (!ata_dev_enabled(dev))
2880 ata_dev_xfermask(dev);
2882 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2883 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2884 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2885 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2894 /* step 2: always set host PIO timings */
2895 ata_link_for_each_dev(dev, link) {
2896 if (!ata_dev_enabled(dev))
2899 if (!dev->pio_mode) {
2900 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2905 dev->xfer_mode = dev->pio_mode;
2906 dev->xfer_shift = ATA_SHIFT_PIO;
2907 if (ap->ops->set_piomode)
2908 ap->ops->set_piomode(ap, dev);
2911 /* step 3: set host DMA timings */
2912 ata_link_for_each_dev(dev, link) {
2913 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2916 dev->xfer_mode = dev->dma_mode;
2917 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2918 if (ap->ops->set_dmamode)
2919 ap->ops->set_dmamode(ap, dev);
2922 /* step 4: update devices' xfer mode */
2923 ata_link_for_each_dev(dev, link) {
2924 /* don't update suspended devices' xfer mode */
2925 if (!ata_dev_enabled(dev))
2928 rc = ata_dev_set_mode(dev);
2933 /* Record simplex status. If we selected DMA then the other
2934 * host channels are not permitted to do so.
2936 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2937 ap->host->simplex_claimed = ap;
2941 *r_failed_dev = dev;
2946 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2947 * @link: link on which timings will be programmed
2948 * @r_failed_dev: out parameter for failed device
2950 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2951 * ata_set_mode() fails, pointer to the failing device is
2952 * returned in @r_failed_dev.
2955 * PCI/etc. bus probe sem.
2958 * 0 on success, negative errno otherwise
2960 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
2962 struct ata_port *ap = link->ap;
2964 /* has private set_mode? */
2965 if (ap->ops->set_mode)
2966 return ap->ops->set_mode(link, r_failed_dev);
2967 return ata_do_set_mode(link, r_failed_dev);
2971 * ata_tf_to_host - issue ATA taskfile to host controller
2972 * @ap: port to which command is being issued
2973 * @tf: ATA taskfile register set
2975 * Issues ATA taskfile register set to ATA host controller,
2976 * with proper synchronization with interrupt handler and
2980 * spin_lock_irqsave(host lock)
2983 static inline void ata_tf_to_host(struct ata_port *ap,
2984 const struct ata_taskfile *tf)
2986 ap->ops->tf_load(ap, tf);
2987 ap->ops->exec_command(ap, tf);
2991 * ata_busy_sleep - sleep until BSY clears, or timeout
2992 * @ap: port containing status register to be polled
2993 * @tmout_pat: impatience timeout
2994 * @tmout: overall timeout
2996 * Sleep until ATA Status register bit BSY clears,
2997 * or a timeout occurs.
3000 * Kernel thread context (may sleep).
3003 * 0 on success, -errno otherwise.
3005 int ata_busy_sleep(struct ata_port *ap,
3006 unsigned long tmout_pat, unsigned long tmout)
3008 unsigned long timer_start, timeout;
3011 status = ata_busy_wait(ap, ATA_BUSY, 300);
3012 timer_start = jiffies;
3013 timeout = timer_start + tmout_pat;
3014 while (status != 0xff && (status & ATA_BUSY) &&
3015 time_before(jiffies, timeout)) {
3017 status = ata_busy_wait(ap, ATA_BUSY, 3);
3020 if (status != 0xff && (status & ATA_BUSY))
3021 ata_port_printk(ap, KERN_WARNING,
3022 "port is slow to respond, please be patient "
3023 "(Status 0x%x)\n", status);
3025 timeout = timer_start + tmout;
3026 while (status != 0xff && (status & ATA_BUSY) &&
3027 time_before(jiffies, timeout)) {
3029 status = ata_chk_status(ap);
3035 if (status & ATA_BUSY) {
3036 ata_port_printk(ap, KERN_ERR, "port failed to respond "
3037 "(%lu secs, Status 0x%x)\n",
3038 tmout / HZ, status);
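/*
 * Usage note (illustrative): callers pass a short "impatience"
 * timeout and a longer overall timeout, e.g.
 * ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); the
 * first loop above polls quietly for the short period, the warning
 * is printed, and polling continues until the overall timeout.
 */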
3046 * ata_wait_ready - sleep until BSY clears, or timeout
3047 * @ap: port containing status register to be polled
3048 * @deadline: deadline jiffies for the operation
3050 * Sleep until ATA Status register bit BSY clears, or a timeout occurs.
3054 * Kernel thread context (may sleep).
3057 * 0 on success, -errno otherwise.
3059 int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3061 unsigned long start = jiffies;
3065 u8 status = ata_chk_status(ap);
3066 unsigned long now = jiffies;
3068 if (!(status & ATA_BUSY))
3070 if (!ata_link_online(&ap->link) && status == 0xff)
3072 if (time_after(now, deadline))
3075 if (!warned && time_after(now, start + 5 * HZ) &&
3076 (deadline - now > 3 * HZ)) {
3077 ata_port_printk(ap, KERN_WARNING,
3078 "port is slow to respond, please be patient "
3079 "(Status 0x%x)\n", status);
3087 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3088 unsigned long deadline)
3090 struct ata_ioports *ioaddr = &ap->ioaddr;
3091 unsigned int dev0 = devmask & (1 << 0);
3092 unsigned int dev1 = devmask & (1 << 1);
3095 /* if device 0 was found in ata_devchk, wait for its
3099 rc = ata_wait_ready(ap, deadline);
3107 /* if device 1 was found in ata_devchk, wait for register
3108 * access briefly, then wait for BSY to clear.
3113 ap->ops->dev_select(ap, 1);
3115 /* Wait for register access. Some ATAPI devices fail
3116 * to set nsect/lbal after reset, so don't waste too
3117 * much time on it. We're gonna wait for !BSY anyway.
3119 for (i = 0; i < 2; i++) {
3122 nsect = ioread8(ioaddr->nsect_addr);
3123 lbal = ioread8(ioaddr->lbal_addr);
3124 if ((nsect == 1) && (lbal == 1))
3126 msleep(50); /* give drive a breather */
3129 rc = ata_wait_ready(ap, deadline);
3137 /* is all this really necessary? */
3138 ap->ops->dev_select(ap, 0);
3140 ap->ops->dev_select(ap, 1);
3142 ap->ops->dev_select(ap, 0);
3147 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3148 unsigned long deadline)
3150 struct ata_ioports *ioaddr = &ap->ioaddr;
3152 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3154 /* software reset. causes dev0 to be selected */
3155 iowrite8(ap->ctl, ioaddr->ctl_addr);
3156 udelay(20); /* FIXME: flush */
3157 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3158 udelay(20); /* FIXME: flush */
3159 iowrite8(ap->ctl, ioaddr->ctl_addr);
3161 /* spec mandates ">= 2ms" before checking status.
3162 * We wait 150ms, because that was the magic delay used for
3163 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3164 * between when the ATA command register is written and when
3165 * status is checked. Because waiting for "a while" before
3166 * checking status is fine, post SRST, we perform this magic
3167 * delay here as well.
3169 * Old drivers/ide uses the 2 ms rule and then waits for ready
3173 /* Before we perform post reset processing we want to see if
3174 * the bus shows 0xFF because the odd clown forgets the D7
3175 * pulldown resistor.
3177 if (ata_check_status(ap) == 0xFF)
3180 return ata_bus_post_reset(ap, devmask, deadline);
3184 * ata_bus_reset - reset host port and associated ATA channel
3185 * @ap: port to reset
3187 * This is typically the first time we actually start issuing
3188 * commands to the ATA channel. We wait for BSY to clear, then
3189 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3190 * result. Determine what devices, if any, are on the channel
3191 * by looking at the device 0/1 error register. Look at the signature
3192 * stored in each device's taskfile registers, to determine if
3193 * the device is ATA or ATAPI.
3196 * PCI/etc. bus probe sem.
3197 * Obtains host lock.
3200 * Sets ATA_FLAG_DISABLED if bus reset fails.
3203 void ata_bus_reset(struct ata_port *ap)
3205 struct ata_device *device = ap->link.device;
3206 struct ata_ioports *ioaddr = &ap->ioaddr;
3207 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3209 unsigned int dev0, dev1 = 0, devmask = 0;
3212 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3214 /* determine if device 0/1 are present */
3215 if (ap->flags & ATA_FLAG_SATA_RESET)
3218 dev0 = ata_devchk(ap, 0);
3220 dev1 = ata_devchk(ap, 1);
3224 devmask |= (1 << 0);
3226 devmask |= (1 << 1);
3228 /* select device 0 again */
3229 ap->ops->dev_select(ap, 0);
3231 /* issue bus reset */
3232 if (ap->flags & ATA_FLAG_SRST) {
3233 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3234 if (rc && rc != -ENODEV)
3239 * determine by signature whether we have ATA or ATAPI devices
3241 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
3242 if ((slave_possible) && (err != 0x81))
3243 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
3245 /* is double-select really necessary? */
3246 if (device[1].class != ATA_DEV_NONE)
3247 ap->ops->dev_select(ap, 1);
3248 if (device[0].class != ATA_DEV_NONE)
3249 ap->ops->dev_select(ap, 0);
3251 /* if no devices were detected, disable this port */
3252 if ((device[0].class == ATA_DEV_NONE) &&
3253 (device[1].class == ATA_DEV_NONE))
3256 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3257 /* set up device control for ATA_FLAG_SATA_RESET */
3258 iowrite8(ap->ctl, ioaddr->ctl_addr);
3265 ata_port_printk(ap, KERN_ERR, "disabling port\n");
3266 ata_port_disable(ap);
3272 * sata_link_debounce - debounce SATA phy status
3273 * @link: ATA link to debounce SATA phy status for
3274 * @params: timing parameters { interval, duration, timeout } in msec
3275 * @deadline: deadline jiffies for the operation
3277 * Make sure SStatus of @link reaches stable state, determined by
3278 * holding the same value where DET is not 1 for @duration polled
3279 * every @interval, before @timeout. @timeout constrains the
3280 * beginning of the stable state. Because DET gets stuck at 1 on
3281 * some controllers after hot unplugging, this function waits
3282 * until the timeout expires and then returns 0 if DET is stable at 1.
3284 * @timeout is further limited by @deadline; the sooner of the two is used.
3288 * Kernel thread context (may sleep)
3291 * 0 on success, -errno on failure.
3293 int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3294 unsigned long deadline)
3296 unsigned long interval_msec = params[0];
3297 unsigned long duration = msecs_to_jiffies(params[1]);
3298 unsigned long last_jiffies, t;
3302 t = jiffies + msecs_to_jiffies(params[2]);
3303 if (time_before(t, deadline))
3306 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3311 last_jiffies = jiffies;
3314 msleep(interval_msec);
3315 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
3321 if (cur == 1 && time_before(jiffies, deadline))
3323 if (time_after(jiffies, last_jiffies + duration))
3328 /* unstable, start over */
3330 last_jiffies = jiffies;
3332 /* Check deadline. If debouncing failed, return
3333 * -EPIPE to tell upper layer to lower link speed.
3335 if (time_after(jiffies, deadline))
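/*
 * Worked example (illustrative, parameters made up): with
 * params = { 25, 500, 2000 } the loop above samples DET every 25 ms
 * and succeeds only after the value has held steady (and != 1) for
 * 500 ms; if that never happens within 2000 ms (or @deadline,
 * whichever is sooner), -EPIPE asks the caller to lower link speed.
 */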
3341 * sata_link_resume - resume SATA link
3342 * @link: ATA link to resume SATA
3343 * @params: timing parameters { interval, duration, timeout } in msec
3344 * @deadline: deadline jiffies for the operation
3346 * Resume SATA phy @link and debounce it.
3349 * Kernel thread context (may sleep)
3352 * 0 on success, -errno on failure.
3354 int sata_link_resume(struct ata_link *link, const unsigned long *params,
3355 unsigned long deadline)
3360 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3363 scontrol = (scontrol & 0x0f0) | 0x300;
3365 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3368 /* Some PHYs react badly if SStatus is pounded immediately
3369 * after resuming. Delay 200ms before debouncing.
3373 return sata_link_debounce(link, params, deadline);
3377 * ata_std_prereset - prepare for reset
3378 * @link: ATA link to be reset
3379 * @deadline: deadline jiffies for the operation
3381 * @link is about to be reset. Initialize it. Failure from
3382 * prereset makes libata abort whole reset sequence and give up
3383 * that port, so prereset should be best-effort. It does its
3384 * best to prepare for reset sequence but if things go wrong, it
3385 * should just whine, not fail.
3388 * Kernel thread context (may sleep)
3391 * 0 on success, -errno otherwise.
3393 int ata_std_prereset(struct ata_link *link, unsigned long deadline)
3395 struct ata_port *ap = link->ap;
3396 struct ata_eh_context *ehc = &link->eh_context;
3397 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3400 /* handle link resume */
3401 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3402 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
3403 ehc->i.action |= ATA_EH_HARDRESET;
3405 /* if we're about to do hardreset, nothing more to do */
3406 if (ehc->i.action & ATA_EH_HARDRESET)
3409 /* if SATA, resume link */
3410 if (ap->flags & ATA_FLAG_SATA) {
3411 rc = sata_link_resume(link, timing, deadline);
3412 /* whine about phy resume failure but proceed */
3413 if (rc && rc != -EOPNOTSUPP)
3414 ata_link_printk(link, KERN_WARNING, "failed to resume "
3415 "link for reset (errno=%d)\n", rc);
3418 /* Wait for !BSY if the controller can wait for the first D2H
3419 * Reg FIS and we don't know that no device is attached.
3421 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
3422 rc = ata_wait_ready(ap, deadline);
3423 if (rc && rc != -ENODEV) {
3424 ata_link_printk(link, KERN_WARNING, "device not ready "
3425 "(errno=%d), forcing hardreset\n", rc);
3426 ehc->i.action |= ATA_EH_HARDRESET;
3434 * ata_std_softreset - reset host port via ATA SRST
3435 * @link: ATA link to reset
3436 * @classes: resulting classes of attached devices
3437 * @deadline: deadline jiffies for the operation
3439 * Reset host port using ATA SRST.
3442 * Kernel thread context (may sleep)
3445 * 0 on success, -errno otherwise.
3447 int ata_std_softreset(struct ata_link *link, unsigned int *classes,
3448 unsigned long deadline)
3450 struct ata_port *ap = link->ap;
3451 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3452 unsigned int devmask = 0;
3458 if (ata_link_offline(link)) {
3459 classes[0] = ATA_DEV_NONE;
3463 /* determine if device 0/1 are present */
3464 if (ata_devchk(ap, 0))
3465 devmask |= (1 << 0);
3466 if (slave_possible && ata_devchk(ap, 1))
3467 devmask |= (1 << 1);
3469 /* select device 0 again */
3470 ap->ops->dev_select(ap, 0);
3472 /* issue bus reset */
3473 DPRINTK("about to softreset, devmask=%x\n", devmask);
3474 rc = ata_bus_softreset(ap, devmask, deadline);
3475 /* if link is occupied, -ENODEV too is an error */
3476 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
3477 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3481 /* determine by signature whether we have ATA or ATAPI devices */
3482 classes[0] = ata_dev_try_classify(&link->device[0],
3483 devmask & (1 << 0), &err);
3484 if (slave_possible && err != 0x81)
3485 classes[1] = ata_dev_try_classify(&link->device[1],
3486 devmask & (1 << 1), &err);
3489 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3494 * sata_link_hardreset - reset link via SATA phy reset
3495 * @link: link to reset
3496 * @timing: timing parameters { interval, duration, timeout } in msec
3497 * @deadline: deadline jiffies for the operation
3499 * SATA phy-reset @link using DET bits of SControl register.
3502 * Kernel thread context (may sleep)
3505 * 0 on success, -errno otherwise.
3507 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
3508 unsigned long deadline)
3515 if (sata_set_spd_needed(link)) {
3516 /* SATA spec says nothing about how to reconfigure
3517 * spd. To be on the safe side, turn off phy during
3518 * reconfiguration. This works for at least ICH7 AHCI
3521 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3524 scontrol = (scontrol & 0x0f0) | 0x304;
3526 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
3532 /* issue phy wake/reset */
3533 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
3536 scontrol = (scontrol & 0x0f0) | 0x301;
3538 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
3541 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3542 * 10.4.2 says at least 1 ms.
3546 /* bring link back */
3547 rc = sata_link_resume(link, timing, deadline);
3549 DPRINTK("EXIT, rc=%d\n", rc);
3554 * sata_std_hardreset - reset host port via SATA phy reset
3555 * @link: link to reset
3556 * @class: resulting class of attached device
3557 * @deadline: deadline jiffies for the operation
3559 * SATA phy-reset host port using DET bits of SControl register,
3560 * wait for !BSY and classify the attached device.
3563 * Kernel thread context (may sleep)
3566 * 0 on success, -errno otherwise.
3568 int sata_std_hardreset(struct ata_link *link, unsigned int *class,
3569 unsigned long deadline)
3571 struct ata_port *ap = link->ap;
3572 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
3578 rc = sata_link_hardreset(link, timing, deadline);
3580 ata_link_printk(link, KERN_ERR,
3581 "COMRESET failed (errno=%d)\n", rc);
3585 /* TODO: phy layer with polling, timeouts, etc. */
3586 if (ata_link_offline(link)) {
3587 *class = ATA_DEV_NONE;
3588 DPRINTK("EXIT, link offline\n");
3592 /* wait a while before checking status, see SRST for more info */
3595 rc = ata_wait_ready(ap, deadline);
3596 /* link occupied, -ENODEV too is an error */
3598 ata_link_printk(link, KERN_ERR,
3599 "COMRESET failed (errno=%d)\n", rc);
3603 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3605 *class = ata_dev_try_classify(link->device, 1, NULL);
3607 DPRINTK("EXIT, class=%u\n", *class);
3612 * ata_std_postreset - standard postreset callback
3613 * @link: the target ata_link
3614 * @classes: classes of attached devices
3616 * This function is invoked after a successful reset. Note that
3617 * the device might have been reset more than once using
3618 * different reset methods before postreset is invoked.
3621 * Kernel thread context (may sleep)
3623 void ata_std_postreset(struct ata_link *link, unsigned int *classes)
3625 struct ata_port *ap = link->ap;
3630 /* print link status */
3631 sata_print_link_status(link);
3634 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3635 sata_scr_write(link, SCR_ERROR, serror);
3637 /* is double-select really necessary? */
3638 if (classes[0] != ATA_DEV_NONE)
3639 ap->ops->dev_select(ap, 1);
3640 if (classes[1] != ATA_DEV_NONE)
3641 ap->ops->dev_select(ap, 0);
3643 /* bail out if no device is present */
3644 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3645 DPRINTK("EXIT, no device\n");
3649 /* set up device control */
3650 if (ap->ioaddr.ctl_addr)
3651 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3657 * ata_dev_same_device - Determine whether new ID matches configured device
3658 * @dev: device to compare against
3659 * @new_class: class of the new device
3660 * @new_id: IDENTIFY page of the new device
3662 * Compare @new_class and @new_id against @dev and determine
3663 * whether @dev is the device indicated by @new_class and @new_id.
3670 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3672 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3675 const u16 *old_id = dev->id;
3676 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3677 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3679 if (dev->class != new_class) {
3680 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3681 dev->class, new_class);
3685 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3686 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3687 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3688 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3690 if (strcmp(model[0], model[1])) {
3691 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3692 "'%s' != '%s'\n", model[0], model[1]);
3696 if (strcmp(serial[0], serial[1])) {
3697 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3698 "'%s' != '%s'\n", serial[0], serial[1]);
3706 * ata_dev_reread_id - Re-read IDENTIFY data
3707 * @dev: target ATA device
3708 * @readid_flags: read ID flags
3710 * Re-read IDENTIFY page and make sure @dev is still attached to the port.
3714 * Kernel thread context (may sleep)
3717 * 0 on success, negative errno otherwise
3719 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3721 unsigned int class = dev->class;
3722 u16 *id = (void *)dev->link->ap->sector_buf;
3726 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3730 /* is the device still there? */
3731 if (!ata_dev_same_device(dev, class, id))
3734 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3739 * ata_dev_revalidate - Revalidate ATA device
3740 * @dev: device to revalidate
3741 * @readid_flags: read ID flags
3743 * Re-read IDENTIFY page, make sure @dev is still attached to the
3744 * port and reconfigure it according to the new IDENTIFY page.
3747 * Kernel thread context (may sleep)
3750 * 0 on success, negative errno otherwise
3752 int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3754 u64 n_sectors = dev->n_sectors;
3757 if (!ata_dev_enabled(dev))
3761 rc = ata_dev_reread_id(dev, readid_flags);
3765 /* configure device according to the new ID */
3766 rc = ata_dev_configure(dev);
3770 /* verify n_sectors hasn't changed */
3771 if (dev->class == ATA_DEV_ATA && n_sectors &&
3772 dev->n_sectors != n_sectors) {
3773 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3775 (unsigned long long)n_sectors,
3776 (unsigned long long)dev->n_sectors);
3778 /* restore original n_sectors */
3779 dev->n_sectors = n_sectors;
3788 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3792 struct ata_blacklist_entry {
3793 const char *model_num;
3794 const char *model_rev;
3795 unsigned long horkage;
3798 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3799 /* Devices with DMA related problems under Linux */
3800 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3801 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3802 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3803 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3804 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3805 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3806 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3807 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3808 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3809 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3810 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3811 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3812 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3813 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3814 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3815 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3816 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3817 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3818 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3819 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3820 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3821 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3822 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3823 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3824 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3825 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3826 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3827 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3828 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3829 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3830 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
3831 { "IOMEGA ZIP 250 ATAPI Floppy",
3832 NULL, ATA_HORKAGE_NODMA },
3834 /* Weird ATAPI devices */
3835 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3837 /* Devices we expect to fail diagnostics */
3839 /* Devices where NCQ should be avoided */
3841 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3842 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3843 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3845 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
3846 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3847 { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
3848 ATA_HORKAGE_NONCQ },
3850 /* Blacklist entries taken from Silicon Image 3124/3132
3851 Windows driver .inf file - also several Linux problem reports */
3852 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3853 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3854 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3855 /* Drives which do spurious command completion */
3856 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
3857 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
3858 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
3859 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3860 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
3861 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3862 { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, },
3863 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
3865 /* devices which puke on READ_NATIVE_MAX */
3866 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3867 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3868 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3869 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3875 int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
3881 * check for trailing wildcard: *\0
3883 p = strchr(patt, wildchar);
3884 if (p && ((*(p + 1)) == 0))
3889 return strncmp(patt, name, len);
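/*
 * Illustrative sketch (not part of the driver; the helper name is
 * made up): how the trailing-'*' wildcard behaves.  A pattern ending
 * in '*' compares only the prefix before the wildcard, so
 * "Maxtor *" matches any model string starting with "Maxtor ".
 */
static inline int example_matches_maxtor(const char *model_num)
{
	return strn_pattern_cmp("Maxtor *", model_num, '*') == 0;
}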
3892 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3894 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3895 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3896 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3898 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3899 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3901 while (ad->model_num) {
3902 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
3903 if (ad->model_rev == NULL)
3905 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
3913 static int ata_dma_blacklisted(const struct ata_device *dev)
3915 /* We don't support polling DMA.
3916 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3917 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3919 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
3920 (dev->flags & ATA_DFLAG_CDB_INTR))
3922 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
3926 * ata_dev_xfermask - Compute supported xfermask of the given device
3927 * @dev: Device to compute xfermask for
3929 * Compute supported xfermask of @dev and store it in
3930 * dev->*_mask. This function is responsible for applying all
3931 * known limits including host controller limits, device blacklist, etc.
3937 static void ata_dev_xfermask(struct ata_device *dev)
3939 struct ata_link *link = dev->link;
3940 struct ata_port *ap = link->ap;
3941 struct ata_host *host = ap->host;
3942 unsigned long xfer_mask;
3944 /* controller modes available */
3945 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3946 ap->mwdma_mask, ap->udma_mask);
3948 /* drive modes available */
3949 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3950 dev->mwdma_mask, dev->udma_mask);
3951 xfer_mask &= ata_id_xfermask(dev->id);
3954 * CFA Advanced TrueIDE timings are not allowed on a shared cable.
3957 if (ata_dev_pair(dev)) {
3958 /* No PIO5 or PIO6 */
3959 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3960 /* No MWDMA3 or MWDMA 4 */
3961 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3964 if (ata_dma_blacklisted(dev)) {
3965 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3966 ata_dev_printk(dev, KERN_WARNING,
3967 "device is on DMA blacklist, disabling DMA\n");
3970 if ((host->flags & ATA_HOST_SIMPLEX) &&
3971 host->simplex_claimed && host->simplex_claimed != ap) {
3972 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3973 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3974 "other device, disabling DMA\n");
3977 if (ap->flags & ATA_FLAG_NO_IORDY)
3978 xfer_mask &= ata_pio_mask_no_iordy(dev);
3980 if (ap->ops->mode_filter)
3981 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
3983 /* Apply cable rule here. Don't apply it early because when
3984 * we handle hot plug the cable type can itself change.
3985 * Check this last so that we know if the transfer rate was
3986 * solely limited by the cable.
3987 * Unknown or 80 wire cables reported host side are checked
3988 * drive side as well. Cases where we know a 40wire cable
3989 * is used safely for 80 are not checked here.
3991 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3992 /* UDMA/44 or higher would be available */
3993 if ((ap->cbl == ATA_CBL_PATA40) ||
3994 (ata_drive_40wire(dev->id) &&
3995 (ap->cbl == ATA_CBL_PATA_UNK ||
3996 ap->cbl == ATA_CBL_PATA80))) {
3997 ata_dev_printk(dev, KERN_WARNING,
3998 "limited to UDMA/33 due to 40-wire cable\n");
3999 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4002 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4003 &dev->mwdma_mask, &dev->udma_mask);
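/*
 * Illustrative sketch (not part of libata; the helper name is made
 * up): the 40-wire cable rule above expressed on a packed xfer mask.
 * 0xF8 << ATA_SHIFT_UDMA covers UDMA modes 3-7 (UDMA/44 and up), so
 * clearing those bits leaves UDMA/33 as the fastest UDMA mode.
 */
static inline unsigned long example_cap_udma33(unsigned long xfer_mask)
{
	return xfer_mask & ~(0xF8 << ATA_SHIFT_UDMA);
}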
4007 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4008 * @dev: Device to which command will be sent
4010 * Issue SET FEATURES - XFER MODE command to device @dev
4014 * PCI/etc. bus probe sem.
4017 * 0 on success, AC_ERR_* mask otherwise.
4020 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4022 struct ata_taskfile tf;
4023 unsigned int err_mask;
4025 /* set up set-features taskfile */
4026 DPRINTK("set features - xfer mode\n");
4028 /* Some controllers and ATAPI devices show flaky interrupt
4029 * behavior after setting xfer mode. Use polling instead.
4031 ata_tf_init(dev, &tf);
4032 tf.command = ATA_CMD_SET_FEATURES;
4033 tf.feature = SETFEATURES_XFER;
4034 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4035 tf.protocol = ATA_PROT_NODATA;
4036 tf.nsect = dev->xfer_mode;
4038 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4040 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4045 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4046 * @dev: Device to which command will be sent
4047 * @enable: Whether to enable or disable the feature
4049 * Issue SET FEATURES - SATA FEATURES command to device @dev
4050 * on port @ap with sector count set to indicate Asynchronous
4051 * Notification feature
4054 * PCI/etc. bus probe sem.
4057 * 0 on success, AC_ERR_* mask otherwise.
4059 static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4061 struct ata_taskfile tf;
4062 unsigned int err_mask;
4064 /* set up set-features taskfile */
4065 DPRINTK("set features - SATA features\n");
4067 ata_tf_init(dev, &tf);
4068 tf.command = ATA_CMD_SET_FEATURES;
4069 tf.feature = enable;
4070 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4071 tf.protocol = ATA_PROT_NODATA;
4074 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4076 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4081 * ata_dev_init_params - Issue INIT DEV PARAMS command
4082 * @dev: Device to which command will be sent
4083 * @heads: Number of heads (taskfile parameter)
4084 * @sectors: Number of sectors (taskfile parameter)
4087 * Kernel thread context (may sleep)
4090 * 0 on success, AC_ERR_* mask otherwise.
4092 static unsigned int ata_dev_init_params(struct ata_device *dev,
4093 u16 heads, u16 sectors)
4095 struct ata_taskfile tf;
4096 unsigned int err_mask;
4098 /* Number of sectors per track 1-255. Number of heads 1-16 */
4099 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4100 return AC_ERR_INVALID;
4102 /* set up init dev params taskfile */
4103 DPRINTK("init dev params \n");
4105 ata_tf_init(dev, &tf);
4106 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4107 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4108 tf.protocol = ATA_PROT_NODATA;
4110 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4112 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4113 /* A clean abort indicates an original or just out-of-spec drive,
4114 and we should continue as we issue the setup based on the
4115 working geometry reported by the drive */
4116 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4119 DPRINTK("EXIT, err_mask=%x\n", err_mask);
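/*
 * Worked example (illustrative): a legacy 16 head / 63 sector
 * geometry is encoded with the sector count carried in the taskfile
 * and (16 - 1) & 0x0f == 0x0f in the low nibble of the device
 * register, per the INITIALIZE DEVICE PARAMETERS definition.
 */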
4124 * ata_sg_clean - Unmap DMA memory associated with command
4125 * @qc: Command containing DMA memory to be released
4127 * Unmap all mapped DMA memory associated with this command.
4130 * spin_lock_irqsave(host lock)
4132 void ata_sg_clean(struct ata_queued_cmd *qc)
4134 struct ata_port *ap = qc->ap;
4135 struct scatterlist *sg = qc->__sg;
4136 int dir = qc->dma_dir;
4137 void *pad_buf = NULL;
4139 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4140 WARN_ON(sg == NULL);
4142 if (qc->flags & ATA_QCFLAG_SINGLE)
4143 WARN_ON(qc->n_elem > 1);
4145 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4147 /* if we padded the buffer out to 32-bit bound, and data
4148 * xfer direction is from-device, we must copy from the
4149 * pad buffer back into the supplied buffer
4151 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4152 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4154 if (qc->flags & ATA_QCFLAG_SG) {
4156 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4157 /* restore last sg */
4158 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4160 struct scatterlist *psg = &qc->pad_sgent;
4161 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4162 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4163 kunmap_atomic(addr, KM_IRQ0);
4167 dma_unmap_single(ap->dev,
4168 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4171 sg->length += qc->pad_len;
4173 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4174 pad_buf, qc->pad_len);
4177 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4182 * ata_fill_sg - Fill PCI IDE PRD table
4183 * @qc: Metadata associated with taskfile to be transferred
4185 * Fill PCI IDE PRD (scatter-gather) table with segments
4186 * associated with the current disk command.
4189 * spin_lock_irqsave(host lock)
4192 static void ata_fill_sg(struct ata_queued_cmd *qc)
4194 struct ata_port *ap = qc->ap;
4195 struct scatterlist *sg;
4198 WARN_ON(qc->__sg == NULL);
4199 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4202 ata_for_each_sg(sg, qc) {
4206 /* determine if physical DMA addr spans 64K boundary.
4207 * Note h/w doesn't support 64-bit, so we unconditionally
4208 * truncate dma_addr_t to u32.
4210 addr = (u32) sg_dma_address(sg);
4211 sg_len = sg_dma_len(sg);
4214 offset = addr & 0xffff;
4216 if ((offset + sg_len) > 0x10000)
4217 len = 0x10000 - offset;
4219 ap->prd[idx].addr = cpu_to_le32(addr);
4220 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4221 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4230 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
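/*
 * Worked example (illustrative, addresses made up): an sg entry at
 * DMA address 0x1fff0 with length 0x40 has offset 0xfff0 within its
 * 64K region; 0xfff0 + 0x40 > 0x10000, so the first PRD entry covers
 * only 0x10 bytes and the remaining 0x30 bytes are emitted as a
 * further entry starting at 0x20000.
 */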
4234 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4235 * @qc: Metadata associated with taskfile to be transferred
4237 * Fill PCI IDE PRD (scatter-gather) table with segments
4238 * associated with the current disk command. Perform the fill
4239 * so that we never write a zero-length (meaning 64K) record for
4240 * controllers that don't follow the spec.
4243 * spin_lock_irqsave(host lock)
4246 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4248 struct ata_port *ap = qc->ap;
4249 struct scatterlist *sg;
4252 WARN_ON(qc->__sg == NULL);
4253 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4256 ata_for_each_sg(sg, qc) {
4258 u32 sg_len, len, blen;
4260 /* determine if physical DMA addr spans 64K boundary.
4261 * Note h/w doesn't support 64-bit, so we unconditionally
4262 * truncate dma_addr_t to u32.
4264 addr = (u32) sg_dma_address(sg);
4265 sg_len = sg_dma_len(sg);
4268 offset = addr & 0xffff;
4270 if ((offset + sg_len) > 0x10000)
4271 len = 0x10000 - offset;
4273 blen = len & 0xffff;
4274 ap->prd[idx].addr = cpu_to_le32(addr);
4276 /* Some PATA chipsets like the CS5530 can't
4277 cope with 0x0000 meaning 64K as the spec says */
4278 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4280 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4282 ap->prd[idx].flags_len = cpu_to_le32(blen);
4283 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4292 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
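/*
 * Worked example (illustrative): a full 0x10000 byte segment has a
 * low 16-bit length of 0x0000, which the spec defines as 64K but
 * chips like the CS5530 mishandle; the dumb fill above therefore
 * emits two PRD entries of 0x8000 bytes each instead.
 */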
4296 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4297 * @qc: Metadata associated with taskfile to check
4299 * Allow low-level driver to filter ATA PACKET commands, returning
4300 * a status indicating whether or not it is OK to use DMA for the
4301 * supplied PACKET command.
4304 * spin_lock_irqsave(host lock)
4306 * RETURNS: 0 when ATAPI DMA can be used, nonzero otherwise
4309 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4311 struct ata_port *ap = qc->ap;
4313 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4314 * few ATAPI devices choke on such DMA requests.
4316 if (unlikely(qc->nbytes & 15))
4319 if (ap->ops->check_atapi_dma)
4320 return ap->ops->check_atapi_dma(qc);
4326 * ata_qc_prep - Prepare taskfile for submission
4327 * @qc: Metadata associated with taskfile to be prepared
4329 * Prepare ATA taskfile for submission.
4332 * spin_lock_irqsave(host lock)
4334 void ata_qc_prep(struct ata_queued_cmd *qc)
4336 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4343 * ata_dumb_qc_prep - Prepare taskfile for submission
4344 * @qc: Metadata associated with taskfile to be prepared
4346 * Prepare ATA taskfile for submission.
4349 * spin_lock_irqsave(host lock)
4351 void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4353 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4356 ata_fill_sg_dumb(qc);
4359 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4362 * ata_sg_init_one - Associate command with memory buffer
4363 * @qc: Command to be associated
4364 * @buf: Memory buffer
4365 * @buflen: Length of memory buffer, in bytes.
4367 * Initialize the data-related elements of queued_cmd @qc
4368 * to point to a single memory buffer, @buf of byte length @buflen.
4371 * spin_lock_irqsave(host lock)
4374 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4376 qc->flags |= ATA_QCFLAG_SINGLE;
4378 qc->__sg = &qc->sgent;
4380 qc->orig_n_elem = 1;
4382 qc->nbytes = buflen;
4384 sg_init_one(&qc->sgent, buf, buflen);
4388 * ata_sg_init - Associate command with scatter-gather table.
4389 * @qc: Command to be associated
4390 * @sg: Scatter-gather table.
4391 * @n_elem: Number of elements in s/g table.
4393 * Initialize the data-related elements of queued_cmd @qc
4394 * to point to a scatter-gather table @sg, containing @n_elem
4398 * spin_lock_irqsave(host lock)
4401 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4402 unsigned int n_elem)
4404 qc->flags |= ATA_QCFLAG_SG;
4406 qc->n_elem = n_elem;
4407 qc->orig_n_elem = n_elem;
4411 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4412 * @qc: Command with memory buffer to be mapped.
4414 * DMA-map the memory buffer associated with queued_cmd @qc.
4417 * spin_lock_irqsave(host lock)
4420 * Zero on success, negative on error.
4423 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4425 struct ata_port *ap = qc->ap;
4426 int dir = qc->dma_dir;
4427 struct scatterlist *sg = qc->__sg;
4428 dma_addr_t dma_address;
4431 /* we must lengthen transfers to end on a 32-bit boundary */
4432 qc->pad_len = sg->length & 3;
4434 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4435 struct scatterlist *psg = &qc->pad_sgent;
4437 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4439 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4441 if (qc->tf.flags & ATA_TFLAG_WRITE)
4442 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4445 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4446 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4448 sg->length -= qc->pad_len;
4449 if (sg->length == 0)
4452 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4453 sg->length, qc->pad_len);
4461 dma_address = dma_map_single(ap->dev, qc->buf_virt,
4463 if (dma_mapping_error(dma_address)) {
4465 sg->length += qc->pad_len;
4469 sg_dma_address(sg) = dma_address;
4470 sg_dma_len(sg) = sg->length;
4473 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4474 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
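/*
 * Worked example (illustrative): a 510 byte ATAPI transfer gives
 * pad_len = 510 & 3 = 2, so the main mapping above shrinks to 508
 * bytes and the last two bytes travel via the 4 byte per-tag pad
 * buffer, keeping the overall DMA length 32-bit aligned.
 */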
4480 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4481 * @qc: Command with scatter-gather table to be mapped.
4483 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4486 * spin_lock_irqsave(host lock)
4489 * Zero on success, negative on error.
4493 static int ata_sg_setup(struct ata_queued_cmd *qc)
4495 struct ata_port *ap = qc->ap;
4496 struct scatterlist *sg = qc->__sg;
4497 struct scatterlist *lsg = &sg[qc->n_elem - 1];
4498 int n_elem, pre_n_elem, dir, trim_sg = 0;
4500 VPRINTK("ENTER, ata%u\n", ap->print_id);
4501 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4503 /* we must lengthen transfers to end on a 32-bit boundary */
4504 qc->pad_len = lsg->length & 3;
4506 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4507 struct scatterlist *psg = &qc->pad_sgent;
4508 unsigned int offset;
4510 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4512 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4515 * psg->page/offset are used to copy to-be-written
4516 * data in this function or read data in ata_sg_clean.
4518 offset = lsg->offset + lsg->length - qc->pad_len;
4519 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4520 psg->offset = offset_in_page(offset);
4522 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4523 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4524 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4525 kunmap_atomic(addr, KM_IRQ0);
4528 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4529 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4531 lsg->length -= qc->pad_len;
4532 if (lsg->length == 0)
4535 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4536 qc->n_elem - 1, lsg->length, qc->pad_len);
4539 pre_n_elem = qc->n_elem;
4540 if (trim_sg && pre_n_elem)
4549 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4551 /* restore last sg */
4552 lsg->length += qc->pad_len;
4556 DPRINTK("%d sg elements mapped\n", n_elem);
4559 qc->n_elem = n_elem;
4565 * swap_buf_le16 - swap halves of 16-bit words in place
4566 * @buf: Buffer to swap
4567 * @buf_words: Number of 16-bit words in buffer.
4569 * Swap halves of 16-bit words if needed to convert from
4570 * little-endian byte order to native cpu byte order, or vice-versa.
4574 * Inherited from caller.
4576 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4581 for (i = 0; i < buf_words; i++)
4582 buf[i] = le16_to_cpu(buf[i]);
4583 #endif /* __BIG_ENDIAN */
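/*
 * Usage sketch (illustrative; the helper name is made up): IDENTIFY
 * data arrives as 256 little-endian 16-bit words, so on big-endian
 * hosts it is converted in place before the words are interpreted.
 */
static inline void example_fixup_id_data(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);
}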
4587 * ata_data_xfer - Transfer data by PIO
4588 * @adev: device to target
4589 * @buf: data buffer
4590 * @buflen: buffer length
4591 * @write_data: read/write
4593 * Transfer data from/to the device data register by PIO.
4596 * Inherited from caller.
4598 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4599 unsigned int buflen, int write_data)
4601 struct ata_port *ap = adev->link->ap;
4602 unsigned int words = buflen >> 1;
4604 /* Transfer multiple of 2 bytes */
4606 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4608 ioread16_rep(ap->ioaddr.data_addr, buf, words);
4610 /* Transfer trailing 1 byte, if any. */
4611 if (unlikely(buflen & 0x01)) {
4612 u16 align_buf[1] = { 0 };
4613 unsigned char *trailing_buf = buf + buflen - 1;
4616 memcpy(align_buf, trailing_buf, 1);
4617 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4619 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4620 memcpy(trailing_buf, align_buf, 1);
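/*
 * Worked example (illustrative): a 513 byte PIO transfer is done as
 * 256 16-bit accesses (513 >> 1) plus the trailing-byte path above,
 * which bounces the odd final byte through align_buf so the data
 * register is always accessed a full word at a time.
 */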
4626 * ata_data_xfer_noirq - Transfer data by PIO
4627 * @adev: device to target
4628 * @buf: data buffer
4629 * @buflen: buffer length
4630 * @write_data: read/write
4632 * Transfer data from/to the device data register by PIO. Do the
4633 * transfer with interrupts disabled.
4636 * Inherited from caller.
4638 void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4639 unsigned int buflen, int write_data)
4641 unsigned long flags;
4642 local_irq_save(flags);
4643 ata_data_xfer(adev, buf, buflen, write_data);
4644 local_irq_restore(flags);
4649 * ata_pio_sector - Transfer a sector of data.
4650 * @qc: Command on going
4652 * Transfer qc->sect_size bytes of data from/to the ATA device.
4655 * Inherited from caller.
4658 static void ata_pio_sector(struct ata_queued_cmd *qc)
4660 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4661 struct scatterlist *sg = qc->__sg;
4662 struct ata_port *ap = qc->ap;
4664 unsigned int offset;
4667 if (qc->curbytes == qc->nbytes - qc->sect_size)
4668 ap->hsm_task_state = HSM_ST_LAST;
4670 page = sg[qc->cursg].page;
4671 offset = sg[qc->cursg].offset + qc->cursg_ofs;
4673 /* get the current page and offset */
4674 page = nth_page(page, (offset >> PAGE_SHIFT));
4675 offset %= PAGE_SIZE;
4677 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4679 if (PageHighMem(page)) {
4680 unsigned long flags;
4682 /* FIXME: use a bounce buffer */
4683 local_irq_save(flags);
4684 buf = kmap_atomic(page, KM_IRQ0);
4686 /* do the actual data transfer */
4687 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4689 kunmap_atomic(buf, KM_IRQ0);
4690 local_irq_restore(flags);
4692 buf = page_address(page);
4693 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4696 qc->curbytes += qc->sect_size;
4697 qc->cursg_ofs += qc->sect_size;
4699 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4706 * ata_pio_sectors - Transfer one or many sectors.
4707 * @qc: Command on going
4709 * Transfer one or many sectors of data from/to the
4710 * ATA device for the DRQ request.
4713 * Inherited from caller.
4716 static void ata_pio_sectors(struct ata_queued_cmd *qc)
4718 if (is_multi_taskfile(&qc->tf)) {
4719 /* READ/WRITE MULTIPLE */
4722 WARN_ON(qc->dev->multi_count == 0);
4724 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4725 qc->dev->multi_count);
4731 ata_altstatus(qc->ap); /* flush */
4735 * atapi_send_cdb - Write CDB bytes to hardware
4736 * @ap: Port to which ATAPI device is attached.
4737 * @qc: Taskfile currently active
4739 * When device has indicated its readiness to accept
4740 * a CDB, this function is called. Send the CDB.
4746 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4749 DPRINTK("send cdb\n");
4750 WARN_ON(qc->dev->cdb_len < 12);
4752 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4753 ata_altstatus(ap); /* flush */
4755 switch (qc->tf.protocol) {
4756 case ATA_PROT_ATAPI:
4757 ap->hsm_task_state = HSM_ST;
4759 case ATA_PROT_ATAPI_NODATA:
4760 ap->hsm_task_state = HSM_ST_LAST;
4762 case ATA_PROT_ATAPI_DMA:
4763 ap->hsm_task_state = HSM_ST_LAST;
4764 /* initiate bmdma */
4765 ap->ops->bmdma_start(qc);
4771 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4772 * @qc: Command on going
4773 * @bytes: number of bytes
4775 * Transfer data from/to the ATAPI device.
4778 * Inherited from caller.
4782 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4784 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4785 struct scatterlist *sg = qc->__sg;
4786 struct ata_port *ap = qc->ap;
4789 unsigned int offset, count;
4791 if (qc->curbytes + bytes >= qc->nbytes)
4792 ap->hsm_task_state = HSM_ST_LAST;
4795 if (unlikely(qc->cursg >= qc->n_elem)) {
4797 * The end of qc->sg is reached and the device expects
4798 * more data to transfer. In order not to overrun qc->sg
4799 * while still honouring the length in the byte count register:
4800 * - for reads, discard the trailing data from the device
4801 * - for writes, pad the device with zero data
4803 u16 pad_buf[1] = { 0 };
4804 unsigned int words = bytes >> 1;
4807 if (words) /* warning if bytes > 1 */
4808 ata_dev_printk(qc->dev, KERN_WARNING,
4809 "%u bytes trailing data\n", bytes);
4811 for (i = 0; i < words; i++)
4812 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4814 ap->hsm_task_state = HSM_ST_LAST;
4818 sg = &qc->__sg[qc->cursg];
4821 offset = sg->offset + qc->cursg_ofs;
4823 /* get the current page and offset */
4824 page = nth_page(page, (offset >> PAGE_SHIFT));
4825 offset %= PAGE_SIZE;
4827 /* don't overrun current sg */
4828 count = min(sg->length - qc->cursg_ofs, bytes);
4830 /* don't cross page boundaries */
4831 count = min(count, (unsigned int)PAGE_SIZE - offset);
4833 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4835 if (PageHighMem(page)) {
4836 unsigned long flags;
4838 /* FIXME: use bounce buffer */
4839 local_irq_save(flags);
4840 buf = kmap_atomic(page, KM_IRQ0);
4842 /* do the actual data transfer */
4843 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4845 kunmap_atomic(buf, KM_IRQ0);
4846 local_irq_restore(flags);
4848 buf = page_address(page);
4849 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4853 qc->curbytes += count;
4854 qc->cursg_ofs += count;
4856 if (qc->cursg_ofs == sg->length) {
4866 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4867 * @qc: Command in progress
4869 * Transfer data from/to the ATAPI device.
4872 * Inherited from caller.
4875 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4877 struct ata_port *ap = qc->ap;
4878 struct ata_device *dev = qc->dev;
4879 unsigned int ireason, bc_lo, bc_hi, bytes;
4880 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4882 /* Abuse qc->result_tf for temp storage of intermediate TF
4883 * here to save some kernel stack usage.
4884 * For normal completion, qc->result_tf is not relevant. For
4885 * error, qc->result_tf is later overwritten by ata_qc_complete().
4886 * So, the correctness of qc->result_tf is not affected.
4888 ap->ops->tf_read(ap, &qc->result_tf);
4889 ireason = qc->result_tf.nsect;
4890 bc_lo = qc->result_tf.lbam;
4891 bc_hi = qc->result_tf.lbah;
4892 bytes = (bc_hi << 8) | bc_lo;
4894 /* shall be cleared to zero, indicating xfer of data */
4895 if (ireason & (1 << 0))
4898 /* make sure transfer direction matches expected */
4899 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4900 if (do_write != i_write)
4903 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4905 __atapi_pio_bytes(qc, bytes);
4906 ata_altstatus(ap); /* flush */
4911 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4912 qc->err_mask |= AC_ERR_HSM;
4913 ap->hsm_task_state = HSM_ST_ERR;
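/*
 * Illustrative decode of the interrupt reason handled above: with
 * ireason == 0x02 (CoD bit 0 clear => data transfer, IO bit 1 set =>
 * device-to-host), lbam == 0x00 and lbah == 0x02, the device wants to
 * move 0x0200 == 512 bytes to the host, so a read (do_write == 0) is
 * the expected direction.
 */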
4917 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4918 * @ap: the target ata_port
4922 * 1 if ok in workqueue, 0 otherwise.
4925 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4927 if (qc->tf.flags & ATA_TFLAG_POLLING)
4930 if (ap->hsm_task_state == HSM_ST_FIRST) {
4931 if (qc->tf.protocol == ATA_PROT_PIO &&
4932 (qc->tf.flags & ATA_TFLAG_WRITE))
4935 if (is_atapi_taskfile(&qc->tf) &&
4936 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4944 * ata_hsm_qc_complete - finish a qc running on standard HSM
4945 * @qc: Command to complete
4946 * @in_wq: 1 if called from workqueue, 0 otherwise
4948 * Finish @qc which is running on standard HSM.
4951 * If @in_wq is zero, spin_lock_irqsave(host lock).
4952 * Otherwise, none on entry and grabs host lock.
4954 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4956 struct ata_port *ap = qc->ap;
4957 unsigned long flags;
4959 if (ap->ops->error_handler) {
4961 spin_lock_irqsave(ap->lock, flags);
4963 /* EH might have kicked in while host lock is
4966 qc = ata_qc_from_tag(ap, qc->tag);
4968 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4969 ap->ops->irq_on(ap);
4970 ata_qc_complete(qc);
4972 ata_port_freeze(ap);
4975 spin_unlock_irqrestore(ap->lock, flags);
4977 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4978 ata_qc_complete(qc);
4980 ata_port_freeze(ap);
4984 spin_lock_irqsave(ap->lock, flags);
4985 ap->ops->irq_on(ap);
4986 ata_qc_complete(qc);
4987 spin_unlock_irqrestore(ap->lock, flags);
4989 ata_qc_complete(qc);
4994 * ata_hsm_move - move the HSM to the next state.
4995 * @ap: the target ata_port
4997 * @status: current device status
4998 * @in_wq: 1 if called from workqueue, 0 otherwise
5001 * 1 when poll next status needed, 0 otherwise.
5003 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5004 u8 status, int in_wq)
5006 unsigned long flags = 0;
5009 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5011 /* Make sure ata_qc_issue_prot() does not throw things
5012 * like DMA polling into the workqueue. Notice that
5013 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5015 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
5018 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
5019 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
5021 switch (ap->hsm_task_state) {
5023 /* Send first data block or PACKET CDB */
5025 /* If polling, we will stay in the work queue after
5026 * sending the data. Otherwise, interrupt handler
5027 * takes over after sending the data.
5029 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5031 /* check device status */
5032 if (unlikely((status & ATA_DRQ) == 0)) {
5033 /* handle BSY=0, DRQ=0 as error */
5034 if (likely(status & (ATA_ERR | ATA_DF)))
5035 /* device stops HSM for abort/error */
5036 qc->err_mask |= AC_ERR_DEV;
5038 /* HSM violation. Let EH handle this */
5039 qc->err_mask |= AC_ERR_HSM;
5041 ap->hsm_task_state = HSM_ST_ERR;
5045 /* Device should not ask for data transfer (DRQ=1)
5046 * when it finds something wrong.
5047 * We ignore DRQ here and stop the HSM by
5048 * changing hsm_task_state to HSM_ST_ERR and
5049 * let the EH abort the command or reset the device.
5051 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5052 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
5053 "error, dev_stat 0x%X\n", status);
5054 qc->err_mask |= AC_ERR_HSM;
5055 ap->hsm_task_state = HSM_ST_ERR;
5059 /* Send the CDB (atapi) or the first data block (ata pio out).
5060 * During the state transition, interrupt handler shouldn't
5061 * be invoked before the data transfer is complete and
5062 * hsm_task_state is changed. Hence, the following locking.
5065 spin_lock_irqsave(ap->lock, flags);
5067 if (qc->tf.protocol == ATA_PROT_PIO) {
5068 /* PIO data out protocol.
5069 * send first data block.
5072 /* ata_pio_sectors() might change the state
5073 * to HSM_ST_LAST. so, the state is changed here
5074 * before ata_pio_sectors().
5076 ap->hsm_task_state = HSM_ST;
5077 ata_pio_sectors(qc);
5080 atapi_send_cdb(ap, qc);
5083 spin_unlock_irqrestore(ap->lock, flags);
5085 /* if polling, ata_pio_task() handles the rest.
5086 * otherwise, interrupt handler takes over from here.
5091 /* complete command or read/write the data register */
5092 if (qc->tf.protocol == ATA_PROT_ATAPI) {
5093 /* ATAPI PIO protocol */
5094 if ((status & ATA_DRQ) == 0) {
5095 /* No more data to transfer or device error.
5096 * Device error will be tagged in HSM_ST_LAST.
5098 ap->hsm_task_state = HSM_ST_LAST;
5102 /* Device should not ask for data transfer (DRQ=1)
5103 * when it finds something wrong.
5104 * We ignore DRQ here and stop the HSM by
5105 * changing hsm_task_state to HSM_ST_ERR and
5106 * let the EH abort the command or reset the device.
5108 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5109 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5110 "device error, dev_stat 0x%X\n",
5112 qc->err_mask |= AC_ERR_HSM;
5113 ap->hsm_task_state = HSM_ST_ERR;
5117 atapi_pio_bytes(qc);
5119 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5120 /* bad ireason reported by device */
5124 /* ATA PIO protocol */
5125 if (unlikely((status & ATA_DRQ) == 0)) {
5126 /* handle BSY=0, DRQ=0 as error */
5127 if (likely(status & (ATA_ERR | ATA_DF)))
5128 /* device stops HSM for abort/error */
5129 qc->err_mask |= AC_ERR_DEV;
5131 /* HSM violation. Let EH handle this.
5132 * Phantom devices also trigger this
5133 * condition. Mark the no-device hint.
5135 qc->err_mask |= AC_ERR_HSM |
5138 ap->hsm_task_state = HSM_ST_ERR;
5142 /* For PIO reads, some devices may ask for
5143 * data transfer (DRQ=1) along with ERR=1.
5144 * We respect DRQ here and transfer one
5145 * block of junk data before changing the
5146 * hsm_task_state to HSM_ST_ERR.
5148 * For PIO writes, ERR=1 DRQ=1 doesn't make
5149 * sense since the data block has been
5150 * transferred to the device.
5152 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5153 /* data might be corrupted */
5154 qc->err_mask |= AC_ERR_DEV;
5156 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5157 ata_pio_sectors(qc);
5158 status = ata_wait_idle(ap);
5161 if (status & (ATA_BUSY | ATA_DRQ))
5162 qc->err_mask |= AC_ERR_HSM;
5164 /* ata_pio_sectors() might change the
5165 * state to HSM_ST_LAST. so, the state
5166 * is changed after ata_pio_sectors().
5168 ap->hsm_task_state = HSM_ST_ERR;
5172 ata_pio_sectors(qc);
5174 if (ap->hsm_task_state == HSM_ST_LAST &&
5175 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5177 status = ata_wait_idle(ap);
5186 if (unlikely(!ata_ok(status))) {
5187 qc->err_mask |= __ac_err_mask(status);
5188 ap->hsm_task_state = HSM_ST_ERR;
5192 /* no more data to transfer */
5193 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5194 ap->print_id, qc->dev->devno, status);
5196 WARN_ON(qc->err_mask);
5198 ap->hsm_task_state = HSM_ST_IDLE;
5200 /* complete taskfile transaction */
5201 ata_hsm_qc_complete(qc, in_wq);
5207 /* make sure qc->err_mask is available to
5208 * know what's wrong and recover
5210 WARN_ON(qc->err_mask == 0);
5212 ap->hsm_task_state = HSM_ST_IDLE;
5214 /* complete taskfile transaction */
5215 ata_hsm_qc_complete(qc, in_wq);
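/*
 * Rough walk-through for a polled PIO data-in command (a sketch of the
 * states above, not an additional code path): ata_qc_issue_prot() sets
 * HSM_ST and queues ata_pio_task(); each poll calls ata_hsm_move(),
 * which transfers one DRQ block via ata_pio_sectors(); when the final
 * block starts the state becomes HSM_ST_LAST, and once the device
 * reports !BSY && !DRQ the qc is completed and the state returns to
 * HSM_ST_IDLE.
 */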
5227 static void ata_pio_task(struct work_struct *work)
5229 struct ata_port *ap =
5230 container_of(work, struct ata_port, port_task.work);
5231 struct ata_queued_cmd *qc = ap->port_task_data;
5236 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5239 * This is purely heuristic. This is a fast path.
5240 * Sometimes when we enter, BSY will be cleared in
5241 * a chk-status or two. If not, the drive is probably seeking
5242 * or something. Snooze for a couple msecs, then
5243 * chk-status again. If still busy, queue delayed work.
5245 status = ata_busy_wait(ap, ATA_BUSY, 5);
5246 if (status & ATA_BUSY) {
5248 status = ata_busy_wait(ap, ATA_BUSY, 10);
5249 if (status & ATA_BUSY) {
5250 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5256 poll_next = ata_hsm_move(ap, qc, status, 1);
5258 /* another command or interrupt handler
5259 * may be running at this point.
5266 * ata_qc_new - Request an available ATA command, for queueing
5267 * @ap: Port associated with device @dev
5268 * @dev: Device from whom we request an available command structure
5274 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5276 struct ata_queued_cmd *qc = NULL;
5279 /* no command while frozen */
5280 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5283 /* the last tag is reserved for internal command. */
5284 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5285 if (!test_and_set_bit(i, &ap->qc_allocated)) {
5286 qc = __ata_qc_from_tag(ap, i);
5297 * ata_qc_new_init - Request an available ATA command, and initialize it
5298 * @dev: Device from whom we request an available command structure
5304 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5306 struct ata_port *ap = dev->link->ap;
5307 struct ata_queued_cmd *qc;
5309 qc = ata_qc_new(ap);
5322 * ata_qc_free - free unused ata_queued_cmd
5323 * @qc: Command to complete
5325 * Designed to free unused ata_queued_cmd object
5326 * in case something prevents using it.
5329 * spin_lock_irqsave(host lock)
5331 void ata_qc_free(struct ata_queued_cmd *qc)
5333 struct ata_port *ap = qc->ap;
5336 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5340 if (likely(ata_tag_valid(tag))) {
5341 qc->tag = ATA_TAG_POISON;
5342 clear_bit(tag, &ap->qc_allocated);
5346 void __ata_qc_complete(struct ata_queued_cmd *qc)
5348 struct ata_port *ap = qc->ap;
5349 struct ata_link *link = qc->dev->link;
5351 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5352 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5354 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5357 /* command should be marked inactive atomically with qc completion */
5358 if (qc->tf.protocol == ATA_PROT_NCQ)
5359 link->sactive &= ~(1 << qc->tag);
5361 link->active_tag = ATA_TAG_POISON;
5363 /* atapi: mark qc as inactive to prevent the interrupt handler
5364 * from completing the command twice later, before the error handler
5365 * is called. (when rc != 0 and atapi request sense is needed)
5367 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5368 ap->qc_active &= ~(1 << qc->tag);
5370 /* call completion callback */
5371 qc->complete_fn(qc);
5374 static void fill_result_tf(struct ata_queued_cmd *qc)
5376 struct ata_port *ap = qc->ap;
5378 qc->result_tf.flags = qc->tf.flags;
5379 ap->ops->tf_read(ap, &qc->result_tf);
5383 * ata_qc_complete - Complete an active ATA command
5384 * @qc: Command to complete
5387 * Indicate to the mid and upper layers that an ATA
5388 * command has completed, with either an ok or not-ok status.
5391 * spin_lock_irqsave(host lock)
5393 void ata_qc_complete(struct ata_queued_cmd *qc)
5395 struct ata_port *ap = qc->ap;
5397 /* XXX: New EH and old EH use different mechanisms to
5398 * synchronize EH with regular execution path.
5400 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5401 * Normal execution path is responsible for not accessing a
5402 * failed qc. libata core enforces the rule by returning NULL
5403 * from ata_qc_from_tag() for failed qcs.
5405 * Old EH depends on ata_qc_complete() nullifying completion
5406 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5407 * not synchronize with interrupt handler. Only PIO task is
5410 if (ap->ops->error_handler) {
5411 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5413 if (unlikely(qc->err_mask))
5414 qc->flags |= ATA_QCFLAG_FAILED;
5416 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5417 if (!ata_tag_internal(qc->tag)) {
5418 /* always fill result TF for failed qc */
5420 ata_qc_schedule_eh(qc);
5425 /* read result TF if requested */
5426 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5429 __ata_qc_complete(qc);
5431 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5434 /* read result TF if failed or requested */
5435 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5438 __ata_qc_complete(qc);
5443 * ata_qc_complete_multiple - Complete multiple qcs successfully
5444 * @ap: port in question
5445 * @qc_active: new qc_active mask
5446 * @finish_qc: LLDD callback invoked before completing a qc
5448 * Complete in-flight commands. This function is meant to be
5449 * called from low-level driver's interrupt routine to complete
5450 * requests normally. ap->qc_active and @qc_active are compared
5451 * and commands are completed accordingly.
5454 * spin_lock_irqsave(host lock)
5457 * Number of completed commands on success, -errno otherwise.
5459 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5460 void (*finish_qc)(struct ata_queued_cmd *))
5466 done_mask = ap->qc_active ^ qc_active;
5468 if (unlikely(done_mask & qc_active)) {
5469 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5470 "(%08x->%08x)\n", ap->qc_active, qc_active);
5474 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5475 struct ata_queued_cmd *qc;
5477 if (!(done_mask & (1 << i)))
5480 if ((qc = ata_qc_from_tag(ap, i))) {
5483 ata_qc_complete(qc);
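/*
 * Worked example of the qc_active bookkeeping above (illustrative
 * only): if ap->qc_active was 0x07 (tags 0-2 in flight) and the
 * controller now reports qc_active == 0x01, done_mask is 0x06 and the
 * commands on tags 1 and 2 are completed; a bit newly set in
 * @qc_active would instead trip the illegal-transition check.
 */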
5491 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5493 struct ata_port *ap = qc->ap;
5495 switch (qc->tf.protocol) {
5498 case ATA_PROT_ATAPI_DMA:
5501 case ATA_PROT_ATAPI:
5503 if (ap->flags & ATA_FLAG_PIO_DMA)
5516 * ata_qc_issue - issue taskfile to device
5517 * @qc: command to issue to device
5519 * Prepare an ATA command for submission to the device.
5520 * This includes mapping the data into a DMA-able
5521 * area, filling in the S/G table, and finally
5522 * writing the taskfile to hardware, starting the command.
5525 * spin_lock_irqsave(host lock)
5527 void ata_qc_issue(struct ata_queued_cmd *qc)
5529 struct ata_port *ap = qc->ap;
5530 struct ata_link *link = qc->dev->link;
5532 /* Make sure only one non-NCQ command is outstanding. The
5533 * check is skipped for old EH because it reuses active qc to
5534 * request ATAPI sense.
5536 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5538 if (qc->tf.protocol == ATA_PROT_NCQ) {
5539 WARN_ON(link->sactive & (1 << qc->tag));
5540 link->sactive |= 1 << qc->tag;
5542 WARN_ON(link->sactive);
5543 link->active_tag = qc->tag;
5546 qc->flags |= ATA_QCFLAG_ACTIVE;
5547 ap->qc_active |= 1 << qc->tag;
5549 if (ata_should_dma_map(qc)) {
5550 if (qc->flags & ATA_QCFLAG_SG) {
5551 if (ata_sg_setup(qc))
5553 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5554 if (ata_sg_setup_one(qc))
5558 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5561 ap->ops->qc_prep(qc);
5563 qc->err_mask |= ap->ops->qc_issue(qc);
5564 if (unlikely(qc->err_mask))
5569 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5570 qc->err_mask |= AC_ERR_SYSTEM;
5572 ata_qc_complete(qc);
5576 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5577 * @qc: command to issue to device
5579 * Using various libata functions and hooks, this function
5580 * starts an ATA command. ATA commands are grouped into
5581 * classes called "protocols", and issuing each type of protocol
5582 * is slightly different.
5584 * May be used as the qc_issue() entry in ata_port_operations.
5587 * spin_lock_irqsave(host lock)
5590 * Zero on success, AC_ERR_* mask on failure
5593 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5595 struct ata_port *ap = qc->ap;
5597 /* Use polling pio if the LLD doesn't handle
5598 * interrupt driven pio and atapi CDB interrupt.
5600 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5601 switch (qc->tf.protocol) {
5603 case ATA_PROT_NODATA:
5604 case ATA_PROT_ATAPI:
5605 case ATA_PROT_ATAPI_NODATA:
5606 qc->tf.flags |= ATA_TFLAG_POLLING;
5608 case ATA_PROT_ATAPI_DMA:
5609 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5610 /* see ata_dma_blacklisted() */
5618 /* select the device */
5619 ata_dev_select(ap, qc->dev->devno, 1, 0);
5621 /* start the command */
5622 switch (qc->tf.protocol) {
5623 case ATA_PROT_NODATA:
5624 if (qc->tf.flags & ATA_TFLAG_POLLING)
5625 ata_qc_set_polling(qc);
5627 ata_tf_to_host(ap, &qc->tf);
5628 ap->hsm_task_state = HSM_ST_LAST;
5630 if (qc->tf.flags & ATA_TFLAG_POLLING)
5631 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5636 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5638 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5639 ap->ops->bmdma_setup(qc); /* set up bmdma */
5640 ap->ops->bmdma_start(qc); /* initiate bmdma */
5641 ap->hsm_task_state = HSM_ST_LAST;
5645 if (qc->tf.flags & ATA_TFLAG_POLLING)
5646 ata_qc_set_polling(qc);
5648 ata_tf_to_host(ap, &qc->tf);
5650 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5651 /* PIO data out protocol */
5652 ap->hsm_task_state = HSM_ST_FIRST;
5653 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5655 /* always send first data block using
5656 * the ata_pio_task() codepath.
5659 /* PIO data in protocol */
5660 ap->hsm_task_state = HSM_ST;
5662 if (qc->tf.flags & ATA_TFLAG_POLLING)
5663 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5665 /* if polling, ata_pio_task() handles the rest.
5666 * otherwise, interrupt handler takes over from here.
5672 case ATA_PROT_ATAPI:
5673 case ATA_PROT_ATAPI_NODATA:
5674 if (qc->tf.flags & ATA_TFLAG_POLLING)
5675 ata_qc_set_polling(qc);
5677 ata_tf_to_host(ap, &qc->tf);
5679 ap->hsm_task_state = HSM_ST_FIRST;
5681 /* send cdb by polling if no cdb interrupt */
5682 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5683 (qc->tf.flags & ATA_TFLAG_POLLING))
5684 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5687 case ATA_PROT_ATAPI_DMA:
5688 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5690 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5691 ap->ops->bmdma_setup(qc); /* set up bmdma */
5692 ap->hsm_task_state = HSM_ST_FIRST;
5694 /* send cdb by polling if no cdb interrupt */
5695 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5696 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5701 return AC_ERR_SYSTEM;
5708 * ata_host_intr - Handle host interrupt for given (port, task)
5709 * @ap: Port on which interrupt arrived (possibly...)
5710 * @qc: Taskfile currently active in engine
5712 * Handle host interrupt for given queued command. Currently,
5713 * only DMA interrupts are handled. All other commands are
5714 * handled via polling with interrupts disabled (nIEN bit).
5717 * spin_lock_irqsave(host lock)
5720 * One if interrupt was handled, zero if not (shared irq).
5723 inline unsigned int ata_host_intr (struct ata_port *ap,
5724 struct ata_queued_cmd *qc)
5726 struct ata_eh_info *ehi = &ap->link.eh_info;
5727 u8 status, host_stat = 0;
5729 VPRINTK("ata%u: protocol %d task_state %d\n",
5730 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5732 /* Check whether we are expecting interrupt in this state */
5733 switch (ap->hsm_task_state) {
5735 /* Some pre-ATAPI-4 devices assert INTRQ
5736 * at this state when ready to receive CDB.
5739 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5740 * The flag was turned on only for atapi devices.
5741 * No need to check is_atapi_taskfile(&qc->tf) again.
5743 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5747 if (qc->tf.protocol == ATA_PROT_DMA ||
5748 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5749 /* check status of DMA engine */
5750 host_stat = ap->ops->bmdma_status(ap);
5751 VPRINTK("ata%u: host_stat 0x%X\n",
5752 ap->print_id, host_stat);
5754 /* if it's not our irq... */
5755 if (!(host_stat & ATA_DMA_INTR))
5758 /* before we do anything else, clear DMA-Start bit */
5759 ap->ops->bmdma_stop(qc);
5761 if (unlikely(host_stat & ATA_DMA_ERR)) {
5762 /* error when transferring data to/from memory */
5763 qc->err_mask |= AC_ERR_HOST_BUS;
5764 ap->hsm_task_state = HSM_ST_ERR;
5774 /* check altstatus */
5775 status = ata_altstatus(ap);
5776 if (status & ATA_BUSY)
5779 /* check main status, clearing INTRQ */
5780 status = ata_chk_status(ap);
5781 if (unlikely(status & ATA_BUSY))
5784 /* ack bmdma irq events */
5785 ap->ops->irq_clear(ap);
5787 ata_hsm_move(ap, qc, status, 0);
5789 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5790 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5791 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5793 return 1; /* irq handled */
5796 ap->stats.idle_irq++;
5799 if ((ap->stats.idle_irq % 1000) == 0) {
5801 ap->ops->irq_clear(ap);
5802 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5806 return 0; /* irq not handled */
5810 * ata_interrupt - Default ATA host interrupt handler
5811 * @irq: irq line (unused)
5812 * @dev_instance: pointer to our ata_host information structure
5814 * Default interrupt handler for PCI IDE devices. Calls
5815 * ata_host_intr() for each port that is not disabled.
5818 * Obtains host lock during operation.
5821 * IRQ_NONE or IRQ_HANDLED.
5824 irqreturn_t ata_interrupt (int irq, void *dev_instance)
5826 struct ata_host *host = dev_instance;
5828 unsigned int handled = 0;
5829 unsigned long flags;
5831 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5832 spin_lock_irqsave(&host->lock, flags);
5834 for (i = 0; i < host->n_ports; i++) {
5835 struct ata_port *ap;
5837 ap = host->ports[i];
5839 !(ap->flags & ATA_FLAG_DISABLED)) {
5840 struct ata_queued_cmd *qc;
5842 qc = ata_qc_from_tag(ap, ap->link.active_tag);
5843 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5844 (qc->flags & ATA_QCFLAG_ACTIVE))
5845 handled |= ata_host_intr(ap, qc);
5849 spin_unlock_irqrestore(&host->lock, flags);
5851 return IRQ_RETVAL(handled);
5855 * sata_scr_valid - test whether SCRs are accessible
5856 * @link: ATA link to test SCR accessibility for
5858 * Test whether SCRs are accessible for @link.
5864 * 1 if SCRs are accessible, 0 otherwise.
5866 int sata_scr_valid(struct ata_link *link)
5868 struct ata_port *ap = link->ap;
5870 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5874 * sata_scr_read - read SCR register of the specified port
5875 * @link: ATA link to read SCR for
5877 * @val: Place to store read value
5879 * Read SCR register @reg of @link into *@val. This function is
5880 * guaranteed to succeed if the cable type of the port is SATA
5881 * and the port implements ->scr_read.
5887 * 0 on success, negative errno on failure.
5889 int sata_scr_read(struct ata_link *link, int reg, u32 *val)
5891 struct ata_port *ap = link->ap;
5893 if (sata_scr_valid(link))
5894 return ap->ops->scr_read(ap, reg, val);
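/*
 * Minimal usage sketch (hypothetical caller), mirroring what
 * ata_link_online() below does:
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
 *	    (sstatus & 0xf) == 0x3)
 *		;	// DET == 3: device present, PHY communication up
 */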
5899 * sata_scr_write - write SCR register of the specified port
5900 * @link: ATA link to write SCR for
5901 * @reg: SCR to write
5902 * @val: value to write
5904 * Write @val to SCR register @reg of @link. This function is
5905 * guaranteed to succeed if the cable type of the port is SATA
5906 * and the port implements ->scr_write.
5912 * 0 on success, negative errno on failure.
5914 int sata_scr_write(struct ata_link *link, int reg, u32 val)
5916 struct ata_port *ap = link->ap;
5918 if (sata_scr_valid(link))
5919 return ap->ops->scr_write(ap, reg, val);
5924 * sata_scr_write_flush - write SCR register of the specified port and flush
5925 * @link: ATA link to write SCR for
5926 * @reg: SCR to write
5927 * @val: value to write
5929 * This function is identical to sata_scr_write() except that this
5930 * function performs a flush after writing to the register.
5936 * 0 on success, negative errno on failure.
5938 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
5940 struct ata_port *ap = link->ap;
5943 if (sata_scr_valid(link)) {
5944 rc = ap->ops->scr_write(ap, reg, val);
5946 rc = ap->ops->scr_read(ap, reg, &val);
5953 * ata_link_online - test whether the given link is online
5954 * @link: ATA link to test
5956 * Test whether @link is online. Note that this function returns
5957 * 0 if online status of @link cannot be obtained, so
5958 * ata_link_online(link) != !ata_link_offline(link).
5964 * 1 if the port online status is available and online.
5966 int ata_link_online(struct ata_link *link)
5970 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5971 (sstatus & 0xf) == 0x3)
5977 * ata_link_offline - test whether the given link is offline
5978 * @link: ATA link to test
5980 * Test whether @link is offline. Note that this function
5981 * returns 0 if offline status of @link cannot be obtained, so
5982 * ata_link_online(link) != !ata_link_offline(link).
5988 * 1 if the port offline status is available and offline.
5990 int ata_link_offline(struct ata_link *link)
5994 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5995 (sstatus & 0xf) != 0x3)
6000 int ata_flush_cache(struct ata_device *dev)
6002 unsigned int err_mask;
6005 if (!ata_try_flush_cache(dev))
6008 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
6009 cmd = ATA_CMD_FLUSH_EXT;
6011 cmd = ATA_CMD_FLUSH;
6013 /* This is wrong. On a failed flush we get back the LBA of the lost
6014 sector and we should (assuming it wasn't aborted as unknown) issue
6015 a further flush command to continue the writeback until it
6017 err_mask = ata_do_simple_cmd(dev, cmd);
6019 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6027 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6028 unsigned int action, unsigned int ehi_flags,
6031 unsigned long flags;
6034 for (i = 0; i < host->n_ports; i++) {
6035 struct ata_port *ap = host->ports[i];
6036 struct ata_link *link;
6038 /* Previous resume operation might still be in
6039 * progress. Wait for PM_PENDING to clear.
6041 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6042 ata_port_wait_eh(ap);
6043 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6046 /* request PM ops to EH */
6047 spin_lock_irqsave(ap->lock, flags);
6052 ap->pm_result = &rc;
6055 ap->pflags |= ATA_PFLAG_PM_PENDING;
6056 __ata_port_for_each_link(link, ap) {
6057 link->eh_info.action |= action;
6058 link->eh_info.flags |= ehi_flags;
6061 ata_port_schedule_eh(ap);
6063 spin_unlock_irqrestore(ap->lock, flags);
6065 /* wait and check result */
6067 ata_port_wait_eh(ap);
6068 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6078 * ata_host_suspend - suspend host
6079 * @host: host to suspend
6082 * Suspend @host. Actual operation is performed by EH. This
6083 * function requests EH to perform PM operations and waits for EH
6087 * Kernel thread context (may sleep).
6090 * 0 on success, -errno on failure.
6092 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
6096 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
6098 host->dev->power.power_state = mesg;
6103 * ata_host_resume - resume host
6104 * @host: host to resume
6106 * Resume @host. Actual operation is performed by EH. This
6107 * function requests EH to perform PM operations and returns.
6108 * Note that all resume operations are performed in parallel.
6111 * Kernel thread context (may sleep).
6113 void ata_host_resume(struct ata_host *host)
6115 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6116 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6117 host->dev->power.power_state = PMSG_ON;
6122 * ata_port_start - Set port up for dma.
6123 * @ap: Port to initialize
6125 * Called just after data structures for each port are
6126 * initialized. Allocates space for PRD table.
6128 * May be used as the port_start() entry in ata_port_operations.
6131 * Inherited from caller.
6133 int ata_port_start(struct ata_port *ap)
6135 struct device *dev = ap->dev;
6138 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6143 rc = ata_pad_alloc(ap, dev);
6147 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6148 (unsigned long long)ap->prd_dma);
6153 * ata_dev_init - Initialize an ata_device structure
6154 * @dev: Device structure to initialize
6156 * Initialize @dev in preparation for probing.
6159 * Inherited from caller.
6161 void ata_dev_init(struct ata_device *dev)
6163 struct ata_link *link = dev->link;
6164 struct ata_port *ap = link->ap;
6165 unsigned long flags;
6167 /* SATA spd limit is bound to the first device */
6168 link->sata_spd_limit = link->hw_sata_spd_limit;
6171 /* High bits of dev->flags are used to record warm plug
6172 * requests which occur asynchronously. Synchronize using
6175 spin_lock_irqsave(ap->lock, flags);
6176 dev->flags &= ~ATA_DFLAG_INIT_MASK;
6178 spin_unlock_irqrestore(ap->lock, flags);
6180 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6181 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6182 dev->pio_mask = UINT_MAX;
6183 dev->mwdma_mask = UINT_MAX;
6184 dev->udma_mask = UINT_MAX;
6188 * ata_link_init - Initialize an ata_link structure
6189 * @ap: ATA port link is attached to
6190 * @link: Link structure to initialize
6191 * @pmp: Port multiplier port number
6196 * Kernel thread context (may sleep)
6198 static void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
6202 /* clear everything except for devices */
6203 memset(link, 0, offsetof(struct ata_link, device[0]));
6207 link->active_tag = ATA_TAG_POISON;
6208 link->hw_sata_spd_limit = UINT_MAX;
6210 /* can't use iterator, ap isn't initialized yet */
6211 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6212 struct ata_device *dev = &link->device[i];
6215 dev->devno = dev - link->device;
6221 * sata_link_init_spd - Initialize link->sata_spd_limit
6222 * @link: Link to configure sata_spd_limit for
6224 * Initialize @link->[hw_]sata_spd_limit to the currently
6228 * Kernel thread context (may sleep).
6231 * 0 on success, -errno on failure.
6233 static int sata_link_init_spd(struct ata_link *link)
6238 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6242 spd = (scontrol >> 4) & 0xf;
6244 link->hw_sata_spd_limit &= (1 << spd) - 1;
6246 link->sata_spd_limit = link->hw_sata_spd_limit;
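/*
 * Example of the masking above (illustrative): an SControl SPD field of
 * 2 (limit to Gen2) yields hw_sata_spd_limit &= (1 << 2) - 1 == 0x3,
 * i.e. 1.5 Gbps and 3.0 Gbps remain allowed.
 */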
6252 * ata_port_alloc - allocate and initialize basic ATA port resources
6253 * @host: ATA host this allocated port belongs to
6255 * Allocate and initialize basic ATA port resources.
6258 * Allocate ATA port on success, NULL on failure.
6261 * Inherited from calling layer (may sleep).
6263 struct ata_port *ata_port_alloc(struct ata_host *host)
6265 struct ata_port *ap;
6269 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6273 ap->pflags |= ATA_PFLAG_INITIALIZING;
6274 ap->lock = &host->lock;
6275 ap->flags = ATA_FLAG_DISABLED;
6277 ap->ctl = ATA_DEVCTL_OBS;
6279 ap->dev = host->dev;
6280 ap->last_ctl = 0xFF;
6282 #if defined(ATA_VERBOSE_DEBUG)
6283 /* turn on all debugging levels */
6284 ap->msg_enable = 0x00FF;
6285 #elif defined(ATA_DEBUG)
6286 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6288 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6291 INIT_DELAYED_WORK(&ap->port_task, NULL);
6292 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6293 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6294 INIT_LIST_HEAD(&ap->eh_done_q);
6295 init_waitqueue_head(&ap->eh_wait_q);
6296 init_timer_deferrable(&ap->fastdrain_timer);
6297 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6298 ap->fastdrain_timer.data = (unsigned long)ap;
6300 ap->cbl = ATA_CBL_NONE;
6302 ata_link_init(ap, &ap->link, 0);
6305 ap->stats.unhandled_irq = 1;
6306 ap->stats.idle_irq = 1;
6311 static void ata_host_release(struct device *gendev, void *res)
6313 struct ata_host *host = dev_get_drvdata(gendev);
6316 for (i = 0; i < host->n_ports; i++) {
6317 struct ata_port *ap = host->ports[i];
6322 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6323 ap->ops->port_stop(ap);
6326 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6327 host->ops->host_stop(host);
6329 for (i = 0; i < host->n_ports; i++) {
6330 struct ata_port *ap = host->ports[i];
6336 scsi_host_put(ap->scsi_host);
6339 host->ports[i] = NULL;
6342 dev_set_drvdata(gendev, NULL);
6346 * ata_host_alloc - allocate and init basic ATA host resources
6347 * @dev: generic device this host is associated with
6348 * @max_ports: maximum number of ATA ports associated with this host
6350 * Allocate and initialize basic ATA host resources. LLD calls
6351 * this function to allocate a host, initializes it fully and
6352 * attaches it using ata_host_register().
6354 * @max_ports ports are allocated and host->n_ports is
6355 * initialized to @max_ports. The caller is allowed to decrease
6356 * host->n_ports before calling ata_host_register(). The unused
6357 * ports will be automatically freed on registration.
6360 * Allocate ATA host on success, NULL on failure.
6363 * Inherited from calling layer (may sleep).
6365 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6367 struct ata_host *host;
6373 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6376 /* alloc a container for our list of ATA ports (buses) */
6377 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6379 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6383 devres_add(dev, host);
6384 dev_set_drvdata(dev, host);
6386 spin_lock_init(&host->lock);
6388 host->n_ports = max_ports;
6390 /* allocate ports bound to this host */
6391 for (i = 0; i < max_ports; i++) {
6392 struct ata_port *ap;
6394 ap = ata_port_alloc(host);
6399 host->ports[i] = ap;
6402 devres_remove_group(dev, NULL);
6406 devres_release_group(dev, NULL);
6411 * ata_host_alloc_pinfo - alloc host and init with port_info array
6412 * @dev: generic device this host is associated with
6413 * @ppi: array of ATA port_info to initialize host with
6414 * @n_ports: number of ATA ports attached to this host
6416 * Allocate ATA host and initialize with info from @ppi. If NULL
6417 * terminated, @ppi may contain fewer entries than @n_ports. The
6418 * last entry will be used for the remaining ports.
6421 * Allocate ATA host on success, NULL on failure.
6424 * Inherited from calling layer (may sleep).
6426 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6427 const struct ata_port_info * const * ppi,
6430 const struct ata_port_info *pi;
6431 struct ata_host *host;
6434 host = ata_host_alloc(dev, n_ports);
6438 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6439 struct ata_port *ap = host->ports[i];
6444 ap->pio_mask = pi->pio_mask;
6445 ap->mwdma_mask = pi->mwdma_mask;
6446 ap->udma_mask = pi->udma_mask;
6447 ap->flags |= pi->flags;
6448 ap->link.flags |= pi->link_flags;
6449 ap->ops = pi->port_ops;
6451 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6452 host->ops = pi->port_ops;
6453 if (!host->private_data && pi->private_data)
6454 host->private_data = pi->private_data;
6461 * ata_host_start - start and freeze ports of an ATA host
6462 * @host: ATA host to start ports for
6464 * Start and then freeze ports of @host. Started status is
6465 * recorded in host->flags, so this function can be called
6466 * multiple times. Ports are guaranteed to get started only
6467 * once. If host->ops isn't initialized yet, it's set to the
6468 * first non-dummy port ops.
6471 * Inherited from calling layer (may sleep).
6474 * 0 if all ports are started successfully, -errno otherwise.
6476 int ata_host_start(struct ata_host *host)
6480 if (host->flags & ATA_HOST_STARTED)
6483 for (i = 0; i < host->n_ports; i++) {
6484 struct ata_port *ap = host->ports[i];
6486 if (!host->ops && !ata_port_is_dummy(ap))
6487 host->ops = ap->ops;
6489 if (ap->ops->port_start) {
6490 rc = ap->ops->port_start(ap);
6492 ata_port_printk(ap, KERN_ERR, "failed to "
6493 "start port (errno=%d)\n", rc);
6498 ata_eh_freeze_port(ap);
6501 host->flags |= ATA_HOST_STARTED;
6506 struct ata_port *ap = host->ports[i];
6508 if (ap->ops->port_stop)
6509 ap->ops->port_stop(ap);
6515 * ata_host_init - Initialize a host struct
6516 * @host: host to initialize
6517 * @dev: device host is attached to
6518 * @flags: host flags
6522 * PCI/etc. bus probe sem.
6525 /* KILLME - the only user left is ipr */
6526 void ata_host_init(struct ata_host *host, struct device *dev,
6527 unsigned long flags, const struct ata_port_operations *ops)
6529 spin_lock_init(&host->lock);
6531 host->flags = flags;
6536 * ata_host_register - register initialized ATA host
6537 * @host: ATA host to register
6538 * @sht: template for SCSI host
6540 * Register initialized ATA host. @host is allocated using
6541 * ata_host_alloc() and fully initialized by LLD. This function
6542 * starts ports, registers @host with ATA and SCSI layers and
6543 * probes registered devices.
6546 * Inherited from calling layer (may sleep).
6549 * 0 on success, -errno otherwise.
6551 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6555 /* host must have been started */
6556 if (!(host->flags & ATA_HOST_STARTED)) {
6557 dev_printk(KERN_ERR, host->dev,
6558 "BUG: trying to register unstarted host\n");
6563 /* Blow away unused ports. This happens when LLD can't
6564 * determine the exact number of ports to allocate at
6567 for (i = host->n_ports; host->ports[i]; i++)
6568 kfree(host->ports[i]);
6570 /* give ports names and add SCSI hosts */
6571 for (i = 0; i < host->n_ports; i++)
6572 host->ports[i]->print_id = ata_print_id++;
6574 rc = ata_scsi_add_hosts(host, sht);
6578 /* associate with ACPI nodes */
6579 ata_acpi_associate(host);
6581 /* set cable, sata_spd_limit and report */
6582 for (i = 0; i < host->n_ports; i++) {
6583 struct ata_port *ap = host->ports[i];
6584 unsigned long xfer_mask;
6586 /* set SATA cable type if still unset */
6587 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6588 ap->cbl = ATA_CBL_SATA;
6590 /* init sata_spd_limit to the current value */
6591 sata_link_init_spd(&ap->link);
6593 /* print per-port info to dmesg */
6594 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6597 if (!ata_port_is_dummy(ap))
6598 ata_port_printk(ap, KERN_INFO,
6599 "%cATA max %s %s\n",
6600 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6601 ata_mode_string(xfer_mask),
6602 ap->link.eh_info.desc);
6604 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6607 /* perform each probe synchronously */
6608 DPRINTK("probe begin\n");
6609 for (i = 0; i < host->n_ports; i++) {
6610 struct ata_port *ap = host->ports[i];
6614 if (ap->ops->error_handler) {
6615 struct ata_eh_info *ehi = &ap->link.eh_info;
6616 unsigned long flags;
6620 /* kick EH for boot probing */
6621 spin_lock_irqsave(ap->lock, flags);
6624 (1 << ata_link_max_devices(&ap->link)) - 1;
6625 ehi->action |= ATA_EH_SOFTRESET;
6626 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6628 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6629 ap->pflags |= ATA_PFLAG_LOADING;
6630 ata_port_schedule_eh(ap);
6632 spin_unlock_irqrestore(ap->lock, flags);
6634 /* wait for EH to finish */
6635 ata_port_wait_eh(ap);
6637 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6638 rc = ata_bus_probe(ap);
6639 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6642 /* FIXME: do something useful here?
6643 * Current libata behavior will
6644 * tear down everything when
6645 * the module is removed
6646 * or the h/w is unplugged.
6652 /* probes are done, now scan each port's disk(s) */
6653 DPRINTK("host probe begin\n");
6654 for (i = 0; i < host->n_ports; i++) {
6655 struct ata_port *ap = host->ports[i];
6657 ata_scsi_scan_host(ap, 1);
6664 * ata_host_activate - start host, request IRQ and register it
6665 * @host: target ATA host
6666 * @irq: IRQ to request
6667 * @irq_handler: irq_handler used when requesting IRQ
6668 * @irq_flags: irq_flags used when requesting IRQ
6669 * @sht: scsi_host_template to use when registering the host
6671 * After allocating an ATA host and initializing it, most libata
6672 * LLDs perform three steps to activate the host - start host,
6673 * request IRQ and register it. This helper takes the necessary
6674 * arguments and performs the three steps in one go.
6677 * Inherited from calling layer (may sleep).
6680 * 0 on success, -errno otherwise.
6682 int ata_host_activate(struct ata_host *host, int irq,
6683 irq_handler_t irq_handler, unsigned long irq_flags,
6684 struct scsi_host_template *sht)
6688 rc = ata_host_start(host);
6692 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6693 dev_driver_string(host->dev), host);
6697 for (i = 0; i < host->n_ports; i++)
6698 ata_port_desc(host->ports[i], "irq %d", irq);
6700 rc = ata_host_register(host, sht);
6701 /* if failed, just free the IRQ and leave ports alone */
6703 devm_free_irq(host->dev, irq, host);
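/*
 * Typical LLD probe sketch using the helpers above (names with the
 * "my_" prefix are hypothetical, not part of libata):
 *
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host;
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	// ... fill in host->ports[i]->ioaddr here ...
 *	return ata_host_activate(host, pdev->irq, ata_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */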
6709 * ata_port_detach - Detach ATA port in preparation for device removal
6710 * @ap: ATA port to be detached
6712 * Detach all ATA devices and the associated SCSI devices of @ap;
6713 * then, remove the associated SCSI host. @ap is guaranteed to
6714 * be quiescent on return from this function.
6717 * Kernel thread context (may sleep).
6719 void ata_port_detach(struct ata_port *ap)
6721 unsigned long flags;
6722 struct ata_link *link;
6723 struct ata_device *dev;
6725 if (!ap->ops->error_handler)
6728 /* tell EH we're leaving & flush EH */
6729 spin_lock_irqsave(ap->lock, flags);
6730 ap->pflags |= ATA_PFLAG_UNLOADING;
6731 spin_unlock_irqrestore(ap->lock, flags);
6733 ata_port_wait_eh(ap);
6735 /* EH is now guaranteed to see UNLOADING, so no new device
6736 * will be attached. Disable all existing devices.
6738 spin_lock_irqsave(ap->lock, flags);
6740 ata_port_for_each_link(link, ap) {
6741 ata_link_for_each_dev(dev, link)
6742 ata_dev_disable(dev);
6745 spin_unlock_irqrestore(ap->lock, flags);
6747 /* Final freeze & EH. All in-flight commands are aborted. EH
6748 * will be skipped and retries will be terminated with bad
6751 spin_lock_irqsave(ap->lock, flags);
6752 ata_port_freeze(ap); /* won't be thawed */
6753 spin_unlock_irqrestore(ap->lock, flags);
6755 ata_port_wait_eh(ap);
6756 cancel_rearming_delayed_work(&ap->hotplug_task);
6759 /* remove the associated SCSI host */
6760 scsi_remove_host(ap->scsi_host);
6764 * ata_host_detach - Detach all ports of an ATA host
6765 * @host: Host to detach
6767 * Detach all ports of @host.
6770 * Kernel thread context (may sleep).
6772 void ata_host_detach(struct ata_host *host)
6776 for (i = 0; i < host->n_ports; i++)
6777 ata_port_detach(host->ports[i]);
6781 * ata_std_ports - initialize ioaddr with standard port offsets.
6782 * @ioaddr: IO address structure to be initialized
6784 * Utility function which initializes data_addr, error_addr,
6785 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6786 * device_addr, status_addr, and command_addr to standard offsets
6787 * relative to cmd_addr.
6789 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6792 void ata_std_ports(struct ata_ioports *ioaddr)
6794 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6795 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6796 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6797 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6798 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6799 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6800 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6801 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6802 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6803 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
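/*
 * Usage sketch (hypothetical LLD; cmd_base and ctl_base stand for
 * iomapped command and control blocks):
 *
 *	ioaddr->cmd_addr = cmd_base;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr = ctl_base;
 *	ata_std_ports(ioaddr);
 */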
6810 * ata_pci_remove_one - PCI layer callback for device removal
6811 * @pdev: PCI device that was removed
6813 * PCI layer indicates to libata via this hook that a hot-unplug or
6814 * module unload event has occurred. Detach all ports. Resource
6815 * release is handled via devres.
6818 * Inherited from PCI layer (may sleep).
6820 void ata_pci_remove_one(struct pci_dev *pdev)
6822 struct device *dev = pci_dev_to_dev(pdev);
6823 struct ata_host *host = dev_get_drvdata(dev);
6825 ata_host_detach(host);
6828 /* move to PCI subsystem */
6829 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6831 unsigned long tmp = 0;
6833 switch (bits->width) {
6836 pci_read_config_byte(pdev, bits->reg, &tmp8);
6842 pci_read_config_word(pdev, bits->reg, &tmp16);
6848 pci_read_config_dword(pdev, bits->reg, &tmp32);
6859 return (tmp == bits->val) ? 1 : 0;
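/*
 * Usage sketch (values are illustrative, in the style of legacy IDE
 * port-enable bits): test whether bit 7 of config register 0x41 is set.
 *
 *	static const struct pci_bits my_enable_bits = { 0x41, 1, 0x80, 0x80 };
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		return -ENOENT;	// port disabled by firmware
 */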
6863 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6865 pci_save_state(pdev);
6866 pci_disable_device(pdev);
6868 if (mesg.event == PM_EVENT_SUSPEND)
6869 pci_set_power_state(pdev, PCI_D3hot);
6872 int ata_pci_device_do_resume(struct pci_dev *pdev)
6876 pci_set_power_state(pdev, PCI_D0);
6877 pci_restore_state(pdev);
6879 rc = pcim_enable_device(pdev);
6881 dev_printk(KERN_ERR, &pdev->dev,
6882 "failed to enable device after resume (%d)\n", rc);
6886 pci_set_master(pdev);
6890 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6892 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6895 rc = ata_host_suspend(host, mesg);
6899 ata_pci_device_do_suspend(pdev, mesg);
6904 int ata_pci_device_resume(struct pci_dev *pdev)
6906 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6909 rc = ata_pci_device_do_resume(pdev);
6911 ata_host_resume(host);
6914 #endif /* CONFIG_PM */
6916 #endif /* CONFIG_PCI */
6919 static int __init ata_init(void)
6921 ata_probe_timeout *= HZ;
6922 ata_wq = create_workqueue("ata");
6926 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6928 destroy_workqueue(ata_wq);
6932 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6936 static void __exit ata_exit(void)
6938 destroy_workqueue(ata_wq);
6939 destroy_workqueue(ata_aux_wq);
6942 subsys_initcall(ata_init);
6943 module_exit(ata_exit);
6945 static unsigned long ratelimit_time;
6946 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6948 int ata_ratelimit(void)
6951 unsigned long flags;
6953 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6955 if (time_after(jiffies, ratelimit_time)) {
6957 ratelimit_time = jiffies + (HZ/5);
6961 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6967 * ata_wait_register - wait until register value changes
6968 * @reg: IO-mapped register
6969 * @mask: Mask to apply to read register value
6970 * @val: Wait condition
6971 * @interval_msec: polling interval in milliseconds
6972 * @timeout_msec: timeout in milliseconds
6974 * Waiting for some bits of a register to change is a common
6975 * operation for ATA controllers. This function reads the 32-bit LE
6976 * IO-mapped register @reg and tests for the following condition.
6978 * (*@reg & mask) != val
6980 * If the condition is met, it returns; otherwise, the process is
6981 * repeated after @interval_msec until timeout.
6984 * Kernel thread context (may sleep)
6987 * The final register value.
6989 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6990 unsigned long interval_msec,
6991 unsigned long timeout_msec)
6993 unsigned long timeout;
6996 tmp = ioread32(reg);
6998 /* Calculate timeout _after_ the first read to make sure
6999 * preceding writes reach the controller before starting to
7000 * eat away the timeout.
7002 timeout = jiffies + (timeout_msec * HZ) / 1000;
7004 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7005 msleep(interval_msec);
7006 tmp = ioread32(reg);
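/*
 * Usage sketch (register name and bit are hypothetical): poll every
 * 10 ms, for up to one second, until the BUSY bit clears.
 *
 *	tmp = ata_wait_register(mmio + MY_STATUS, MY_BUSY, MY_BUSY, 10, 1000);
 *	if (tmp & MY_BUSY)
 *		;	// timed out, bit still set
 */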
7015 static void ata_dummy_noret(struct ata_port *ap) { }
7016 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7017 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7019 static u8 ata_dummy_check_status(struct ata_port *ap)
7024 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7026 return AC_ERR_SYSTEM;
7029 const struct ata_port_operations ata_dummy_port_ops = {
7030 .check_status = ata_dummy_check_status,
7031 .check_altstatus = ata_dummy_check_status,
7032 .dev_select = ata_noop_dev_select,
7033 .qc_prep = ata_noop_qc_prep,
7034 .qc_issue = ata_dummy_qc_issue,
7035 .freeze = ata_dummy_noret,
7036 .thaw = ata_dummy_noret,
7037 .error_handler = ata_dummy_noret,
7038 .post_internal_cmd = ata_dummy_qc_noret,
7039 .irq_clear = ata_dummy_noret,
7040 .port_start = ata_dummy_ret0,
7041 .port_stop = ata_dummy_noret,
7044 const struct ata_port_info ata_dummy_port_info = {
7045 .port_ops = &ata_dummy_port_ops,
7049 * libata is essentially a library of internal helper functions for
7050 * low-level ATA host controller drivers. As such, the API/ABI is
7051 * likely to change as new drivers are added and updated.
7052 * Do not depend on ABI/API stability.
7055 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7056 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7057 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
7058 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
7059 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
7060 EXPORT_SYMBOL_GPL(ata_std_bios_param);
7061 EXPORT_SYMBOL_GPL(ata_std_ports);
7062 EXPORT_SYMBOL_GPL(ata_host_init);
7063 EXPORT_SYMBOL_GPL(ata_host_alloc);
7064 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
7065 EXPORT_SYMBOL_GPL(ata_host_start);
7066 EXPORT_SYMBOL_GPL(ata_host_register);
7067 EXPORT_SYMBOL_GPL(ata_host_activate);
7068 EXPORT_SYMBOL_GPL(ata_host_detach);
7069 EXPORT_SYMBOL_GPL(ata_sg_init);
7070 EXPORT_SYMBOL_GPL(ata_sg_init_one);
7071 EXPORT_SYMBOL_GPL(ata_hsm_move);
7072 EXPORT_SYMBOL_GPL(ata_qc_complete);
7073 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
7074 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
7075 EXPORT_SYMBOL_GPL(ata_tf_load);
7076 EXPORT_SYMBOL_GPL(ata_tf_read);
7077 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7078 EXPORT_SYMBOL_GPL(ata_std_dev_select);
7079 EXPORT_SYMBOL_GPL(sata_print_link_status);
7080 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7081 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7082 EXPORT_SYMBOL_GPL(ata_check_status);
7083 EXPORT_SYMBOL_GPL(ata_altstatus);
7084 EXPORT_SYMBOL_GPL(ata_exec_command);
7085 EXPORT_SYMBOL_GPL(ata_port_start);
7086 EXPORT_SYMBOL_GPL(ata_sff_port_start);
7087 EXPORT_SYMBOL_GPL(ata_interrupt);
7088 EXPORT_SYMBOL_GPL(ata_do_set_mode);
7089 EXPORT_SYMBOL_GPL(ata_data_xfer);
7090 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
7091 EXPORT_SYMBOL_GPL(ata_qc_prep);
7092 EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
7093 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
7094 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7095 EXPORT_SYMBOL_GPL(ata_bmdma_start);
7096 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7097 EXPORT_SYMBOL_GPL(ata_bmdma_status);
7098 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
7099 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7100 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7101 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7102 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7103 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
7104 EXPORT_SYMBOL_GPL(ata_port_probe);
7105 EXPORT_SYMBOL_GPL(ata_dev_disable);
7106 EXPORT_SYMBOL_GPL(sata_set_spd);
7107 EXPORT_SYMBOL_GPL(sata_link_debounce);
7108 EXPORT_SYMBOL_GPL(sata_link_resume);
7109 EXPORT_SYMBOL_GPL(sata_phy_reset);
7110 EXPORT_SYMBOL_GPL(__sata_phy_reset);
7111 EXPORT_SYMBOL_GPL(ata_bus_reset);
7112 EXPORT_SYMBOL_GPL(ata_std_prereset);
7113 EXPORT_SYMBOL_GPL(ata_std_softreset);
7114 EXPORT_SYMBOL_GPL(sata_link_hardreset);
7115 EXPORT_SYMBOL_GPL(sata_std_hardreset);
7116 EXPORT_SYMBOL_GPL(ata_std_postreset);
7117 EXPORT_SYMBOL_GPL(ata_dev_classify);
7118 EXPORT_SYMBOL_GPL(ata_dev_pair);
7119 EXPORT_SYMBOL_GPL(ata_port_disable);
7120 EXPORT_SYMBOL_GPL(ata_ratelimit);
7121 EXPORT_SYMBOL_GPL(ata_wait_register);
7122 EXPORT_SYMBOL_GPL(ata_busy_sleep);
7123 EXPORT_SYMBOL_GPL(ata_wait_ready);
7124 EXPORT_SYMBOL_GPL(ata_port_queue_task);
7125 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7126 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
7127 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
7128 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
7129 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
7130 EXPORT_SYMBOL_GPL(ata_host_intr);
7131 EXPORT_SYMBOL_GPL(sata_scr_valid);
7132 EXPORT_SYMBOL_GPL(sata_scr_read);
7133 EXPORT_SYMBOL_GPL(sata_scr_write);
7134 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
7135 EXPORT_SYMBOL_GPL(ata_link_online);
7136 EXPORT_SYMBOL_GPL(ata_link_offline);
7138 EXPORT_SYMBOL_GPL(ata_host_suspend);
7139 EXPORT_SYMBOL_GPL(ata_host_resume);
7140 #endif /* CONFIG_PM */
7141 EXPORT_SYMBOL_GPL(ata_id_string);
7142 EXPORT_SYMBOL_GPL(ata_id_c_string);
7143 EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
7144 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7146 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
7147 EXPORT_SYMBOL_GPL(ata_timing_compute);
7148 EXPORT_SYMBOL_GPL(ata_timing_merge);
7151 EXPORT_SYMBOL_GPL(pci_test_config_bits);
7152 EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
7153 EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
7154 EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
7155 EXPORT_SYMBOL_GPL(ata_pci_init_one);
7156 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
7158 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7159 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
7160 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7161 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
7162 #endif /* CONFIG_PM */
7163 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7164 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
7165 #endif /* CONFIG_PCI */
7167 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7168 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7169 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7170 EXPORT_SYMBOL_GPL(ata_port_desc);
7172 EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7173 #endif /* CONFIG_PCI */
7174 EXPORT_SYMBOL_GPL(ata_eng_timeout);
7175 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7176 EXPORT_SYMBOL_GPL(ata_link_abort);
7177 EXPORT_SYMBOL_GPL(ata_port_abort);
7178 EXPORT_SYMBOL_GPL(ata_port_freeze);
7179 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7180 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7181 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7182 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7183 EXPORT_SYMBOL_GPL(ata_do_eh);
7184 EXPORT_SYMBOL_GPL(ata_irq_on);
7185 EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7187 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7188 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7189 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7190 EXPORT_SYMBOL_GPL(ata_cable_sata);