2 * libata-core.c - helper library for ATA
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/list.h>
41 #include <linux/highmem.h>
42 #include <linux/spinlock.h>
43 #include <linux/blkdev.h>
44 #include <linux/delay.h>
45 #include <linux/timer.h>
46 #include <linux/interrupt.h>
47 #include <linux/completion.h>
48 #include <linux/suspend.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/scatterlist.h>
52 #include <scsi/scsi.h>
53 #include <scsi/scsi_cmnd.h>
54 #include <scsi/scsi_host.h>
55 #include <linux/libata.h>
57 #include <asm/semaphore.h>
58 #include <asm/byteorder.h>
62 #define DRV_VERSION "2.21" /* must be exactly four chars */
65 /* debounce timing parameters in msecs { interval, duration, timeout } */
66 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
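/*
 * Illustrative note (not from the original source): each triplet above
 * is consumed by the SATA PHY debounce helpers, which poll SStatus
 * every @interval ms until the value has been stable for @duration ms,
 * giving up after @timeout ms.  E.g. sata_deb_timing_hotplug polls
 * every 25 ms and requires 500 ms of stability within a 2000 ms window.
 */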
70 static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72 static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73 static void ata_dev_xfermask(struct ata_device *dev);
74 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
76 unsigned int ata_print_id = 1;
77 static struct workqueue_struct *ata_wq;
79 struct workqueue_struct *ata_aux_wq;
81 int atapi_enabled = 1;
82 module_param(atapi_enabled, int, 0444);
83 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
86 module_param(atapi_dmadir, int, 0444);
87 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
90 module_param_named(fua, libata_fua, int, 0444);
91 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
93 static int ata_ignore_hpa = 0;
94 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
95 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
97 static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
98 module_param(ata_probe_timeout, int, 0444);
99 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
101 int libata_noacpi = 1;
102 module_param_named(noacpi, libata_noacpi, int, 0444);
103 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
105 MODULE_AUTHOR("Jeff Garzik");
106 MODULE_DESCRIPTION("Library module for ATA devices");
107 MODULE_LICENSE("GPL");
108 MODULE_VERSION(DRV_VERSION);
112 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
113 * @tf: Taskfile to convert
114 * @pmp: Port multiplier port
115 * @is_cmd: This FIS is for a command
116 * @fis: Buffer into which data will be output
118 * Converts a standard ATA taskfile to a Serial ATA
119 * FIS structure (Register - Host to Device).
122 * Inherited from caller.
124 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
126 fis[0] = 0x27; /* Register - Host to Device FIS */
127 fis[1] = pmp & 0xf; /* Port multiplier number */
129 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
131 fis[2] = tf->command;
132 fis[3] = tf->feature;
139 fis[8] = tf->hob_lbal;
140 fis[9] = tf->hob_lbam;
141 fis[10] = tf->hob_lbah;
142 fis[11] = tf->hob_feature;
145 fis[13] = tf->hob_nsect;
156 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
157 * @fis: Buffer from which data will be input
158 * @tf: Taskfile to output
160 * Converts a serial ATA FIS structure to a standard ATA taskfile.
163 * Inherited from caller.
166 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
168 tf->command = fis[2]; /* status */
169 tf->feature = fis[3]; /* error */
176 tf->hob_lbal = fis[8];
177 tf->hob_lbam = fis[9];
178 tf->hob_lbah = fis[10];
181 tf->hob_nsect = fis[13];
184 static const u8 ata_rw_cmds[] = {
188 ATA_CMD_READ_MULTI_EXT,
189 ATA_CMD_WRITE_MULTI_EXT,
193 ATA_CMD_WRITE_MULTI_FUA_EXT,
197 ATA_CMD_PIO_READ_EXT,
198 ATA_CMD_PIO_WRITE_EXT,
211 ATA_CMD_WRITE_FUA_EXT
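/*
 * Worked example (illustrative, assuming the conventional layout of
 * three 8-entry blocks: multi-PIO at index 0, PIO at 8, DMA at 16):
 * a DMA LBA48 write computes index 16 + lba48(2) + write(1) = 19,
 * i.e. ATA_CMD_WRITE_EXT; adding the FUA offset (4) gives 23, i.e.
 * ATA_CMD_WRITE_FUA_EXT.
 */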
215 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
216 * @tf: command to examine and configure
217 * @dev: device tf belongs to
219 * Examine the device configuration and tf->flags to calculate
220 * the proper read/write commands and protocol to use.
225 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
229 int index, fua, lba48, write;
231 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
232 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
233 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
235 if (dev->flags & ATA_DFLAG_PIO) {
236 tf->protocol = ATA_PROT_PIO;
237 index = dev->multi_count ? 0 : 8;
238 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
239 /* Unable to use DMA due to host limitation */
240 tf->protocol = ATA_PROT_PIO;
241 index = dev->multi_count ? 0 : 8;
243 tf->protocol = ATA_PROT_DMA;
247 cmd = ata_rw_cmds[index + fua + lba48 + write];
256 * ata_tf_read_block - Read block address from ATA taskfile
257 * @tf: ATA taskfile of interest
258 * @dev: ATA device @tf belongs to
263 * Read block address from @tf. This function can handle all
264 * three address formats - LBA, LBA48 and CHS. tf->protocol and
265 * flags select the address format to use.
268 * Block address read from @tf.
270 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
274 if (tf->flags & ATA_TFLAG_LBA) {
275 if (tf->flags & ATA_TFLAG_LBA48) {
276 block |= (u64)tf->hob_lbah << 40;
277 block |= (u64)tf->hob_lbam << 32;
278 block |= tf->hob_lbal << 24;
280 block |= (tf->device & 0xf) << 24;
282 block |= tf->lbah << 16;
283 block |= tf->lbam << 8;
288 cyl = tf->lbam | (tf->lbah << 8);
289 head = tf->device & 0xf;
292 block = (cyl * dev->heads + head) * dev->sectors + sect;
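/*
 * Illustrative arithmetic (not in the original source): with a
 * 16-head, 63-sector geometry, cyl 1 / head 2 / sect 3 yields
 * block = (1 * 16 + 2) * 63 + 3 = 1137, using the raw 1-based
 * sector value from the taskfile exactly as the line above does.
 */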
299 * ata_build_rw_tf - Build ATA taskfile for given read/write request
300 * @tf: Target ATA taskfile
301 * @dev: ATA device @tf belongs to
302 * @block: Block address
303 * @n_block: Number of blocks
304 * @tf_flags: RW/FUA etc...
310 * Build ATA taskfile @tf for read/write request described by
311 * @block, @n_block, @tf_flags and @tag on @dev.
315 * 0 on success, -ERANGE if the request is too large for @dev,
316 * -EINVAL if the request is invalid.
318 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
319 u64 block, u32 n_block, unsigned int tf_flags,
322 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
323 tf->flags |= tf_flags;
325 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
327 if (!lba_48_ok(block, n_block))
330 tf->protocol = ATA_PROT_NCQ;
331 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
333 if (tf->flags & ATA_TFLAG_WRITE)
334 tf->command = ATA_CMD_FPDMA_WRITE;
336 tf->command = ATA_CMD_FPDMA_READ;
338 tf->nsect = tag << 3;
339 tf->hob_feature = (n_block >> 8) & 0xff;
340 tf->feature = n_block & 0xff;
342 tf->hob_lbah = (block >> 40) & 0xff;
343 tf->hob_lbam = (block >> 32) & 0xff;
344 tf->hob_lbal = (block >> 24) & 0xff;
345 tf->lbah = (block >> 16) & 0xff;
346 tf->lbam = (block >> 8) & 0xff;
347 tf->lbal = block & 0xff;
350 if (tf->flags & ATA_TFLAG_FUA)
351 tf->device |= 1 << 7;
352 } else if (dev->flags & ATA_DFLAG_LBA) {
353 tf->flags |= ATA_TFLAG_LBA;
355 if (lba_28_ok(block, n_block)) {
357 tf->device |= (block >> 24) & 0xf;
358 } else if (lba_48_ok(block, n_block)) {
359 if (!(dev->flags & ATA_DFLAG_LBA48))
363 tf->flags |= ATA_TFLAG_LBA48;
365 tf->hob_nsect = (n_block >> 8) & 0xff;
367 tf->hob_lbah = (block >> 40) & 0xff;
368 tf->hob_lbam = (block >> 32) & 0xff;
369 tf->hob_lbal = (block >> 24) & 0xff;
371 /* request too large even for LBA48 */
374 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
377 tf->nsect = n_block & 0xff;
379 tf->lbah = (block >> 16) & 0xff;
380 tf->lbam = (block >> 8) & 0xff;
381 tf->lbal = block & 0xff;
383 tf->device |= ATA_LBA;
386 u32 sect, head, cyl, track;
388 /* The request -may- be too large for CHS addressing. */
389 if (!lba_28_ok(block, n_block))
392 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
395 /* Convert LBA to CHS */
396 track = (u32)block / dev->sectors;
397 cyl = track / dev->heads;
398 head = track % dev->heads;
399 sect = (u32)block % dev->sectors + 1;
401 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
402 (u32)block, track, cyl, head, sect);
404 /* Check whether the converted CHS can fit.
408 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
411 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
422 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
423 * @pio_mask: pio_mask
424 * @mwdma_mask: mwdma_mask
425 * @udma_mask: udma_mask
427 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
428 * unsigned int xfer_mask.
436 static unsigned int ata_pack_xfermask(unsigned int pio_mask,
437 unsigned int mwdma_mask,
438 unsigned int udma_mask)
440 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
441 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
442 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
446 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
447 * @xfer_mask: xfer_mask to unpack
448 * @pio_mask: resulting pio_mask
449 * @mwdma_mask: resulting mwdma_mask
450 * @udma_mask: resulting udma_mask
452 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
453 * Any NULL destination masks will be ignored.
455 static void ata_unpack_xfermask(unsigned int xfer_mask,
456 unsigned int *pio_mask,
457 unsigned int *mwdma_mask,
458 unsigned int *udma_mask)
461 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
463 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
465 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
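/*
 * Usage sketch (illustrative): the two helpers round-trip, so the
 * three per-class masks can be packed for storage and unpacked on
 * demand:
 *
 *	unsigned int pio, mwdma, udma;
 *	unsigned int xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * after which pio == 0x1f (PIO0-4), mwdma == 0x07 (MWDMA0-2) and
 * udma == 0x3f (UDMA0-5) again.
 */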
468 static const struct ata_xfer_ent {
472 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
473 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
474 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
479 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
480 * @xfer_mask: xfer_mask of interest
482 * Return matching XFER_* value for @xfer_mask. Only the highest
483 * bit of @xfer_mask is considered.
489 * Matching XFER_* value, 0 if no match found.
491 static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
493 int highbit = fls(xfer_mask) - 1;
494 const struct ata_xfer_ent *ent;
496 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
497 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
498 return ent->base + highbit - ent->shift;
503 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
504 * @xfer_mode: XFER_* of interest
506 * Return matching xfer_mask for @xfer_mode.
512 * Matching xfer_mask, 0 if no match found.
514 static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
516 const struct ata_xfer_ent *ent;
518 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
519 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
520 return 1 << (ent->shift + xfer_mode - ent->base);
525 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
526 * @xfer_mode: XFER_* of interest
528 * Return matching xfer_shift for @xfer_mode.
534 * Matching xfer_shift, -1 if no match found.
536 static int ata_xfer_mode2shift(unsigned int xfer_mode)
538 const struct ata_xfer_ent *ent;
540 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
541 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
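/*
 * Illustrative example tying the three helpers to ata_xfer_tbl above:
 * a mask whose highest set bit is the first UDMA bit maps to
 * XFER_UDMA_0, i.e. ata_xfer_mask2mode(1 << ATA_SHIFT_UDMA) returns
 * XFER_UDMA_0, ata_xfer_mode2mask(XFER_UDMA_0) returns
 * 1 << ATA_SHIFT_UDMA, and ata_xfer_mode2shift(XFER_UDMA_0) returns
 * ATA_SHIFT_UDMA.
 */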
547 * ata_mode_string - convert xfer_mask to string
548 * @xfer_mask: mask of bits supported; only highest bit counts.
550 * Determine string which represents the highest speed
551 * (highest bit in @xfer_mask).
557 * Constant C string representing highest speed listed in
558 * @xfer_mask, or the constant C string "<n/a>".
560 static const char *ata_mode_string(unsigned int xfer_mask)
562 static const char * const xfer_mode_str[] = {
586 highbit = fls(xfer_mask) - 1;
587 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
588 return xfer_mode_str[highbit];
592 static const char *sata_spd_string(unsigned int spd)
594 static const char * const spd_str[] = {
599 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
601 return spd_str[spd - 1];
604 void ata_dev_disable(struct ata_device *dev)
606 if (ata_dev_enabled(dev)) {
607 if (ata_msg_drv(dev->link->ap))
608 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
609 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
616 * ata_devchk - PATA device presence detection
617 * @ap: ATA channel to examine
618 * @device: Device to examine (starting at zero)
620 * This technique was originally described in
621 * Hale Landis's ATADRVR (www.ata-atapi.com), and
622 * later found its way into the ATA/ATAPI spec.
624 * Write a pattern to the ATA shadow registers,
625 * and if a device is present, it will respond by
626 * correctly storing and echoing back the
627 * ATA shadow register contents.
633 static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
635 struct ata_ioports *ioaddr = &ap->ioaddr;
638 ap->ops->dev_select(ap, device);
640 iowrite8(0x55, ioaddr->nsect_addr);
641 iowrite8(0xaa, ioaddr->lbal_addr);
643 iowrite8(0xaa, ioaddr->nsect_addr);
644 iowrite8(0x55, ioaddr->lbal_addr);
646 iowrite8(0x55, ioaddr->nsect_addr);
647 iowrite8(0xaa, ioaddr->lbal_addr);
649 nsect = ioread8(ioaddr->nsect_addr);
650 lbal = ioread8(ioaddr->lbal_addr);
652 if ((nsect == 0x55) && (lbal == 0xaa))
653 return 1; /* we found a device */
655 return 0; /* nothing found */
659 * ata_dev_classify - determine device type based on ATA-spec signature
660 * @tf: ATA taskfile register set for device to be identified
662 * Determine from taskfile register contents whether a device is
663 * ATA or ATAPI, as per "Signature and persistence" section
664 * of ATA/PI spec (volume 1, sect 5.14).
670 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
671 * in the event of failure.
674 unsigned int ata_dev_classify(const struct ata_taskfile *tf)
676 /* Apple's open source Darwin code hints that some devices only
677 * put a proper signature into the LBA mid/high registers,
678 * so we only check those. It's sufficient for uniqueness.
681 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
682 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
683 DPRINTK("found ATA device by sig\n");
687 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
688 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
689 DPRINTK("found ATAPI device by sig\n");
690 return ATA_DEV_ATAPI;
693 DPRINTK("unknown device\n");
694 return ATA_DEV_UNKNOWN;
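/*
 * For reference (summarizing the checks above), the signature lives in
 * the LBA mid/high (lbam/lbah) shadow registers after reset:
 *
 *	lbam  lbah   device type
 *	0x00  0x00   ATA
 *	0x3c  0xc3   ATA (alternate)
 *	0x14  0xeb   ATAPI
 *	0x69  0x96   ATAPI (alternate)
 */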
698 * ata_dev_try_classify - Parse returned ATA device signature
699 * @ap: ATA channel to examine
700 * @device: Device to examine (starting at zero)
701 * @r_err: Value of error register on completion
703 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
704 * an ATA/ATAPI-defined set of values is placed in the ATA
705 * shadow registers, indicating the results of device detection
708 * Select the ATA device, and read the values from the ATA shadow
709 * registers. Then parse according to the Error register value,
710 * and the spec-defined values examined by ata_dev_classify().
716 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
720 ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
722 struct ata_taskfile tf;
726 ap->ops->dev_select(ap, device);
728 memset(&tf, 0, sizeof(tf));
730 ap->ops->tf_read(ap, &tf);
735 /* see if device passed diags: if master then continue and warn later */
736 if (err == 0 && device == 0)
737 /* diagnostic fail : do nothing _YET_ */
738 ap->link.device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
741 else if ((device == 0) && (err == 0x81))
746 /* determine if device is ATA or ATAPI */
747 class = ata_dev_classify(&tf);
749 if (class == ATA_DEV_UNKNOWN)
751 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
757 * ata_id_string - Convert IDENTIFY DEVICE page into string
758 * @id: IDENTIFY DEVICE results we will examine
759 * @s: string into which data is output
760 * @ofs: offset into identify device page
761 * @len: length of string to return. must be an even number.
763 * The strings in the IDENTIFY DEVICE page are broken up into
764 * 16-bit chunks. Run through the string, and output each
765 * 8-bit chunk linearly, regardless of platform.
771 void ata_id_string(const u16 *id, unsigned char *s,
772 unsigned int ofs, unsigned int len)
791 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
792 * @id: IDENTIFY DEVICE results we will examine
793 * @s: string into which data is output
794 * @ofs: offset into identify device page
795 * @len: length of string to return. must be an odd number.
797 * This function is identical to ata_id_string except that it
798 * trims trailing spaces and terminates the resulting string with
799 * null. @len must be actual maximum length (even number) + 1.
804 void ata_id_c_string(const u16 *id, unsigned char *s,
805 unsigned int ofs, unsigned int len)
811 ata_id_string(id, s, ofs, len - 1);
813 p = s + strnlen(s, len - 1);
814 while (p > s && p[-1] == ' ')
819 static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
823 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
824 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
825 sectors |= (tf->hob_lbal & 0xff) << 24;
826 sectors |= (tf->lbah & 0xff) << 16;
827 sectors |= (tf->lbam & 0xff) << 8;
828 sectors |= (tf->lbal & 0xff);
833 static u64 ata_tf_to_lba(struct ata_taskfile *tf)
837 sectors |= (tf->device & 0x0f) << 24;
838 sectors |= (tf->lbah & 0xff) << 16;
839 sectors |= (tf->lbam & 0xff) << 8;
840 sectors |= (tf->lbal & 0xff);
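/*
 * Illustrative example (not in the original source): for the LBA48
 * variant above, a taskfile with hob_lbah = 0x01 and all other address
 * bytes zero decodes to sectors = 1ULL << 40; the LBA28 variant
 * instead takes its top nibble from the device register (bits 27:24).
 */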
846 * ata_read_native_max_address_ext - LBA48 native max query
847 * @dev: Device to query
849 * Perform an LBA48 size query upon the device in question. Return the
850 * actual LBA48 size or zero if the command fails.
853 static u64 ata_read_native_max_address_ext(struct ata_device *dev)
856 struct ata_taskfile tf;
858 ata_tf_init(dev, &tf);
860 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
861 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
862 tf.protocol |= ATA_PROT_NODATA;
865 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
869 return ata_tf_to_lba48(&tf);
873 * ata_read_native_max_address - LBA28 native max query
874 * @dev: Device to query
876 * Perform an LBA28 size query upon the device in question. Return the
877 * actual LBA28 size or zero if the command fails.
880 static u64 ata_read_native_max_address(struct ata_device *dev)
883 struct ata_taskfile tf;
885 ata_tf_init(dev, &tf);
887 tf.command = ATA_CMD_READ_NATIVE_MAX;
888 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
889 tf.protocol |= ATA_PROT_NODATA;
892 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
896 return ata_tf_to_lba(&tf);
900 * ata_set_native_max_address_ext - LBA48 native max set
901 * @dev: Device to query
902 * @new_sectors: new max sectors value to set for the device
904 * Perform an LBA48 size set max upon the device in question. Return the
905 * actual LBA48 size or zero if the command fails.
908 static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
911 struct ata_taskfile tf;
915 ata_tf_init(dev, &tf);
917 tf.command = ATA_CMD_SET_MAX_EXT;
918 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
919 tf.protocol |= ATA_PROT_NODATA;
922 tf.lbal = (new_sectors >> 0) & 0xff;
923 tf.lbam = (new_sectors >> 8) & 0xff;
924 tf.lbah = (new_sectors >> 16) & 0xff;
926 tf.hob_lbal = (new_sectors >> 24) & 0xff;
927 tf.hob_lbam = (new_sectors >> 32) & 0xff;
928 tf.hob_lbah = (new_sectors >> 40) & 0xff;
930 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
934 return ata_tf_to_lba48(&tf);
938 * ata_set_native_max_address - LBA28 native max set
939 * @dev: Device to query
940 * @new_sectors: new max sectors value to set for the device
942 * Perform an LBA28 size set max upon the device in question. Return the
943 * actual LBA28 size or zero if the command fails.
946 static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
949 struct ata_taskfile tf;
953 ata_tf_init(dev, &tf);
955 tf.command = ATA_CMD_SET_MAX;
956 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
957 tf.protocol |= ATA_PROT_NODATA;
959 tf.lbal = (new_sectors >> 0) & 0xff;
960 tf.lbam = (new_sectors >> 8) & 0xff;
961 tf.lbah = (new_sectors >> 16) & 0xff;
962 tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
964 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
968 return ata_tf_to_lba(&tf);
972 * ata_hpa_resize - Resize a device with an HPA set
973 * @dev: Device to resize
975 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
976 * it if required to the full size of the media. The caller must check
977 * the drive has the HPA feature set enabled.
980 static u64 ata_hpa_resize(struct ata_device *dev)
982 u64 sectors = dev->n_sectors;
985 if (ata_id_has_lba48(dev->id))
986 hpa_sectors = ata_read_native_max_address_ext(dev);
988 hpa_sectors = ata_read_native_max_address(dev);
990 if (hpa_sectors > sectors) {
991 ata_dev_printk(dev, KERN_INFO,
992 "Host Protected Area detected:\n"
993 "\tcurrent size: %lld sectors\n"
994 "\tnative size: %lld sectors\n",
995 (long long)sectors, (long long)hpa_sectors);
997 if (ata_ignore_hpa) {
998 if (ata_id_has_lba48(dev->id))
999 hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
1001 hpa_sectors = ata_set_native_max_address(dev,
1005 ata_dev_printk(dev, KERN_INFO, "native size "
1006 "increased to %lld sectors\n",
1007 (long long)hpa_sectors);
1011 } else if (hpa_sectors < sectors)
1012 ata_dev_printk(dev, KERN_WARNING, "%s: hpa sectors (%lld) "
1013 "is smaller than sectors (%lld)\n", __FUNCTION__,
1014 (long long)hpa_sectors, (long long)sectors);
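/*
 * Illustrative flow (example figures only): a drive whose BIOS set an
 * HPA might report n_sectors = 976771055 while READ NATIVE MAX returns
 * 976773168.  With ignore_hpa=0 the smaller, clipped size is kept;
 * with ignore_hpa=1 a SET MAX command is issued and the full native
 * capacity is used.
 */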
1019 static u64 ata_id_n_sectors(const u16 *id)
1021 if (ata_id_has_lba(id)) {
1022 if (ata_id_has_lba48(id))
1023 return ata_id_u64(id, 100);
1025 return ata_id_u32(id, 60);
1027 if (ata_id_current_chs_valid(id))
1028 return ata_id_u32(id, 57);
1030 return id[1] * id[3] * id[6];
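/*
 * The IDENTIFY words used above (per the ATA spec): words 100-103 hold
 * the 64-bit LBA48 capacity, 60-61 the 32-bit LBA28 capacity, 57-58
 * the current CHS capacity, and words 1/3/6 the default cylinder/head/
 * sector geometry whose product is the last-resort size.
 */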
1035 * ata_id_to_dma_mode - Identify DMA mode from id block
1036 * @dev: device to identify
1037 * @unknown: mode to assume if we cannot tell
1039 * Set up the timing values for the device based upon the identify
1040 * reported values for the DMA mode. This function is used by drivers
1041 * which rely upon firmware configured modes, but wish to report the
1042 * mode correctly when possible.
1044 * In addition we emit messages formatted like those of the default
1045 * ata_dev_set_mode handler, in order to provide consistency of
1049 void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1054 /* Pack the DMA modes */
1055 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1056 if (dev->id[53] & 0x04)
1057 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1059 /* Select the mode in use */
1060 mode = ata_xfer_mask2mode(mask);
1063 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1064 ata_mode_string(mask));
1066 /* SWDMA perhaps ? */
1068 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1071 /* Configure the device reporting */
1072 dev->xfer_mode = mode;
1073 dev->xfer_shift = ata_xfer_mode2shift(mode);
1077 * ata_noop_dev_select - Select device 0/1 on ATA bus
1078 * @ap: ATA channel to manipulate
1079 * @device: ATA device (numbered from zero) to select
1081 * This function intentionally does nothing; it is a no-op stub.
1083 * May be used as the dev_select() entry in ata_port_operations.
1088 void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
1094 * ata_std_dev_select - Select device 0/1 on ATA bus
1095 * @ap: ATA channel to manipulate
1096 * @device: ATA device (numbered from zero) to select
1098 * Use the method defined in the ATA specification to
1099 * make either device 0, or device 1, active on the
1100 * ATA channel. Works with both PIO and MMIO.
1102 * May be used as the dev_select() entry in ata_port_operations.
1108 void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1113 tmp = ATA_DEVICE_OBS;
1115 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1117 iowrite8(tmp, ap->ioaddr.device_addr);
1118 ata_pause(ap); /* needed; also flushes, for mmio */
1122 * ata_dev_select - Select device 0/1 on ATA bus
1123 * @ap: ATA channel to manipulate
1124 * @device: ATA device (numbered from zero) to select
1125 * @wait: non-zero to wait for Status register BSY bit to clear
1126 * @can_sleep: non-zero if context allows sleeping
1128 * Use the method defined in the ATA specification to
1129 * make either device 0, or device 1, active on the
1132 * This is a high-level version of ata_std_dev_select(),
1133 * which additionally provides the services of inserting
1134 * the proper pauses and status polling, where needed.
1140 void ata_dev_select(struct ata_port *ap, unsigned int device,
1141 unsigned int wait, unsigned int can_sleep)
1143 if (ata_msg_probe(ap))
1144 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1145 "device %u, wait %u\n", device, wait);
1150 ap->ops->dev_select(ap, device);
1153 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1160 * ata_dump_id - IDENTIFY DEVICE info debugging output
1161 * @id: IDENTIFY DEVICE page to dump
1163 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1170 static inline void ata_dump_id(const u16 *id)
1172 DPRINTK("49==0x%04x "
1182 DPRINTK("80==0x%04x "
1192 DPRINTK("88==0x%04x "
1199 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1200 * @id: IDENTIFY data to compute xfer mask from
1202 * Compute the xfermask for this device. This is not as trivial
1203 * as it seems if we must consider early devices correctly.
1205 * FIXME: pre IDE drive timing (do we care ?).
1213 static unsigned int ata_id_xfermask(const u16 *id)
1215 unsigned int pio_mask, mwdma_mask, udma_mask;
1217 /* Usual case. Word 53 indicates word 64 is valid */
1218 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1219 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1223 /* If word 64 isn't valid then Word 51 high byte holds
1224 * the PIO timing number for the maximum. Turn it into
1227 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
1228 if (mode < 5) /* Valid PIO range */
1229 pio_mask = (2 << mode) - 1;
1233 /* But wait.. there's more. Design your standards by
1234 * committee and you too can get a free iordy field to
1235 * process. However, it's the speeds, not the modes, that
1236 * are supported... Note drivers using the timing API
1237 * will get this right anyway.
1241 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
1243 if (ata_id_is_cfa(id)) {
1245 * Process compact flash extended modes
1247 int pio = id[163] & 0x7;
1248 int dma = (id[163] >> 3) & 7;
1251 pio_mask |= (1 << 5);
1253 pio_mask |= (1 << 6);
1255 mwdma_mask |= (1 << 3);
1257 mwdma_mask |= (1 << 4);
1261 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1262 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
1264 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
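/*
 * Illustrative example (not from the original source): a typical
 * UDMA/100 drive reports id[ATA_ID_UDMA_MODES] & 0xff == 0x3f
 * (UDMA0-5 supported; the high byte holds the currently selected
 * mode), so the packed mask comes back with the six low UDMA bits set.
 */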
1268 * ata_port_queue_task - Queue port_task
1269 * @ap: The ata_port to queue port_task for
1270 * @fn: workqueue function to be scheduled
1271 * @data: data for @fn to use
1272 * @delay: delay time for workqueue function
1274 * Schedule @fn(@data) for execution after @delay jiffies using
1275 * port_task. There is one port_task per port and it's the
1276 * user(low level driver)'s responsibility to make sure that only
1277 * one task is active at any given time.
1279 * libata core layer takes care of synchronization between
1280 * port_task and EH. ata_port_queue_task() may be ignored for EH
1284 * Inherited from caller.
1286 void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
1287 unsigned long delay)
1289 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1290 ap->port_task_data = data;
1292 /* may fail if ata_port_flush_task() in progress */
1293 queue_delayed_work(ata_wq, &ap->port_task, delay);
1297 * ata_port_flush_task - Flush port_task
1298 * @ap: The ata_port to flush port_task for
1300 * After this function completes, port_task is guaranteed not to
1301 * be running or scheduled.
1304 * Kernel thread context (may sleep)
1306 void ata_port_flush_task(struct ata_port *ap)
1310 cancel_rearming_delayed_work(&ap->port_task);
1312 if (ata_msg_ctl(ap))
1313 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
1316 static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
1318 struct completion *waiting = qc->private_data;
1324 * ata_exec_internal_sg - execute libata internal command
1325 * @dev: Device to which the command is sent
1326 * @tf: Taskfile registers for the command and the result
1327 * @cdb: CDB for packet command
1328 * @dma_dir: Data transfer direction of the command
1329 * @sg: sg list for the data buffer of the command
1330 * @n_elem: Number of sg entries
1332 * Executes libata internal command with timeout. @tf contains
1333 * command on entry and result on return. Timeout and error
1334 * conditions are reported via return value. No recovery action
1335 * is taken after a command times out. It's the caller's duty to
1336 * clean up after timeout.
1339 * None. Should be called with kernel context, might sleep.
1342 * Zero on success, AC_ERR_* mask on failure
1344 unsigned ata_exec_internal_sg(struct ata_device *dev,
1345 struct ata_taskfile *tf, const u8 *cdb,
1346 int dma_dir, struct scatterlist *sg,
1347 unsigned int n_elem)
1349 struct ata_link *link = dev->link;
1350 struct ata_port *ap = link->ap;
1351 u8 command = tf->command;
1352 struct ata_queued_cmd *qc;
1353 unsigned int tag, preempted_tag;
1354 u32 preempted_sactive, preempted_qc_active;
1355 DECLARE_COMPLETION_ONSTACK(wait);
1356 unsigned long flags;
1357 unsigned int err_mask;
1360 spin_lock_irqsave(ap->lock, flags);
1362 /* no internal command while frozen */
1363 if (ap->pflags & ATA_PFLAG_FROZEN) {
1364 spin_unlock_irqrestore(ap->lock, flags);
1365 return AC_ERR_SYSTEM;
1368 /* initialize internal qc */
1370 /* XXX: Tag 0 is used for drivers with legacy EH as some
1371 * drivers choke if any other tag is given. This breaks
1372 * ata_tag_internal() test for those drivers. Don't use new
1373 * EH stuff without converting to it.
1375 if (ap->ops->error_handler)
1376 tag = ATA_TAG_INTERNAL;
1380 if (test_and_set_bit(tag, &ap->qc_allocated))
1382 qc = __ata_qc_from_tag(ap, tag);
1390 preempted_tag = link->active_tag;
1391 preempted_sactive = link->sactive;
1392 preempted_qc_active = ap->qc_active;
1393 link->active_tag = ATA_TAG_POISON;
1397 /* prepare & issue qc */
1400 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
1401 qc->flags |= ATA_QCFLAG_RESULT_TF;
1402 qc->dma_dir = dma_dir;
1403 if (dma_dir != DMA_NONE) {
1404 unsigned int i, buflen = 0;
1406 for (i = 0; i < n_elem; i++)
1407 buflen += sg[i].length;
1409 ata_sg_init(qc, sg, n_elem);
1410 qc->nbytes = buflen;
1413 qc->private_data = &wait;
1414 qc->complete_fn = ata_qc_complete_internal;
1418 spin_unlock_irqrestore(ap->lock, flags);
1420 rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
1422 ata_port_flush_task(ap);
1425 spin_lock_irqsave(ap->lock, flags);
1427 /* We're racing with irq here. If we lose, the
1428 * following test prevents us from completing the qc
1429 * twice. If we win, the port is frozen and will be
1430 * cleaned up by ->post_internal_cmd().
1432 if (qc->flags & ATA_QCFLAG_ACTIVE) {
1433 qc->err_mask |= AC_ERR_TIMEOUT;
1435 if (ap->ops->error_handler)
1436 ata_port_freeze(ap);
1438 ata_qc_complete(qc);
1440 if (ata_msg_warn(ap))
1441 ata_dev_printk(dev, KERN_WARNING,
1442 "qc timeout (cmd 0x%x)\n", command);
1445 spin_unlock_irqrestore(ap->lock, flags);
1448 /* do post_internal_cmd */
1449 if (ap->ops->post_internal_cmd)
1450 ap->ops->post_internal_cmd(qc);
1452 /* perform minimal error analysis */
1453 if (qc->flags & ATA_QCFLAG_FAILED) {
1454 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1455 qc->err_mask |= AC_ERR_DEV;
1458 qc->err_mask |= AC_ERR_OTHER;
1460 if (qc->err_mask & ~AC_ERR_OTHER)
1461 qc->err_mask &= ~AC_ERR_OTHER;
1465 spin_lock_irqsave(ap->lock, flags);
1467 *tf = qc->result_tf;
1468 err_mask = qc->err_mask;
1471 link->active_tag = preempted_tag;
1472 link->sactive = preempted_sactive;
1473 ap->qc_active = preempted_qc_active;
1475 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1476 * Until those drivers are fixed, we detect the condition
1477 * here, fail the command with AC_ERR_SYSTEM and reenable the
1480 * Note that this doesn't change any behavior as internal
1481 * command failure results in disabling the device in the
1482 * higher layer for LLDDs without new reset/EH callbacks.
1484 * Kill the following code as soon as those drivers are fixed.
1486 if (ap->flags & ATA_FLAG_DISABLED) {
1487 err_mask |= AC_ERR_SYSTEM;
1491 spin_unlock_irqrestore(ap->lock, flags);
1497 * ata_exec_internal - execute libata internal command
1498 * @dev: Device to which the command is sent
1499 * @tf: Taskfile registers for the command and the result
1500 * @cdb: CDB for packet command
1501 * @dma_dir: Data transfer direction of the command
1502 * @buf: Data buffer of the command
1503 * @buflen: Length of data buffer
1505 * Wrapper around ata_exec_internal_sg() which takes simple
1506 * buffer instead of sg list.
1509 * None. Should be called with kernel context, might sleep.
1512 * Zero on success, AC_ERR_* mask on failure
1514 unsigned ata_exec_internal(struct ata_device *dev,
1515 struct ata_taskfile *tf, const u8 *cdb,
1516 int dma_dir, void *buf, unsigned int buflen)
1518 struct scatterlist *psg = NULL, sg;
1519 unsigned int n_elem = 0;
1521 if (dma_dir != DMA_NONE) {
1523 sg_init_one(&sg, buf, buflen);
1528 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
1532 * ata_do_simple_cmd - execute simple internal command
1533 * @dev: Device to which the command is sent
1534 * @cmd: Opcode to execute
1536 * Execute a 'simple' command, that only consists of the opcode
1537 * 'cmd' itself, without filling any other registers
1540 * Kernel thread context (may sleep).
1543 * Zero on success, AC_ERR_* mask on failure
1545 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
1547 struct ata_taskfile tf;
1549 ata_tf_init(dev, &tf);
1552 tf.flags |= ATA_TFLAG_DEVICE;
1553 tf.protocol = ATA_PROT_NODATA;
1555 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
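/*
 * Usage sketch (illustrative): spinning a drive down before suspend
 * reduces to a single opcode:
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
 *	if (err_mask)
 *		(handle the AC_ERR_* bits)
 */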
1559 * ata_pio_need_iordy - check if iordy needed
1562 * Check if the current speed of the device requires IORDY. Used
1563 * by various controllers for chip configuration.
1566 unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1568 /* Controller doesn't support IORDY. Probably a pointless check
1569 as the caller should know this */
1570 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1572 /* PIO3 and higher it is mandatory */
1573 if (adev->pio_mode > XFER_PIO_2)
1575 /* We turn it on when possible */
1576 if (ata_id_has_iordy(adev->id))
1582 * ata_pio_mask_no_iordy - Return the non IORDY mask
1585 * Compute the highest mode possible if we are not using iordy. Return
1586 * -1 if no iordy mode is available.
1589 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1591 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1592 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1593 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1594 /* Is the speed faster than the drive allows non IORDY ? */
1596 /* This is cycle times not frequency - watch the logic! */
1597 if (pio > 240) /* PIO2 is 240nS per cycle */
1598 return 3 << ATA_SHIFT_PIO;
1599 return 7 << ATA_SHIFT_PIO;
1602 return 3 << ATA_SHIFT_PIO;
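/*
 * Illustrative reading of the logic above: the EIDE word is a cycle
 * time in nanoseconds, so a drive reporting 240 ns (PIO2 speed) or
 * faster gets the PIO0-2 mask (0x07), while anything slower than
 * 240 ns is limited to PIO0-1 (0x03).  Without valid EIDE words we
 * conservatively assume PIO0-1 as well.
 */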
1606 * ata_dev_read_id - Read ID data from the specified device
1607 * @dev: target device
1608 * @p_class: pointer to class of the target device (may be changed)
1609 * @flags: ATA_READID_* flags
1610 * @id: buffer to read IDENTIFY data into
1612 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1613 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
1614 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1615 * for pre-ATA4 drives.
1618 * Kernel thread context (may sleep)
1621 * 0 on success, -errno otherwise.
1623 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
1624 unsigned int flags, u16 *id)
1626 struct ata_port *ap = dev->link->ap;
1627 unsigned int class = *p_class;
1628 struct ata_taskfile tf;
1629 unsigned int err_mask = 0;
1631 int may_fallback = 1, tried_spinup = 0;
1634 if (ata_msg_ctl(ap))
1635 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1637 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
1639 ata_tf_init(dev, &tf);
1643 tf.command = ATA_CMD_ID_ATA;
1646 tf.command = ATA_CMD_ID_ATAPI;
1650 reason = "unsupported class";
1654 tf.protocol = ATA_PROT_PIO;
1656 /* Some devices choke if TF registers contain garbage. Make
1657 * sure those are properly initialized.
1659 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1661 /* Device presence detection is unreliable on some
1662 * controllers. Always poll IDENTIFY if available.
1664 tf.flags |= ATA_TFLAG_POLLING;
1666 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
1667 id, sizeof(id[0]) * ATA_ID_WORDS);
1669 if (err_mask & AC_ERR_NODEV_HINT) {
1670 DPRINTK("ata%u.%d: NODEV after polling detection\n",
1671 ap->print_id, dev->devno);
1675 /* Device or controller might have reported the wrong
1676 * device class. Give a shot at the other IDENTIFY if
1677 * the current one is aborted by the device.
1680 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1683 if (class == ATA_DEV_ATA)
1684 class = ATA_DEV_ATAPI;
1686 class = ATA_DEV_ATA;
1691 reason = "I/O error";
1695 /* Falling back doesn't make sense if ID data was read
1696 * successfully at least once.
1700 swap_buf_le16(id, ATA_ID_WORDS);
1704 reason = "device reports invalid type";
1706 if (class == ATA_DEV_ATA) {
1707 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1710 if (ata_id_is_ata(id))
1714 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1717 * Drive powered-up in standby mode, and requires a specific
1718 * SET_FEATURES spin-up subcommand before it will accept
1719 * anything other than the original IDENTIFY command.
1721 ata_tf_init(dev, &tf);
1722 tf.command = ATA_CMD_SET_FEATURES;
1723 tf.feature = SETFEATURES_SPINUP;
1724 tf.protocol = ATA_PROT_NODATA;
1725 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1726 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1727 if (err_mask && id[2] != 0x738c) {
1729 reason = "SPINUP failed";
1733 * If the drive initially returned incomplete IDENTIFY info,
1734 * we now must reissue the IDENTIFY command.
1736 if (id[2] == 0x37c8)
1740 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
1742 * The exact sequence expected by certain pre-ATA4 drives is:
1745 * INITIALIZE DEVICE PARAMETERS
1747 * Some drives were very specific about that exact sequence.
1749 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
1750 err_mask = ata_dev_init_params(dev, id[3], id[6]);
1753 reason = "INIT_DEV_PARAMS failed";
1757 /* current CHS translation info (id[53-58]) might be
1758 * changed. Reread the identify device info.
1760 flags &= ~ATA_READID_POSTRESET;
1770 if (ata_msg_warn(ap))
1771 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
1772 "(%s, err_mask=0x%x)\n", reason, err_mask);
1776 static inline u8 ata_dev_knobble(struct ata_device *dev)
1778 struct ata_port *ap = dev->link->ap;
1779 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
1782 static void ata_dev_config_ncq(struct ata_device *dev,
1783 char *desc, size_t desc_sz)
1785 struct ata_port *ap = dev->link->ap;
1786 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1788 if (!ata_id_has_ncq(dev->id)) {
1792 if (dev->horkage & ATA_HORKAGE_NONCQ) {
1793 snprintf(desc, desc_sz, "NCQ (not used)");
1796 if (ap->flags & ATA_FLAG_NCQ) {
1797 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
1798 dev->flags |= ATA_DFLAG_NCQ;
1801 if (hdepth >= ddepth)
1802 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1804 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
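/*
 * Example output (illustrative): a 32-tag drive behind a host that can
 * only queue 31 commands yields "NCQ (depth 31/32)"; when the host
 * depth is at least the drive depth, the shorter "NCQ (depth 32)"
 * form is printed instead.
 */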
1808 * ata_dev_configure - Configure the specified ATA/ATAPI device
1809 * @dev: Target device to configure
1811 * Configure @dev according to @dev->id. Generic and low-level
1812 * driver specific fixups are also applied.
1815 * Kernel thread context (may sleep)
1818 * 0 on success, -errno otherwise
1820 int ata_dev_configure(struct ata_device *dev)
1822 struct ata_port *ap = dev->link->ap;
1823 struct ata_eh_context *ehc = &dev->link->eh_context;
1824 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1825 const u16 *id = dev->id;
1826 unsigned int xfer_mask;
1827 char revbuf[7]; /* XYZ-99\0 */
1828 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1829 char modelbuf[ATA_ID_PROD_LEN+1];
1832 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
1833 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1838 if (ata_msg_probe(ap))
1839 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1842 dev->horkage |= ata_dev_blacklisted(dev);
1844 /* let ACPI work its magic */
1845 rc = ata_acpi_on_devcfg(dev);
1849 /* print device capabilities */
1850 if (ata_msg_probe(ap))
1851 ata_dev_printk(dev, KERN_DEBUG,
1852 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1853 "85:%04x 86:%04x 87:%04x 88:%04x\n",
1855 id[49], id[82], id[83], id[84],
1856 id[85], id[86], id[87], id[88]);
1858 /* initialize to-be-configured parameters */
1859 dev->flags &= ~ATA_DFLAG_CFG_MASK;
1860 dev->max_sectors = 0;
1868 * common ATA, ATAPI feature tests
1871 /* find max transfer mode; for printk only */
1872 xfer_mask = ata_id_xfermask(id);
1874 if (ata_msg_probe(ap))
1877 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1878 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1881 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1884 /* ATA-specific feature tests */
1885 if (dev->class == ATA_DEV_ATA) {
1886 if (ata_id_is_cfa(id)) {
1887 if (id[162] & 1) /* CPRM may make this media unusable */
1888 ata_dev_printk(dev, KERN_WARNING,
1889 "supports DRM functions and may "
1890 "not be fully accessable.\n");
1891 snprintf(revbuf, 7, "CFA");
1894 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1896 dev->n_sectors = ata_id_n_sectors(id);
1898 if (dev->id[59] & 0x100)
1899 dev->multi_count = dev->id[59] & 0xff;
1901 if (ata_id_has_lba(id)) {
1902 const char *lba_desc;
1906 dev->flags |= ATA_DFLAG_LBA;
1907 if (ata_id_has_lba48(id)) {
1908 dev->flags |= ATA_DFLAG_LBA48;
1911 if (dev->n_sectors >= (1UL << 28) &&
1912 ata_id_has_flush_ext(id))
1913 dev->flags |= ATA_DFLAG_FLUSH_EXT;
1916 if (!(dev->horkage & ATA_HORKAGE_BROKEN_HPA) &&
1917 ata_id_hpa_enabled(dev->id))
1918 dev->n_sectors = ata_hpa_resize(dev);
1921 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1923 /* print device info to dmesg */
1924 if (ata_msg_drv(ap) && print_info) {
1925 ata_dev_printk(dev, KERN_INFO,
1926 "%s: %s, %s, max %s\n",
1927 revbuf, modelbuf, fwrevbuf,
1928 ata_mode_string(xfer_mask));
1929 ata_dev_printk(dev, KERN_INFO,
1930 "%Lu sectors, multi %u: %s %s\n",
1931 (unsigned long long)dev->n_sectors,
1932 dev->multi_count, lba_desc, ncq_desc);
1937 /* Default translation */
1938 dev->cylinders = id[1];
1940 dev->sectors = id[6];
1942 if (ata_id_current_chs_valid(id)) {
1943 /* Current CHS translation is valid. */
1944 dev->cylinders = id[54];
1945 dev->heads = id[55];
1946 dev->sectors = id[56];
1949 /* print device info to dmesg */
1950 if (ata_msg_drv(ap) && print_info) {
1951 ata_dev_printk(dev, KERN_INFO,
1952 "%s: %s, %s, max %s\n",
1953 revbuf, modelbuf, fwrevbuf,
1954 ata_mode_string(xfer_mask));
1955 ata_dev_printk(dev, KERN_INFO,
1956 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
1957 (unsigned long long)dev->n_sectors,
1958 dev->multi_count, dev->cylinders,
1959 dev->heads, dev->sectors);
1966 /* ATAPI-specific feature tests */
1967 else if (dev->class == ATA_DEV_ATAPI) {
1968 char *cdb_intr_string = "";
1970 rc = atapi_cdb_len(id);
1971 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
1972 if (ata_msg_warn(ap))
1973 ata_dev_printk(dev, KERN_WARNING,
1974 "unsupported CDB len\n");
1978 dev->cdb_len = (unsigned int) rc;
1980 if (ata_id_cdb_intr(dev->id)) {
1981 dev->flags |= ATA_DFLAG_CDB_INTR;
1982 cdb_intr_string = ", CDB intr";
1985 /* print device info to dmesg */
1986 if (ata_msg_drv(ap) && print_info)
1987 ata_dev_printk(dev, KERN_INFO,
1988 "ATAPI: %s, %s, max %s%s\n",
1990 ata_mode_string(xfer_mask),
1994 /* determine max_sectors */
1995 dev->max_sectors = ATA_MAX_SECTORS;
1996 if (dev->flags & ATA_DFLAG_LBA48)
1997 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
1999 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2000 /* Let the user know. We don't want to disallow opens for
2001 rescue purposes, or in case the vendor is just a blithering
2004 ata_dev_printk(dev, KERN_WARNING,
2005 "Drive reports diagnostics failure. This may indicate a drive\n");
2006 ata_dev_printk(dev, KERN_WARNING,
2007 "fault or invalid emulation. Contact drive vendor for information.\n");
2011 /* limit bridge transfers to udma5, 200 sectors */
2012 if (ata_dev_knobble(dev)) {
2013 if (ata_msg_drv(ap) && print_info)
2014 ata_dev_printk(dev, KERN_INFO,
2015 "applying bridge limits\n");
2016 dev->udma_mask &= ATA_UDMA5;
2017 dev->max_sectors = ATA_MAX_SECTORS;
2020 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
2021 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2024 if (ap->ops->dev_config)
2025 ap->ops->dev_config(dev);
2027 if (ata_msg_probe(ap))
2028 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2029 __FUNCTION__, ata_chk_status(ap));
2033 if (ata_msg_probe(ap))
2034 ata_dev_printk(dev, KERN_DEBUG,
2035 "%s: EXIT, err\n", __FUNCTION__);
2040 * ata_cable_40wire - return 40 wire cable type
2043 * Helper method for drivers which want to hardwire 40 wire cable
2047 int ata_cable_40wire(struct ata_port *ap)
2049 return ATA_CBL_PATA40;
2053 * ata_cable_80wire - return 80 wire cable type
2056 * Helper method for drivers which want to hardwire 80 wire cable
2060 int ata_cable_80wire(struct ata_port *ap)
2062 return ATA_CBL_PATA80;
2066 * ata_cable_unknown - return unknown PATA cable.
2069 * Helper method for drivers which have no PATA cable detection.
2072 int ata_cable_unknown(struct ata_port *ap)
2074 return ATA_CBL_PATA_UNK;
2078 * ata_cable_sata - return SATA cable type
2081 * Helper method for drivers which have SATA cables
2084 int ata_cable_sata(struct ata_port *ap)
2086 return ATA_CBL_SATA;
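/*
 * Usage sketch (illustrative; "foo_ops" is a hypothetical driver): a
 * driver with no cable detection logic simply plugs one of these
 * helpers into its operations table, e.g.
 *
 *	static struct ata_port_operations foo_ops = {
 *		.cable_detect	= ata_cable_40wire,
 *		...
 *	};
 *
 * ata_bus_probe() then calls ap->ops->cable_detect(ap) to set ap->cbl.
 */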
2090 * ata_bus_probe - Reset and probe ATA bus
2093 * Master ATA bus probing function. Initiates a hardware-dependent
2094 * bus reset, then attempts to identify any devices found on
2098 * PCI/etc. bus probe sem.
2101 * Zero on success, negative errno otherwise.
2104 int ata_bus_probe(struct ata_port *ap)
2106 unsigned int classes[ATA_MAX_DEVICES];
2107 int tries[ATA_MAX_DEVICES];
2109 struct ata_device *dev;
2113 for (i = 0; i < ATA_MAX_DEVICES; i++)
2114 tries[i] = ATA_PROBE_MAX_TRIES;
2117 /* reset and determine device classes */
2118 ap->ops->phy_reset(ap);
2120 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2121 dev = &ap->link.device[i];
2123 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2124 dev->class != ATA_DEV_UNKNOWN)
2125 classes[dev->devno] = dev->class;
2127 classes[dev->devno] = ATA_DEV_NONE;
2129 dev->class = ATA_DEV_UNKNOWN;
2134 /* after the reset the device state is PIO 0 and the controller
2135 state is undefined. Record the mode */
2137 for (i = 0; i < ATA_MAX_DEVICES; i++)
2138 ap->link.device[i].pio_mode = XFER_PIO_0;
2140 /* read IDENTIFY page and configure devices. We have to do the identify
2141 specific sequence bass-ackwards so that PDIAG- is released by
2144 for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
2145 dev = &ap->link.device[i];
2148 dev->class = classes[i];
2150 if (!ata_dev_enabled(dev))
2153 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2159 /* Now ask for the cable type as PDIAG- should have been released */
2160 if (ap->ops->cable_detect)
2161 ap->cbl = ap->ops->cable_detect(ap);
2163 /* After the identify sequence we can now set up the devices. We do
2164 this in the normal order so that the user doesn't get confused */
2166 for(i = 0; i < ATA_MAX_DEVICES; i++) {
2167 dev = &ap->link.device[i];
2168 if (!ata_dev_enabled(dev))
2171 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
2172 rc = ata_dev_configure(dev);
2173 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
2178 /* configure transfer mode */
2179 rc = ata_set_mode(ap, &dev);
2183 for (i = 0; i < ATA_MAX_DEVICES; i++)
2184 if (ata_dev_enabled(&ap->link.device[i]))
2187 /* no device present, disable port */
2188 ata_port_disable(ap);
2189 ap->ops->port_disable(ap);
2193 tries[dev->devno]--;
2197 /* eeek, something went very wrong, give up */
2198 tries[dev->devno] = 0;
2202 /* give it just one more chance */
2203 tries[dev->devno] = min(tries[dev->devno], 1);
2205 if (tries[dev->devno] == 1) {
2206 /* This is the last chance, better to slow
2207 * down than lose it.
2209 sata_down_spd_limit(ap);
2210 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2214 if (!tries[dev->devno])
2215 ata_dev_disable(dev);
2221 * ata_port_probe - Mark port as enabled
2222 * @ap: Port for which we indicate enablement
2224 * Modify @ap data structure such that the system
2225 * thinks that the entire port is enabled.
2227 * LOCKING: host lock, or some other form of
2231 void ata_port_probe(struct ata_port *ap)
2233 ap->flags &= ~ATA_FLAG_DISABLED;
2237 * sata_print_link_status - Print SATA link status
2238 * @ap: SATA port to printk link status about
2240 * This function prints link speed and status of a SATA link.
2245 void sata_print_link_status(struct ata_port *ap)
2247 u32 sstatus, scontrol, tmp;
2249 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
2251 sata_scr_read(ap, SCR_CONTROL, &scontrol);
2253 if (ata_port_online(ap)) {
2254 tmp = (sstatus >> 4) & 0xf;
2255 ata_port_printk(ap, KERN_INFO,
2256 "SATA link up %s (SStatus %X SControl %X)\n",
2257 sata_spd_string(tmp), sstatus, scontrol);
2259 ata_port_printk(ap, KERN_INFO,
2260 "SATA link down (SStatus %X SControl %X)\n",
2266 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2267 * @ap: SATA port associated with target SATA PHY.
2269 * This function issues commands to standard SATA Sxxx
2270 * PHY registers, to wake up the phy (and device), and
2271 * clear any reset condition.
2274 * PCI/etc. bus probe sem.
2277 void __sata_phy_reset(struct ata_port *ap)
2280 unsigned long timeout = jiffies + (HZ * 5);
2282 if (ap->flags & ATA_FLAG_SATA_RESET) {
2283 /* issue phy wake/reset */
2284 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
2285 /* Couldn't find anything in SATA I/II specs, but
2286 * AHCI-1.1 10.4.2 says at least 1 ms. */
2289 /* phy wake/clear reset */
2290 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
2292 /* wait for phy to become ready, if necessary */
2295 sata_scr_read(ap, SCR_STATUS, &sstatus);
2296 if ((sstatus & 0xf) != 1)
2298 } while (time_before(jiffies, timeout));
2300 /* print link status */
2301 sata_print_link_status(ap);
2303 /* TODO: phy layer with polling, timeouts, etc. */
2304 if (!ata_port_offline(ap))
2307 ata_port_disable(ap);
2309 if (ap->flags & ATA_FLAG_DISABLED)
2312 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2313 ata_port_disable(ap);
2317 ap->cbl = ATA_CBL_SATA;
2321 * sata_phy_reset - Reset SATA bus.
2322 * @ap: SATA port associated with target SATA PHY.
2324 * This function resets the SATA bus, and then probes
2325 * the bus for devices.
2328 * PCI/etc. bus probe sem.
2331 void sata_phy_reset(struct ata_port *ap)
2333 __sata_phy_reset(ap);
2334 if (ap->flags & ATA_FLAG_DISABLED)
2340 * ata_dev_pair - return other device on cable
2343 * Obtain the other device on the same cable, or NULL if no
2344 * other device is present.
2347 struct ata_device *ata_dev_pair(struct ata_device *adev)
2349 struct ata_link *link = adev->link;
2350 struct ata_device *pair = &link->device[1 - adev->devno];
2351 if (!ata_dev_enabled(pair))
2357 * ata_port_disable - Disable port.
2358 * @ap: Port to be disabled.
2360 * Modify @ap data structure such that the system
2361 * thinks that the entire port is disabled, and should
2362 * never attempt to probe or communicate with devices
2365 * LOCKING: host lock, or some other form of
2369 void ata_port_disable(struct ata_port *ap)
2371 ap->link.device[0].class = ATA_DEV_NONE;
2372 ap->link.device[1].class = ATA_DEV_NONE;
2373 ap->flags |= ATA_FLAG_DISABLED;
2377 * sata_down_spd_limit - adjust SATA spd limit downward
2378 * @ap: Port to adjust SATA spd limit for
2380 * Adjust SATA spd limit of @ap downward. Note that this
2381 * function only adjusts the limit. The change must be applied
2382 * using sata_set_spd().
2385 * Inherited from caller.
2388 * 0 on success, negative errno on failure
2390 int sata_down_spd_limit(struct ata_port *ap)
2392 u32 sstatus, spd, mask;
2395 if (!sata_scr_valid(ap))
2398 /* If SCR can be read, use it to determine the current SPD.
2399 * If not, use cached value in ap->sata_spd.
2401 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2403 spd = (sstatus >> 4) & 0xf;
2405 spd = ap->link.sata_spd;
2407 mask = ap->link.sata_spd_limit;
2411 /* unconditionally mask off the highest bit */
2412 highbit = fls(mask) - 1;
2413 mask &= ~(1 << highbit);
2415 /* Mask off all speeds higher than or equal to the current
2416 * one. Force 1.5Gbps if current SPD is not available.
2419 mask &= (1 << (spd - 1)) - 1;
2423 /* were we already at the bottom? */
2427 ap->link.sata_spd_limit = mask;
2429 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2430 sata_spd_string(fls(mask)));
2435 static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
2439 if (ap->link.sata_spd_limit == UINT_MAX)
2442 limit = fls(ap->link.sata_spd_limit);
2444 spd = (*scontrol >> 4) & 0xf;
2445 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2447 return spd != limit;
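/*
 * Worked example (illustrative): SPD occupies SControl bits 7:4.  With
 * scontrol == 0x300 (no limit) and ap->link.sata_spd_limit == 1
 * (1.5 Gbps only), limit = fls(1) = 1, so *scontrol becomes 0x310 and
 * the function returns 1, signalling that a hardreset is needed to
 * apply the new limit.
 */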
2451 * sata_set_spd_needed - is SATA spd configuration needed
2452 * @ap: Port in question
2454 * Test whether the spd limit in SControl matches
2455 * @ap->link.sata_spd_limit. This function is used to determine
2456 * whether hardreset is necessary to apply SATA spd
2460 * Inherited from caller.
2463 * 1 if SATA spd configuration is needed, 0 otherwise.
2465 int sata_set_spd_needed(struct ata_port *ap)
2469 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
2472 return __sata_set_spd_needed(ap, &scontrol);
2476 * sata_set_spd - set SATA spd according to spd limit
2477 * @ap: Port to set SATA spd for
2479 * Set SATA spd of @ap according to sata_spd_limit.
2482 * Inherited from caller.
2485 * 0 if spd doesn't need to be changed, 1 if spd has been
2486 * changed. Negative errno if SCR registers are inaccessible.
2488 int sata_set_spd(struct ata_port *ap)
2493 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2496 if (!__sata_set_spd_needed(ap, &scontrol))
2499 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2506 * This mode timing computation functionality is ported over from
2507 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2510 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
2511 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
2512 * for UDMA6, which is currently supported only by Maxtor drives.
2514 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
2517 static const struct ata_timing ata_timing[] = {
2519 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2520 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2521 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2522 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2524 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2525 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
2526 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2527 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2528 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2530 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2532 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2533 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2534 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2536 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2537 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2538 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2540 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2541 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2542 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2543 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2545 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2546 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2547 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2549 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2554 #define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2555 #define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
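/* Worked example (illustrative): ENOUGH() rounds a duration up to a
 * whole number of clock periods, so a 70ns requirement on a 30ns
 * clock costs ENOUGH(70, 30) = (70 - 1)/30 + 1 = 3 cycles (90ns),
 * never less than requested; EZ() additionally keeps a zero
 * ("no requirement") at zero instead of promoting it to one cycle.
 */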
2557 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2559 q->setup = EZ(t->setup * 1000, T);
2560 q->act8b = EZ(t->act8b * 1000, T);
2561 q->rec8b = EZ(t->rec8b * 1000, T);
2562 q->cyc8b = EZ(t->cyc8b * 1000, T);
2563 q->active = EZ(t->active * 1000, T);
2564 q->recover = EZ(t->recover * 1000, T);
2565 q->cycle = EZ(t->cycle * 1000, T);
2566 q->udma = EZ(t->udma * 1000, UT);
2569 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2570 struct ata_timing *m, unsigned int what)
2572 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2573 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2574 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2575 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2576 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2577 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2578 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2579 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2582 static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2584 const struct ata_timing *t;
2586 for (t = ata_timing; t->mode != speed; t++)
2587 if (t->mode == 0xFF)
2588 return NULL;
2590 return t;
2592 int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2593 struct ata_timing *t, int T, int UT)
2595 const struct ata_timing *s;
2596 struct ata_timing p;
2602 if (!(s = ata_timing_find_mode(speed)))
2603 return -EINVAL;
2605 memcpy(t, s, sizeof(*s));
2608 * If the drive is an EIDE drive, it can tell us it needs extended
2609 * PIO/MW_DMA cycle timing.
2612 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2613 memset(&p, 0, sizeof(p));
2614 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2615 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2616 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2617 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2618 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2620 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2624 * Convert the timing to bus clock counts.
2627 ata_timing_quantize(t, t, T, UT);
2630 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2631 * S.M.A.R.T and some other commands. We have to ensure that the
2632 * DMA cycle timing is slower than or equal to the fastest PIO timing.
2635 if (speed > XFER_PIO_6) {
2636 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2637 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2641 * Lengthen active & recovery time so that cycle time is correct.
2644 if (t->act8b + t->rec8b < t->cyc8b) {
2645 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2646 t->rec8b = t->cyc8b - t->act8b;
2649 if (t->active + t->recover < t->cycle) {
2650 t->active += (t->cycle - (t->active + t->recover)) / 2;
2651 t->recover = t->cycle - t->active;
2654 /* In a few cases quantisation may produce enough errors to
2655 leave t->cycle too low for the sum of active and recovery;
2656 if so, we must correct this */
2657 if (t->active + t->recover > t->cycle)
2658 t->cycle = t->active + t->recover;
2660 return 0;
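/* Worked example (illustrative): if quantization leaves act8b=3,
 * rec8b=2 and cyc8b=8 clocks, the shortfall of 8 - (3 + 2) = 3 is
 * split so act8b grows by 3/2 = 1 (to 4) and rec8b absorbs the rest
 * (8 - 4 = 4), restoring act8b + rec8b == cyc8b; the second branch
 * above does the same for the 16-bit active/recover/cycle triple.
 */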
2664 * ata_down_xfermask_limit - adjust dev xfer masks downward
2665 * @dev: Device to adjust xfer masks
2666 * @sel: ATA_DNXFER_* selector
2668 * Adjust xfer masks of @dev downward. Note that this function
2669 * does not apply the change. Invoking ata_set_mode() afterwards
2670 * will apply the limit.
2673 * Inherited from caller.
2676 * 0 on success, negative errno on failure
2678 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
2680 char buf[32];
2681 unsigned int orig_mask, xfer_mask;
2682 unsigned int pio_mask, mwdma_mask, udma_mask;
2683 int quiet, highbit;
2685 quiet = !!(sel & ATA_DNXFER_QUIET);
2686 sel &= ~ATA_DNXFER_QUIET;
2688 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2689 dev->mwdma_mask,
2690 dev->udma_mask);
2691 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
2694 case ATA_DNXFER_PIO:
2695 highbit = fls(pio_mask) - 1;
2696 pio_mask &= ~(1 << highbit);
2697 break;
2699 case ATA_DNXFER_DMA:
2700 if (udma_mask) {
2701 highbit = fls(udma_mask) - 1;
2702 udma_mask &= ~(1 << highbit);
2703 if (!udma_mask)
2704 return -ENOENT;
2705 } else if (mwdma_mask) {
2706 highbit = fls(mwdma_mask) - 1;
2707 mwdma_mask &= ~(1 << highbit);
2708 if (!mwdma_mask)
2709 return -ENOENT;
2711 break;
2713 case ATA_DNXFER_40C:
2714 udma_mask &= ATA_UDMA_MASK_40C;
2715 break;
2717 case ATA_DNXFER_FORCE_PIO0:
2718 pio_mask &= 1;
2719 case ATA_DNXFER_FORCE_PIO:
2720 mwdma_mask = 0;
2721 udma_mask = 0;
2722 break;
2724 default:
2725 BUG();
2728 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2730 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2731 return -ENOENT;
2734 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2735 snprintf(buf, sizeof(buf), "%s:%s",
2736 ata_mode_string(xfer_mask),
2737 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2738 else
2739 snprintf(buf, sizeof(buf), "%s",
2740 ata_mode_string(xfer_mask));
2742 ata_dev_printk(dev, KERN_WARNING,
2743 "limiting speed to %s\n", buf);
2746 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2747 &dev->udma_mask);
2749 return 0;
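/*
 * Illustrative note (values hypothetical): a packed xfer_mask keeps
 * the PIO, MWDMA and UDMA bits in disjoint fields, so stepping down
 * one PIO mode is just clearing the top bit of the PIO field:
 *
 *	unsigned int pio, mwdma, udma;
 *
 *	ata_unpack_xfermask(ata_pack_xfermask(0x1f, 0x07, 0x3f),
 *			    &pio, &mwdma, &udma);
 *	pio &= ~(1 << (fls(pio) - 1));	(PIO0-4 becomes PIO0-3)
 */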
2752 static int ata_dev_set_mode(struct ata_device *dev)
2754 struct ata_eh_context *ehc = &dev->link->eh_context;
2755 unsigned int err_mask;
2756 int rc;
2758 dev->flags &= ~ATA_DFLAG_PIO;
2759 if (dev->xfer_shift == ATA_SHIFT_PIO)
2760 dev->flags |= ATA_DFLAG_PIO;
2762 err_mask = ata_dev_set_xfermode(dev);
2763 /* Old CFA may refuse this command, which is just fine */
2764 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2765 err_mask &= ~AC_ERR_DEV;
2767 if (err_mask) {
2768 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2769 "(err_mask=0x%x)\n", err_mask);
2770 return -EIO;
2771 }
2773 ehc->i.flags |= ATA_EHI_POST_SETMODE;
2774 rc = ata_dev_revalidate(dev, 0);
2775 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
2776 if (rc)
2777 return rc;
2779 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2780 dev->xfer_shift, (int)dev->xfer_mode);
2782 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2783 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
2785 return 0;
2788 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
2789 * @ap: port on which timings will be programmed
2790 * @r_failed_dev: out parameter for failed device
2792 * Standard implementation of the function used to tune and set
2793 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2794 * ata_dev_set_mode() fails, pointer to the failing device is
2795 * returned in @r_failed_dev.
2797 * LOCKING:
2798 * PCI/etc. bus probe sem.
2800 * RETURNS:
2801 * 0 on success, negative errno otherwise
2804 int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2806 struct ata_device *dev;
2807 int i, rc = 0, used_dma = 0, found = 0;
2810 /* step 1: calculate xfer_mask */
2811 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2812 unsigned int pio_mask, dma_mask;
2814 dev = &ap->link.device[i];
2816 if (!ata_dev_enabled(dev))
2817 continue;
2819 ata_dev_xfermask(dev);
2821 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2822 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2823 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2824 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
2826 found = 1;
2827 if (dev->dma_mode)
2828 used_dma = 1;
2830 if (!found)
2831 goto out;
2833 /* step 2: always set host PIO timings */
2834 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2835 dev = &ap->link.device[i];
2836 if (!ata_dev_enabled(dev))
2837 continue;
2839 if (!dev->pio_mode) {
2840 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
2841 rc = -EINVAL;
2842 goto out;
2843 }
2845 dev->xfer_mode = dev->pio_mode;
2846 dev->xfer_shift = ATA_SHIFT_PIO;
2847 if (ap->ops->set_piomode)
2848 ap->ops->set_piomode(ap, dev);
2851 /* step 3: set host DMA timings */
2852 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2853 dev = &ap->link.device[i];
2855 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2856 continue;
2858 dev->xfer_mode = dev->dma_mode;
2859 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2860 if (ap->ops->set_dmamode)
2861 ap->ops->set_dmamode(ap, dev);
2864 /* step 4: update devices' xfer mode */
2865 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2866 dev = &ap->link.device[i];
2868 /* don't update suspended devices' xfer mode */
2869 if (!ata_dev_enabled(dev))
2870 continue;
2872 rc = ata_dev_set_mode(dev);
2873 if (rc)
2874 goto out;
2877 /* Record simplex status. If we selected DMA then the other
2878 * host channels are not permitted to do so.
2880 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2881 ap->host->simplex_claimed = ap;
2883 out:
2884 if (rc)
2885 *r_failed_dev = dev;
2886 return rc;
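/*
 * Minimal sketch (hypothetical LLDD code, not part of this file):
 * steps 2 and 3 above only touch the hardware when the driver
 * supplies the optional hooks, typically built on the timing
 * helpers, e.g.
 *
 *	static void my_set_piomode(struct ata_port *ap,
 *				   struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *
 *		if (ata_timing_compute(adev, adev->pio_mode, &t,
 *				       30000, 1000) == 0)
 *			my_program_timings(ap, adev, &t);
 *	}
 *
 * where my_program_timings() and the 30000/1000 clock periods are
 * assumptions standing in for chipset-specific code.
 */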
2890 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2891 * @ap: port on which timings will be programmed
2892 * @r_failed_dev: out parameter for failed device
2894 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2895 * ata_set_mode() fails, pointer to the failing device is
2896 * returned in @r_failed_dev.
2898 * LOCKING:
2899 * PCI/etc. bus probe sem.
2901 * RETURNS:
2902 * 0 on success, negative errno otherwise
2904 int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2906 /* has private set_mode? */
2907 if (ap->ops->set_mode)
2908 return ap->ops->set_mode(ap, r_failed_dev);
2909 return ata_do_set_mode(ap, r_failed_dev);
2913 * ata_tf_to_host - issue ATA taskfile to host controller
2914 * @ap: port to which command is being issued
2915 * @tf: ATA taskfile register set
2917 * Issues ATA taskfile register set to ATA host controller,
2918 * with proper synchronization with interrupt handler and
2919 * other threads.
2921 * LOCKING:
2922 * spin_lock_irqsave(host lock)
2925 static inline void ata_tf_to_host(struct ata_port *ap,
2926 const struct ata_taskfile *tf)
2928 ap->ops->tf_load(ap, tf);
2929 ap->ops->exec_command(ap, tf);
2933 * ata_busy_sleep - sleep until BSY clears, or timeout
2934 * @ap: port containing status register to be polled
2935 * @tmout_pat: impatience timeout
2936 * @tmout: overall timeout
2938 * Sleep until ATA Status register bit BSY clears,
2939 * or a timeout occurs.
2942 * Kernel thread context (may sleep).
2945 * 0 on success, -errno otherwise.
2947 int ata_busy_sleep(struct ata_port *ap,
2948 unsigned long tmout_pat, unsigned long tmout)
2950 unsigned long timer_start, timeout;
2951 u8 status;
2953 status = ata_busy_wait(ap, ATA_BUSY, 300);
2954 timer_start = jiffies;
2955 timeout = timer_start + tmout_pat;
2956 while (status != 0xff && (status & ATA_BUSY) &&
2957 time_before(jiffies, timeout)) {
2958 msleep(50);
2959 status = ata_busy_wait(ap, ATA_BUSY, 3);
2962 if (status != 0xff && (status & ATA_BUSY))
2963 ata_port_printk(ap, KERN_WARNING,
2964 "port is slow to respond, please be patient "
2965 "(Status 0x%x)\n", status);
2967 timeout = timer_start + tmout;
2968 while (status != 0xff && (status & ATA_BUSY) &&
2969 time_before(jiffies, timeout)) {
2970 msleep(50);
2971 status = ata_chk_status(ap);
2974 if (status == 0xff)
2975 return -ENODEV;
2977 if (status & ATA_BUSY) {
2978 ata_port_printk(ap, KERN_ERR, "port failed to respond "
2979 "(%lu secs, Status 0x%x)\n",
2980 tmout / HZ, status);
2981 return -EBUSY;
2982 }
2984 return 0;
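/*
 * Usage sketch (hypothetical call, not from this file): callers give
 * a short "impatience" window before the warning and a longer hard
 * limit, e.g.
 *
 *	rc = ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 *
 * which warns after tmout_pat jiffies and only gives up with -EBUSY
 * after tmout jiffies.
 */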
2988 * ata_wait_ready - sleep until BSY clears, or timeout
2989 * @ap: port containing status register to be polled
2990 * @deadline: deadline jiffies for the operation
2992 * Sleep until ATA Status register bit BSY clears, or timeout
2993 * occurs.
2995 * LOCKING:
2996 * Kernel thread context (may sleep).
2998 * RETURNS:
2999 * 0 on success, -errno otherwise.
3001 int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3003 unsigned long start = jiffies;
3004 int warned = 0;
3006 while (1) {
3007 u8 status = ata_chk_status(ap);
3008 unsigned long now = jiffies;
3010 if (!(status & ATA_BUSY))
3011 return 0;
3012 if (!ata_port_online(ap) && status == 0xff)
3013 return -ENODEV;
3014 if (time_after(now, deadline))
3015 return -EBUSY;
3017 if (!warned && time_after(now, start + 5 * HZ) &&
3018 (deadline - now > 3 * HZ)) {
3019 ata_port_printk(ap, KERN_WARNING,
3020 "port is slow to respond, please be patient "
3021 "(Status 0x%x)\n", status);
3029 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3030 unsigned long deadline)
3032 struct ata_ioports *ioaddr = &ap->ioaddr;
3033 unsigned int dev0 = devmask & (1 << 0);
3034 unsigned int dev1 = devmask & (1 << 1);
3037 /* if device 0 was found in ata_devchk, wait for its
3038 * BSY bit to clear
3040 if (dev0) {
3041 rc = ata_wait_ready(ap, deadline);
3042 if (rc && rc != -ENODEV)
3043 return rc;
3049 /* if device 1 was found in ata_devchk, wait for register
3050 * access briefly, then wait for BSY to clear.
3052 if (dev1) {
3053 int i;
3055 ap->ops->dev_select(ap, 1);
3057 /* Wait for register access. Some ATAPI devices fail
3058 * to set nsect/lbal after reset, so don't waste too
3059 * much time on it. We're gonna wait for !BSY anyway.
3061 for (i = 0; i < 2; i++) {
3062 u8 nsect, lbal;
3064 nsect = ioread8(ioaddr->nsect_addr);
3065 lbal = ioread8(ioaddr->lbal_addr);
3066 if ((nsect == 1) && (lbal == 1))
3067 break;
3068 msleep(50); /* give drive a breather */
3071 rc = ata_wait_ready(ap, deadline);
3072 if (rc && rc != -ENODEV)
3073 return rc;
3079 /* is all this really necessary? */
3080 ap->ops->dev_select(ap, 0);
3081 if (dev1)
3082 ap->ops->dev_select(ap, 1);
3083 if (dev0)
3084 ap->ops->dev_select(ap, 0);
3086 return 0;
3089 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3090 unsigned long deadline)
3092 struct ata_ioports *ioaddr = &ap->ioaddr;
3094 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
3096 /* software reset. causes dev0 to be selected */
3097 iowrite8(ap->ctl, ioaddr->ctl_addr);
3098 udelay(20); /* FIXME: flush */
3099 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3100 udelay(20); /* FIXME: flush */
3101 iowrite8(ap->ctl, ioaddr->ctl_addr);
3103 /* spec mandates ">= 2ms" before checking status.
3104 * We wait 150ms, because that was the magic delay used for
3105 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3106 * between when the ATA command register is written, and then
3107 * status is checked. Because waiting for "a while" before
3108 * checking status is fine, post SRST, we perform this magic
3109 * delay here as well.
3111 * Old drivers/ide uses the 2mS rule and then waits for ready
3113 msleep(150);
3115 /* Before we perform post reset processing we want to see if
3116 * the bus shows 0xFF because the odd clown forgets the D7
3117 * pulldown resistor.
3119 if (ata_check_status(ap) == 0xFF)
3120 return -ENODEV;
3122 return ata_bus_post_reset(ap, devmask, deadline);
3126 * ata_bus_reset - reset host port and associated ATA channel
3127 * @ap: port to reset
3129 * This is typically the first time we actually start issuing
3130 * commands to the ATA channel. We wait for BSY to clear, then
3131 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3132 * result. Determine what devices, if any, are on the channel
3133 * by looking at the device 0/1 error register. Look at the signature
3134 * stored in each device's taskfile registers, to determine if
3135 * the device is ATA or ATAPI.
3138 * PCI/etc. bus probe sem.
3139 * Obtains host lock.
3142 * Sets ATA_FLAG_DISABLED if bus reset fails.
3145 void ata_bus_reset(struct ata_port *ap)
3147 struct ata_device *device = ap->link.device;
3148 struct ata_ioports *ioaddr = &ap->ioaddr;
3149 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3150 u8 err;
3151 unsigned int dev0, dev1 = 0, devmask = 0;
3152 int rc;
3154 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
3156 /* determine if device 0/1 are present */
3157 if (ap->flags & ATA_FLAG_SATA_RESET)
3158 dev0 = 1;
3159 else {
3160 dev0 = ata_devchk(ap, 0);
3161 if (slave_possible)
3162 dev1 = ata_devchk(ap, 1);
3163 }
3165 if (dev0)
3166 devmask |= (1 << 0);
3167 if (dev1)
3168 devmask |= (1 << 1);
3170 /* select device 0 again */
3171 ap->ops->dev_select(ap, 0);
3173 /* issue bus reset */
3174 if (ap->flags & ATA_FLAG_SRST) {
3175 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3176 if (rc && rc != -ENODEV)
3177 goto err_out;
3181 * determine by signature whether we have ATA or ATAPI devices
3183 device[0].class = ata_dev_try_classify(ap, 0, &err);
3184 if ((slave_possible) && (err != 0x81))
3185 device[1].class = ata_dev_try_classify(ap, 1, &err);
3187 /* is double-select really necessary? */
3188 if (device[1].class != ATA_DEV_NONE)
3189 ap->ops->dev_select(ap, 1);
3190 if (device[0].class != ATA_DEV_NONE)
3191 ap->ops->dev_select(ap, 0);
3193 /* if no devices were detected, disable this port */
3194 if ((device[0].class == ATA_DEV_NONE) &&
3195 (device[1].class == ATA_DEV_NONE))
3196 goto err_out;
3198 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3199 /* set up device control for ATA_FLAG_SATA_RESET */
3200 iowrite8(ap->ctl, ioaddr->ctl_addr);
3203 DPRINTK("EXIT\n");
3204 return;
3206 err_out:
3207 ata_port_printk(ap, KERN_ERR, "disabling port\n");
3208 ap->ops->port_disable(ap);
3214 * sata_phy_debounce - debounce SATA phy status
3215 * @ap: ATA port to debounce SATA phy status for
3216 * @params: timing parameters { interval, duration, timeout } in msec
3217 * @deadline: deadline jiffies for the operation
3219 * Make sure SStatus of @ap reaches stable state, determined by
3220 * holding the same value where DET is not 1 for @duration polled
3221 * every @interval, before @timeout. Timeout constrains the
3222 * beginning of the stable state. Because DET gets stuck at 1 on
3223 * some controllers after hot unplugging, this function waits
3224 * until timeout then returns 0 if DET is stable at 1.
3226 * @timeout is further limited by @deadline. The sooner of the
3227 * two is used.
3229 * LOCKING:
3230 * Kernel thread context (may sleep)
3232 * RETURNS:
3233 * 0 on success, -errno on failure.
3235 int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3236 unsigned long deadline)
3238 unsigned long interval_msec = params[0];
3239 unsigned long duration = msecs_to_jiffies(params[1]);
3240 unsigned long last_jiffies, t;
3241 u32 last, cur;
3242 int rc;
3244 t = jiffies + msecs_to_jiffies(params[2]);
3245 if (time_before(t, deadline))
3246 deadline = t;
3248 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3249 return rc;
3250 cur &= 0xf;
3252 last = cur;
3253 last_jiffies = jiffies;
3256 msleep(interval_msec);
3257 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3258 return rc;
3259 cur &= 0xf;
3261 /* DET stable? */
3262 if (cur == last) {
3263 if (cur == 1 && time_before(jiffies, deadline))
3264 continue;
3265 if (time_after(jiffies, last_jiffies + duration))
3266 return 0;
3267 continue;
3268 }
3270 /* unstable, start over */
3271 last = cur;
3272 last_jiffies = jiffies;
3274 /* Check deadline. If debouncing failed, return
3275 * -EPIPE to tell upper layer to lower link speed.
3277 if (time_after(jiffies, deadline))
3278 return -EPIPE;
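/*
 * Usage sketch (illustrative): the { interval, duration, timeout }
 * triplets declared near the top of this file plug straight in:
 *
 *	rc = sata_phy_debounce(ap, sata_deb_timing_hotplug,
 *			       jiffies + msecs_to_jiffies(5000));
 *
 * polls SStatus every 25ms and requires 500ms of stability; the
 * 5000ms deadline is a hypothetical caller choice.
 */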
3283 * sata_phy_resume - resume SATA phy
3284 * @ap: ATA port to resume SATA phy for
3285 * @params: timing parameters { interval, duration, timeout } in msec
3286 * @deadline: deadline jiffies for the operation
3288 * Resume SATA phy of @ap and debounce it.
3290 * LOCKING:
3291 * Kernel thread context (may sleep)
3293 * RETURNS:
3294 * 0 on success, -errno on failure.
3296 int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3297 unsigned long deadline)
3299 u32 scontrol;
3300 int rc;
3302 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3303 return rc;
3305 scontrol = (scontrol & 0x0f0) | 0x300;
3307 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3308 return rc;
3310 /* Some PHYs react badly if SStatus is pounded immediately
3311 * after resuming. Delay 200ms before debouncing.
3313 msleep(200);
3315 return sata_phy_debounce(ap, params, deadline);
3319 * ata_std_prereset - prepare for reset
3320 * @ap: ATA port to be reset
3321 * @deadline: deadline jiffies for the operation
3323 * @ap is about to be reset. Initialize it. Failure from
3324 * prereset makes libata abort whole reset sequence and give up
3325 * that port, so prereset should be best-effort. It does its
3326 * best to prepare for reset sequence but if things go wrong, it
3327 * should just whine, not fail.
3330 * Kernel thread context (may sleep)
3333 * 0 on success, -errno otherwise.
3335 int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
3337 struct ata_eh_context *ehc = &ap->link.eh_context;
3338 const unsigned long *timing = sata_ehc_deb_timing(ehc);
3339 int rc;
3341 /* handle link resume */
3342 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3343 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3344 ehc->i.action |= ATA_EH_HARDRESET;
3346 /* if we're about to do hardreset, nothing more to do */
3347 if (ehc->i.action & ATA_EH_HARDRESET)
3348 return 0;
3350 /* if SATA, resume phy */
3351 if (ap->flags & ATA_FLAG_SATA) {
3352 rc = sata_phy_resume(ap, timing, deadline);
3353 /* whine about phy resume failure but proceed */
3354 if (rc && rc != -EOPNOTSUPP)
3355 ata_port_printk(ap, KERN_WARNING, "failed to resume "
3356 "link for reset (errno=%d)\n", rc);
3359 /* Wait for !BSY if the controller can wait for the first D2H
3360 * Reg FIS and we don't know that no device is attached.
3362 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
3363 rc = ata_wait_ready(ap, deadline);
3364 if (rc && rc != -ENODEV) {
3365 ata_port_printk(ap, KERN_WARNING, "device not ready "
3366 "(errno=%d), forcing hardreset\n", rc);
3367 ehc->i.action |= ATA_EH_HARDRESET;
3371 return 0;
3375 * ata_std_softreset - reset host port via ATA SRST
3376 * @ap: port to reset
3377 * @classes: resulting classes of attached devices
3378 * @deadline: deadline jiffies for the operation
3380 * Reset host port using ATA SRST.
3383 * Kernel thread context (may sleep)
3386 * 0 on success, -errno otherwise.
3388 int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3389 unsigned long deadline)
3391 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3392 unsigned int devmask = 0;
3393 int rc;
3394 u8 err;
3398 if (ata_port_offline(ap)) {
3399 classes[0] = ATA_DEV_NONE;
3400 goto out;
3403 /* determine if device 0/1 are present */
3404 if (ata_devchk(ap, 0))
3405 devmask |= (1 << 0);
3406 if (slave_possible && ata_devchk(ap, 1))
3407 devmask |= (1 << 1);
3409 /* select device 0 again */
3410 ap->ops->dev_select(ap, 0);
3412 /* issue bus reset */
3413 DPRINTK("about to softreset, devmask=%x\n", devmask);
3414 rc = ata_bus_softreset(ap, devmask, deadline);
3415 /* if link is occupied, -ENODEV too is an error */
3416 if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
3417 ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3418 return rc;
3421 /* determine by signature whether we have ATA or ATAPI devices */
3422 classes[0] = ata_dev_try_classify(ap, 0, &err);
3423 if (slave_possible && err != 0x81)
3424 classes[1] = ata_dev_try_classify(ap, 1, &err);
3426 out:
3427 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3428 return 0;
3432 * sata_port_hardreset - reset port via SATA phy reset
3433 * @ap: port to reset
3434 * @timing: timing parameters { interval, duration, timeout } in msec
3435 * @deadline: deadline jiffies for the operation
3437 * SATA phy-reset host port using DET bits of SControl register.
3439 * LOCKING:
3440 * Kernel thread context (may sleep)
3442 * RETURNS:
3443 * 0 on success, -errno otherwise.
3445 int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3446 unsigned long deadline)
3448 u32 scontrol;
3449 int rc;
3453 if (sata_set_spd_needed(ap)) {
3454 /* SATA spec says nothing about how to reconfigure
3455 * spd. To be on the safe side, turn off phy during
3456 * reconfiguration. This works for at least ICH7 AHCI
3457 * and Sil3124.
3459 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3460 goto out;
3462 scontrol = (scontrol & 0x0f0) | 0x304;
3464 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3465 goto out;
3467 sata_set_spd(ap);
3470 /* issue phy wake/reset */
3471 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3472 goto out;
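/* Assumed SControl layout (per the SATA spec, noted for clarity):
 * DET is bits 3:0, SPD bits 7:4, IPM bits 11:8. The 0x0f0 mask
 * preserves the SPD limit; DET=0x1 below requests COMRESET and
 * IPM=0x3 forbids the partial/slumber power states while it runs.
 */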
3474 scontrol = (scontrol & 0x0f0) | 0x301;
3476 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
3477 goto out;
3479 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
3480 * 10.4.2 says at least 1 ms.
3482 msleep(1);
3484 /* bring phy back */
3485 rc = sata_phy_resume(ap, timing, deadline);
3486 out:
3487 DPRINTK("EXIT, rc=%d\n", rc);
3488 return rc;
3492 * sata_std_hardreset - reset host port via SATA phy reset
3493 * @ap: port to reset
3494 * @class: resulting class of attached device
3495 * @deadline: deadline jiffies for the operation
3497 * SATA phy-reset host port using DET bits of SControl register,
3498 * wait for !BSY and classify the attached device.
3501 * Kernel thread context (may sleep)
3504 * 0 on success, -errno otherwise.
3506 int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3507 unsigned long deadline)
3509 const unsigned long *timing = sata_ehc_deb_timing(&ap->link.eh_context);
3510 int rc;
3515 rc = sata_port_hardreset(ap, timing, deadline);
3516 if (rc) {
3517 ata_port_printk(ap, KERN_ERR,
3518 "COMRESET failed (errno=%d)\n", rc);
3519 return rc;
3520 }
3522 /* TODO: phy layer with polling, timeouts, etc. */
3523 if (ata_port_offline(ap)) {
3524 *class = ATA_DEV_NONE;
3525 DPRINTK("EXIT, link offline\n");
3526 return 0;
3529 /* wait a while before checking status, see SRST for more info */
3530 msleep(150);
3532 rc = ata_wait_ready(ap, deadline);
3533 /* link occupied, -ENODEV too is an error */
3534 if (rc) {
3535 ata_port_printk(ap, KERN_ERR,
3536 "COMRESET failed (errno=%d)\n", rc);
3537 return rc;
3538 }
3540 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3542 *class = ata_dev_try_classify(ap, 0, NULL);
3544 DPRINTK("EXIT, class=%u\n", *class);
3545 return 0;
3549 * ata_std_postreset - standard postreset callback
3550 * @ap: the target ata_port
3551 * @classes: classes of attached devices
3553 * This function is invoked after a successful reset. Note that
3554 * the device might have been reset more than once using
3555 * different reset methods before postreset is invoked.
3558 * Kernel thread context (may sleep)
3560 void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3562 u32 serror;
3566 /* print link status */
3567 sata_print_link_status(ap);
3569 /* clear SError */
3570 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3571 sata_scr_write(ap, SCR_ERROR, serror);
3573 /* is double-select really necessary? */
3574 if (classes[0] != ATA_DEV_NONE)
3575 ap->ops->dev_select(ap, 1);
3576 if (classes[1] != ATA_DEV_NONE)
3577 ap->ops->dev_select(ap, 0);
3579 /* bail out if no device is present */
3580 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3581 DPRINTK("EXIT, no device\n");
3582 return;
3585 /* set up device control */
3586 if (ap->ioaddr.ctl_addr)
3587 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
3593 * ata_dev_same_device - Determine whether new ID matches configured device
3594 * @dev: device to compare against
3595 * @new_class: class of the new device
3596 * @new_id: IDENTIFY page of the new device
3598 * Compare @new_class and @new_id against @dev and determine
3599 * whether @dev is the device indicated by @new_class and
3600 * @new_id.
3602 * LOCKING:
3603 * None.
3605 * RETURNS:
3606 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3608 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3611 const u16 *old_id = dev->id;
3612 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3613 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
3615 if (dev->class != new_class) {
3616 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3617 dev->class, new_class);
3618 return 0;
3621 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3622 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3623 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3624 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
3626 if (strcmp(model[0], model[1])) {
3627 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3628 "'%s' != '%s'\n", model[0], model[1]);
3632 if (strcmp(serial[0], serial[1])) {
3633 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3634 "'%s' != '%s'\n", serial[0], serial[1]);
3642 * ata_dev_reread_id - Re-read IDENTIFY data
3643 * @dev: target ATA device
3644 * @readid_flags: read ID flags
3646 * Re-read IDENTIFY page and make sure @dev is still attached to
3647 * the port.
3649 * LOCKING:
3650 * Kernel thread context (may sleep)
3652 * RETURNS:
3653 * 0 on success, negative errno otherwise
3655 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
3657 unsigned int class = dev->class;
3658 u16 *id = (void *)dev->link->ap->sector_buf;
3659 int rc;
3662 rc = ata_dev_read_id(dev, &class, readid_flags, id);
3663 if (rc)
3664 return rc;
3666 /* is the device still there? */
3667 if (!ata_dev_same_device(dev, class, id))
3668 return -ENODEV;
3670 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
3671 return 0;
3675 * ata_dev_revalidate - Revalidate ATA device
3676 * @dev: device to revalidate
3677 * @readid_flags: read ID flags
3679 * Re-read IDENTIFY page, make sure @dev is still attached to the
3680 * port and reconfigure it according to the new IDENTIFY page.
3683 * Kernel thread context (may sleep)
3686 * 0 on success, negative errno otherwise
3688 int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3690 u64 n_sectors = dev->n_sectors;
3691 int rc;
3693 if (!ata_dev_enabled(dev)) {
3694 rc = -ENODEV;
3695 goto fail;
3696 }
3697 rc = ata_dev_reread_id(dev, readid_flags);
3698 if (rc)
3699 goto fail;
3701 /* configure device according to the new ID */
3702 rc = ata_dev_configure(dev);
3703 if (rc)
3704 goto fail;
3706 /* verify n_sectors hasn't changed */
3707 if (dev->class == ATA_DEV_ATA && n_sectors &&
3708 dev->n_sectors != n_sectors) {
3709 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3710 "%llu != %llu\n",
3711 (unsigned long long)n_sectors,
3712 (unsigned long long)dev->n_sectors);
3714 /* restore original n_sectors */
3715 dev->n_sectors = n_sectors;
3717 rc = -ENODEV;
3718 goto fail;
3719 }
3721 return 0;
3723 fail:
3724 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
3725 return rc;
3728 struct ata_blacklist_entry {
3729 const char *model_num;
3730 const char *model_rev;
3731 unsigned long horkage;
3734 static const struct ata_blacklist_entry ata_device_blacklist [] = {
3735 /* Devices with DMA related problems under Linux */
3736 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3737 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3738 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3739 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3740 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3741 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3742 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3743 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3744 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3745 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3746 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3747 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3748 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3749 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3750 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3751 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3752 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3753 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3754 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3755 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3756 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3757 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3758 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3759 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3760 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3761 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3762 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3763 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3764 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3765 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3766 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
3767 { "IOMEGA ZIP 250 ATAPI Floppy",
3768 NULL, ATA_HORKAGE_NODMA },
3770 /* Weird ATAPI devices */
3771 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
3773 /* Devices we expect to fail diagnostics */
3775 /* Devices where NCQ should be avoided */
3777 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3778 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3779 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
3781 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
3782 { "Maxtor 6B200M0", "BANC1BM0", ATA_HORKAGE_NONCQ },
3783 { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ },
3784 { "Maxtor 7B250S0", "BANC1B70", ATA_HORKAGE_NONCQ, },
3785 { "Maxtor 7B300S0", "BANC1B70", ATA_HORKAGE_NONCQ },
3786 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
3787 { "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
3788 ATA_HORKAGE_NONCQ },
3789 /* NCQ hard hangs device under heavier load, needs hard power cycle */
3790 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
3791 /* Blacklist entries taken from Silicon Image 3124/3132
3792 Windows driver .inf file - also several Linux problem reports */
3793 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3794 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3795 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
3796 /* Drives which do spurious command completion */
3797 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
3798 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
3799 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
3800 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
3801 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
3802 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3803 { "ST3160812AS", "3.AD", ATA_HORKAGE_NONCQ, },
3804 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
3806 /* devices which puke on READ_NATIVE_MAX */
3807 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3808 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3809 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3810 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
3816 static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
3818 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3819 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
3820 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3822 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3823 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
3825 while (ad->model_num) {
3826 if (!strcmp(ad->model_num, model_num)) {
3827 if (ad->model_rev == NULL)
3828 return ad->horkage;
3829 if (!strcmp(ad->model_rev, model_rev))
3830 return ad->horkage;
3831 }
3832 ad++;
3833 }
3835 return 0;
3837 static int ata_dma_blacklisted(const struct ata_device *dev)
3839 /* We don't support polling DMA.
3840 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3841 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3843 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
3844 (dev->flags & ATA_DFLAG_CDB_INTR))
3845 return 1;
3846 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
3850 * ata_dev_xfermask - Compute supported xfermask of the given device
3851 * @dev: Device to compute xfermask for
3853 * Compute supported xfermask of @dev and store it in
3854 * dev->*_mask. This function is responsible for applying all
3855 * known limits including host controller limits, device
3856 * blacklist, etc...
3858 * LOCKING:
3859 * None.
3861 static void ata_dev_xfermask(struct ata_device *dev)
3863 struct ata_link *link = dev->link;
3864 struct ata_port *ap = link->ap;
3865 struct ata_host *host = ap->host;
3866 unsigned long xfer_mask;
3868 /* controller modes available */
3869 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3870 ap->mwdma_mask, ap->udma_mask);
3872 /* drive modes available */
3873 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3874 dev->mwdma_mask, dev->udma_mask);
3875 xfer_mask &= ata_id_xfermask(dev->id);
3878 * CFA Advanced TrueIDE timings are not allowed on a shared
3879 * cable
3881 if (ata_dev_pair(dev)) {
3882 /* No PIO5 or PIO6 */
3883 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3884 /* No MWDMA3 or MWDMA 4 */
3885 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3888 if (ata_dma_blacklisted(dev)) {
3889 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3890 ata_dev_printk(dev, KERN_WARNING,
3891 "device is on DMA blacklist, disabling DMA\n");
3894 if ((host->flags & ATA_HOST_SIMPLEX) &&
3895 host->simplex_claimed && host->simplex_claimed != ap) {
3896 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3897 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3898 "other device, disabling DMA\n");
3901 if (ap->flags & ATA_FLAG_NO_IORDY)
3902 xfer_mask &= ata_pio_mask_no_iordy(dev);
3904 if (ap->ops->mode_filter)
3905 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
3907 /* Apply cable rule here. Don't apply it early because when
3908 * we handle hot plug the cable type can itself change.
3909 * Check this last so that we know if the transfer rate was
3910 * solely limited by the cable.
3911 * Unknown or 80 wire cables reported host side are checked
3912 * drive side as well. Cases where we know a 40wire cable
3913 * is used safely for 80 are not checked here.
3915 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3916 /* UDMA/44 or higher would be available */
3917 if((ap->cbl == ATA_CBL_PATA40) ||
3918 (ata_drive_40wire(dev->id) &&
3919 (ap->cbl == ATA_CBL_PATA_UNK ||
3920 ap->cbl == ATA_CBL_PATA80))) {
3921 ata_dev_printk(dev, KERN_WARNING,
3922 "limited to UDMA/33 due to 40-wire cable\n");
3923 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3926 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3927 &dev->mwdma_mask, &dev->udma_mask);
3931 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
3932 * @dev: Device to which command will be sent
3934 * Issue SET FEATURES - XFER MODE command to device @dev
3935 * on port @ap.
3937 * LOCKING:
3938 * PCI/etc. bus probe sem.
3940 * RETURNS:
3941 * 0 on success, AC_ERR_* mask otherwise.
3944 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
3946 struct ata_taskfile tf;
3947 unsigned int err_mask;
3949 /* set up set-features taskfile */
3950 DPRINTK("set features - xfer mode\n");
3952 /* Some controllers and ATAPI devices show flaky interrupt
3953 * behavior after setting xfer mode. Use polling instead.
3955 ata_tf_init(dev, &tf);
3956 tf.command = ATA_CMD_SET_FEATURES;
3957 tf.feature = SETFEATURES_XFER;
3958 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
3959 tf.protocol = ATA_PROT_NODATA;
3960 tf.nsect = dev->xfer_mode;
3962 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
3964 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3965 return err_mask;
3969 * ata_dev_init_params - Issue INIT DEV PARAMS command
3970 * @dev: Device to which command will be sent
3971 * @heads: Number of heads (taskfile parameter)
3972 * @sectors: Number of sectors (taskfile parameter)
3975 * Kernel thread context (may sleep)
3978 * 0 on success, AC_ERR_* mask otherwise.
3980 static unsigned int ata_dev_init_params(struct ata_device *dev,
3981 u16 heads, u16 sectors)
3983 struct ata_taskfile tf;
3984 unsigned int err_mask;
3986 /* Number of sectors per track 1-255. Number of heads 1-16 */
3987 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
3988 return AC_ERR_INVALID;
3990 /* set up init dev params taskfile */
3991 DPRINTK("init dev params \n");
3993 ata_tf_init(dev, &tf);
3994 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3995 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3996 tf.protocol = ATA_PROT_NODATA;
3997 tf.nsect = sectors;
3998 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
4000 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
4001 /* A clean abort indicates an original or just out of spec drive
4002 and we should continue as we issue the setup based on the
4003 drive reported working geometry */
4004 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4005 err_mask = 0;
4007 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4008 return err_mask;
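/* Worked example (illustrative): for a legacy 16-head, 63-sector
 * translation the taskfile carries nsect = 63 and a device register
 * low nibble of (16 - 1) & 0x0f = 0x0f, matching the "max head =
 * num. of heads - 1" rule noted above.
 */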
4012 * ata_sg_clean - Unmap DMA memory associated with command
4013 * @qc: Command containing DMA memory to be released
4015 * Unmap all mapped DMA memory associated with this command.
4018 * spin_lock_irqsave(host lock)
4020 void ata_sg_clean(struct ata_queued_cmd *qc)
4022 struct ata_port *ap = qc->ap;
4023 struct scatterlist *sg = qc->__sg;
4024 int dir = qc->dma_dir;
4025 void *pad_buf = NULL;
4027 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4028 WARN_ON(sg == NULL);
4030 if (qc->flags & ATA_QCFLAG_SINGLE)
4031 WARN_ON(qc->n_elem > 1);
4033 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4035 /* if we padded the buffer out to 32-bit bound, and data
4036 * xfer direction is from-device, we must copy from the
4037 * pad buffer back into the supplied buffer
4039 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4040 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4042 if (qc->flags & ATA_QCFLAG_SG) {
4043 if (qc->n_elem)
4044 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4045 /* restore last sg */
4046 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4047 if (pad_buf) {
4048 struct scatterlist *psg = &qc->pad_sgent;
4049 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4050 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
4051 kunmap_atomic(addr, KM_IRQ0);
4053 } else {
4054 if (qc->n_elem)
4055 dma_unmap_single(ap->dev,
4056 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4058 /* restore sg */
4059 sg->length += qc->pad_len;
4060 if (pad_buf)
4061 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4062 pad_buf, qc->pad_len);
4065 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4066 qc->__sg = NULL;
4070 * ata_fill_sg - Fill PCI IDE PRD table
4071 * @qc: Metadata associated with taskfile to be transferred
4073 * Fill PCI IDE PRD (scatter-gather) table with segments
4074 * associated with the current disk command.
4077 * spin_lock_irqsave(host lock)
4080 static void ata_fill_sg(struct ata_queued_cmd *qc)
4082 struct ata_port *ap = qc->ap;
4083 struct scatterlist *sg;
4084 unsigned int idx;
4086 WARN_ON(qc->__sg == NULL);
4087 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4089 idx = 0;
4090 ata_for_each_sg(sg, qc) {
4091 u32 addr, offset;
4092 u32 sg_len, len;
4094 /* determine if physical DMA addr spans 64K boundary.
4095 * Note h/w doesn't support 64-bit, so we unconditionally
4096 * truncate dma_addr_t to u32.
4098 addr = (u32) sg_dma_address(sg);
4099 sg_len = sg_dma_len(sg);
4101 while (sg_len) {
4102 offset = addr & 0xffff;
4103 len = sg_len;
4104 if ((offset + sg_len) > 0x10000)
4105 len = 0x10000 - offset;
4107 ap->prd[idx].addr = cpu_to_le32(addr);
4108 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4109 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4111 idx++;
4112 sg_len -= len;
4113 addr += len;
4117 if (idx)
4118 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
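/* Worked example (illustrative): a segment at bus address 0x1f000
 * with length 0x3000 sits 0xf000 into its 64K region, and
 * 0xf000 + 0x3000 > 0x10000, so the first PRD entry is trimmed to
 * 0x10000 - 0xf000 = 0x1000 bytes and the remaining 0x2000 bytes go
 * into the next entry starting at 0x20000.
 */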
4122 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4123 * @qc: Metadata associated with taskfile to be transferred
4125 * Fill PCI IDE PRD (scatter-gather) table with segments
4126 * associated with the current disk command. Perform the fill
4127 * so that we avoid writing any length 64K records for
4128 * controllers that don't follow the spec.
4131 * spin_lock_irqsave(host lock)
4134 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4136 struct ata_port *ap = qc->ap;
4137 struct scatterlist *sg;
4138 unsigned int idx;
4140 WARN_ON(qc->__sg == NULL);
4141 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4143 idx = 0;
4144 ata_for_each_sg(sg, qc) {
4145 u32 addr, offset;
4146 u32 sg_len, len, blen;
4148 /* determine if physical DMA addr spans 64K boundary.
4149 * Note h/w doesn't support 64-bit, so we unconditionally
4150 * truncate dma_addr_t to u32.
4152 addr = (u32) sg_dma_address(sg);
4153 sg_len = sg_dma_len(sg);
4155 while (sg_len) {
4156 offset = addr & 0xffff;
4157 len = sg_len;
4158 if ((offset + sg_len) > 0x10000)
4159 len = 0x10000 - offset;
4161 blen = len & 0xffff;
4162 ap->prd[idx].addr = cpu_to_le32(addr);
4163 if (blen == 0) {
4164 /* Some PATA chipsets like the CS5530 can't
4165 cope with 0x0000 meaning 64K as the spec says */
4166 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4167 blen = 0x8000;
4168 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4169 }
4170 ap->prd[idx].flags_len = cpu_to_le32(blen);
4171 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4173 idx++;
4174 sg_len -= len;
4175 addr += len;
4179 if (idx)
4180 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
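/* Worked example (illustrative): a full 64K segment would need
 * flags_len = 0x0000 (the spec's "0 means 64K"), which parts like
 * the CS5530 mishandle; the blen == 0 case above instead emits two
 * 32K PRD entries (0x8000 + 0x8000) covering the same region.
 */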
4184 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4185 * @qc: Metadata associated with taskfile to check
4187 * Allow low-level driver to filter ATA PACKET commands, returning
4188 * a status indicating whether or not it is OK to use DMA for the
4189 * supplied PACKET command.
4192 * spin_lock_irqsave(host lock)
4194 * RETURNS: 0 when ATAPI DMA can be used
4195 * nonzero otherwise
4197 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4199 struct ata_port *ap = qc->ap;
4201 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4202 * few ATAPI devices choke on such DMA requests.
4204 if (unlikely(qc->nbytes & 15))
4205 return 1;
4207 if (ap->ops->check_atapi_dma)
4208 return ap->ops->check_atapi_dma(qc);
4210 return 0;
4214 * ata_qc_prep - Prepare taskfile for submission
4215 * @qc: Metadata associated with taskfile to be prepared
4217 * Prepare ATA taskfile for submission.
4220 * spin_lock_irqsave(host lock)
4222 void ata_qc_prep(struct ata_queued_cmd *qc)
4224 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4225 return;
4227 ata_fill_sg(qc);
4231 * ata_dumb_qc_prep - Prepare taskfile for submission
4232 * @qc: Metadata associated with taskfile to be prepared
4234 * Prepare ATA taskfile for submission.
4237 * spin_lock_irqsave(host lock)
4239 void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4241 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4242 return;
4244 ata_fill_sg_dumb(qc);
4247 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4250 * ata_sg_init_one - Associate command with memory buffer
4251 * @qc: Command to be associated
4252 * @buf: Memory buffer
4253 * @buflen: Length of memory buffer, in bytes.
4255 * Initialize the data-related elements of queued_cmd @qc
4256 * to point to a single memory buffer, @buf of byte length @buflen.
4259 * spin_lock_irqsave(host lock)
4262 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4264 qc->flags |= ATA_QCFLAG_SINGLE;
4266 qc->__sg = &qc->sgent;
4267 qc->n_elem = 1;
4268 qc->orig_n_elem = 1;
4269 qc->buf_virt = buf;
4270 qc->nbytes = buflen;
4272 sg_init_one(&qc->sgent, buf, buflen);
4276 * ata_sg_init - Associate command with scatter-gather table.
4277 * @qc: Command to be associated
4278 * @sg: Scatter-gather table.
4279 * @n_elem: Number of elements in s/g table.
4281 * Initialize the data-related elements of queued_cmd @qc
4282 * to point to a scatter-gather table @sg, containing @n_elem
4283 * elements.
4286 * spin_lock_irqsave(host lock)
4289 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4290 unsigned int n_elem)
4292 qc->flags |= ATA_QCFLAG_SG;
4293 qc->__sg = sg;
4294 qc->n_elem = n_elem;
4295 qc->orig_n_elem = n_elem;
4299 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4300 * @qc: Command with memory buffer to be mapped.
4302 * DMA-map the memory buffer associated with queued_cmd @qc.
4305 * spin_lock_irqsave(host lock)
4308 * Zero on success, negative on error.
4311 static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4313 struct ata_port *ap = qc->ap;
4314 int dir = qc->dma_dir;
4315 struct scatterlist *sg = qc->__sg;
4316 dma_addr_t dma_address;
4317 int trim_sg = 0;
4319 /* we must lengthen transfers to end on a 32-bit boundary */
4320 qc->pad_len = sg->length & 3;
4321 if (qc->pad_len) {
4322 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4323 struct scatterlist *psg = &qc->pad_sgent;
4325 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4327 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4329 if (qc->tf.flags & ATA_TFLAG_WRITE)
4330 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4331 qc->pad_len);
4333 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4334 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4336 sg->length -= qc->pad_len;
4337 if (sg->length == 0)
4338 trim_sg = 1;
4340 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4341 sg->length, qc->pad_len);
4344 if (trim_sg) {
4345 qc->n_elem--;
4346 goto skip_map;
4347 }
4349 dma_address = dma_map_single(ap->dev, qc->buf_virt,
4350 sg->length, dir);
4351 if (dma_mapping_error(dma_address)) {
4352 /* restore sg */
4353 sg->length += qc->pad_len;
4354 return -1;
4357 sg_dma_address(sg) = dma_address;
4358 sg_dma_len(sg) = sg->length;
4360 skip_map:
4361 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4362 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4364 return 0;
4368 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4369 * @qc: Command with scatter-gather table to be mapped.
4371 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4374 * spin_lock_irqsave(host lock)
4377 * Zero on success, negative on error.
4381 static int ata_sg_setup(struct ata_queued_cmd *qc)
4383 struct ata_port *ap = qc->ap;
4384 struct scatterlist *sg = qc->__sg;
4385 struct scatterlist *lsg = &sg[qc->n_elem - 1];
4386 int n_elem, pre_n_elem, dir, trim_sg = 0;
4388 VPRINTK("ENTER, ata%u\n", ap->print_id);
4389 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
4391 /* we must lengthen transfers to end on a 32-bit boundary */
4392 qc->pad_len = lsg->length & 3;
4393 if (qc->pad_len) {
4394 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4395 struct scatterlist *psg = &qc->pad_sgent;
4396 unsigned int offset;
4398 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
4400 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4403 * psg->page/offset are used to copy to-be-written
4404 * data in this function or read data in ata_sg_clean.
4406 offset = lsg->offset + lsg->length - qc->pad_len;
4407 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4408 psg->offset = offset_in_page(offset);
4410 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4411 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4412 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
4413 kunmap_atomic(addr, KM_IRQ0);
4416 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4417 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4419 lsg->length -= qc->pad_len;
4420 if (lsg->length == 0)
4421 trim_sg = 1;
4423 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4424 qc->n_elem - 1, lsg->length, qc->pad_len);
4427 pre_n_elem = qc->n_elem;
4428 if (trim_sg && pre_n_elem)
4429 pre_n_elem--;
4431 if (!pre_n_elem) {
4432 n_elem = 0;
4433 goto skip_map;
4434 }
4436 dir = qc->dma_dir;
4437 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
4438 if (n_elem < 1) {
4439 /* restore last sg */
4440 lsg->length += qc->pad_len;
4441 return -1;
4442 }
4444 DPRINTK("%d sg elements mapped\n", n_elem);
4446 skip_map:
4447 qc->n_elem = n_elem;
4449 return 0;
4453 * swap_buf_le16 - swap halves of 16-bit words in place
4454 * @buf: Buffer to swap
4455 * @buf_words: Number of 16-bit words in buffer.
4457 * Swap halves of 16-bit words if needed to convert from
4458 * little-endian byte order to native cpu byte order, or
4459 * vice-versa.
4461 * LOCKING:
4462 * Inherited from caller.
4464 void swap_buf_le16(u16 *buf, unsigned int buf_words)
4466 #ifdef __BIG_ENDIAN
4467 unsigned int i;
4469 for (i = 0; i < buf_words; i++)
4470 buf[i] = le16_to_cpu(buf[i]);
4471 #endif /* __BIG_ENDIAN */
4475 * ata_data_xfer - Transfer data by PIO
4476 * @adev: device to target
4477 * @buf: data buffer
4478 * @buflen: buffer length
4479 * @write_data: read/write
4481 * Transfer data from/to the device data register by PIO.
4484 * Inherited from caller.
4486 void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4487 unsigned int buflen, int write_data)
4489 struct ata_port *ap = adev->link->ap;
4490 unsigned int words = buflen >> 1;
4492 /* Transfer multiple of 2 bytes */
4493 if (write_data)
4494 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
4495 else
4496 ioread16_rep(ap->ioaddr.data_addr, buf, words);
4498 /* Transfer trailing 1 byte, if any. */
4499 if (unlikely(buflen & 0x01)) {
4500 u16 align_buf[1] = { 0 };
4501 unsigned char *trailing_buf = buf + buflen - 1;
4503 if (write_data) {
4504 memcpy(align_buf, trailing_buf, 1);
4505 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
4506 } else {
4507 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
4508 memcpy(trailing_buf, align_buf, 1);
4509 }
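/* Illustrative note: the ATA data register is 16 bits wide, so an
 * odd trailing byte cannot be transferred directly; it is bounced
 * through align_buf above so the device only ever sees full 16-bit
 * accesses.
 */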
4514 * ata_data_xfer_noirq - Transfer data by PIO
4515 * @adev: device to target
4516 * @buf: data buffer
4517 * @buflen: buffer length
4518 * @write_data: read/write
4520 * Transfer data from/to the device data register by PIO. Do the
4521 * transfer with interrupts disabled.
4524 * Inherited from caller.
4526 void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4527 unsigned int buflen, int write_data)
4529 unsigned long flags;
4530 local_irq_save(flags);
4531 ata_data_xfer(adev, buf, buflen, write_data);
4532 local_irq_restore(flags);
4537 * ata_pio_sector - Transfer a sector of data.
4538 * @qc: Command on going
4540 * Transfer qc->sect_size bytes of data from/to the ATA device.
4543 * Inherited from caller.
4546 static void ata_pio_sector(struct ata_queued_cmd *qc)
4548 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4549 struct scatterlist *sg = qc->__sg;
4550 struct ata_port *ap = qc->ap;
4551 struct page *page;
4552 unsigned int offset;
4553 unsigned char *buf;
4555 if (qc->curbytes == qc->nbytes - qc->sect_size)
4556 ap->hsm_task_state = HSM_ST_LAST;
4558 page = sg[qc->cursg].page;
4559 offset = sg[qc->cursg].offset + qc->cursg_ofs;
4561 /* get the current page and offset */
4562 page = nth_page(page, (offset >> PAGE_SHIFT));
4563 offset %= PAGE_SIZE;
4565 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4567 if (PageHighMem(page)) {
4568 unsigned long flags;
4570 /* FIXME: use a bounce buffer */
4571 local_irq_save(flags);
4572 buf = kmap_atomic(page, KM_IRQ0);
4574 /* do the actual data transfer */
4575 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4577 kunmap_atomic(buf, KM_IRQ0);
4578 local_irq_restore(flags);
4580 buf = page_address(page);
4581 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
4584 qc->curbytes += qc->sect_size;
4585 qc->cursg_ofs += qc->sect_size;
4587 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
4588 qc->cursg++;
4589 qc->cursg_ofs = 0;
4590 }
4594 * ata_pio_sectors - Transfer one or many sectors.
4595 * @qc: Command on going
4597 * Transfer one or many sectors of data from/to the
4598 * ATA device for the DRQ request.
4601 * Inherited from caller.
4604 static void ata_pio_sectors(struct ata_queued_cmd *qc)
4606 if (is_multi_taskfile(&qc->tf)) {
4607 /* READ/WRITE MULTIPLE */
4608 unsigned int nsect;
4610 WARN_ON(qc->dev->multi_count == 0);
4612 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
4613 qc->dev->multi_count);
4614 while (nsect--)
4615 ata_pio_sector(qc);
4616 } else
4617 ata_pio_sector(qc);
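/* Worked example (illustrative): for READ MULTIPLE with
 * multi_count = 8 and 11 sectors still to transfer, the min() above
 * yields 8 sectors for this DRQ block; the remaining 3 follow on
 * the next data request.
 */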
4621 * atapi_send_cdb - Write CDB bytes to hardware
4622 * @ap: Port to which ATAPI device is attached.
4623 * @qc: Taskfile currently active
4625 * When device has indicated its readiness to accept
4626 * a CDB, this function is called. Send the CDB.
4632 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4635 DPRINTK("send cdb\n");
4636 WARN_ON(qc->dev->cdb_len < 12);
4638 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
4639 ata_altstatus(ap); /* flush */
4641 switch (qc->tf.protocol) {
4642 case ATA_PROT_ATAPI:
4643 ap->hsm_task_state = HSM_ST;
4644 break;
4645 case ATA_PROT_ATAPI_NODATA:
4646 ap->hsm_task_state = HSM_ST_LAST;
4647 break;
4648 case ATA_PROT_ATAPI_DMA:
4649 ap->hsm_task_state = HSM_ST_LAST;
4650 /* initiate bmdma */
4651 ap->ops->bmdma_start(qc);
4652 break;
4657 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4658 * @qc: Command on going
4659 * @bytes: number of bytes
4661 * Transfer data from/to the ATAPI device.
4664 * Inherited from caller.
4668 static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4670 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
4671 struct scatterlist *sg = qc->__sg;
4672 struct ata_port *ap = qc->ap;
4673 struct page *page;
4674 unsigned char *buf;
4675 unsigned int offset, count;
4677 if (qc->curbytes + bytes >= qc->nbytes)
4678 ap->hsm_task_state = HSM_ST_LAST;
4680 next_sg:
4681 if (unlikely(qc->cursg >= qc->n_elem)) {
4683 * The end of qc->sg is reached and the device expects
4684 * more data to transfer. In order not to overrun qc->sg
4685 * and fulfill length specified in the byte count register,
4686 * - for read case, discard trailing data from the device
4687 * - for write case, pad with zero data to the device
4689 u16 pad_buf[1] = { 0 };
4690 unsigned int words = bytes >> 1;
4691 unsigned int i;
4693 if (words) /* warning if bytes > 1 */
4694 ata_dev_printk(qc->dev, KERN_WARNING,
4695 "%u bytes trailing data\n", bytes);
4697 for (i = 0; i < words; i++)
4698 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
4700 ap->hsm_task_state = HSM_ST_LAST;
4701 return;
4704 sg = &qc->__sg[qc->cursg];
4706 page = sg->page;
4707 offset = sg->offset + qc->cursg_ofs;
4709 /* get the current page and offset */
4710 page = nth_page(page, (offset >> PAGE_SHIFT));
4711 offset %= PAGE_SIZE;
4713 /* don't overrun current sg */
4714 count = min(sg->length - qc->cursg_ofs, bytes);
4716 /* don't cross page boundaries */
4717 count = min(count, (unsigned int)PAGE_SIZE - offset);
4719 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4721 if (PageHighMem(page)) {
4722 unsigned long flags;
4724 /* FIXME: use bounce buffer */
4725 local_irq_save(flags);
4726 buf = kmap_atomic(page, KM_IRQ0);
4728 /* do the actual data transfer */
4729 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4731 kunmap_atomic(buf, KM_IRQ0);
4732 local_irq_restore(flags);
4734 buf = page_address(page);
4735 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
4738 bytes -= count;
4739 qc->curbytes += count;
4740 qc->cursg_ofs += count;
4742 if (qc->cursg_ofs == sg->length) {
4743 qc->cursg++;
4744 qc->cursg_ofs = 0;
4745 }
4747 if (bytes)
4748 goto next_sg;
4752 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4753 * @qc: Command on going
4755 * Transfer data from/to the ATAPI device.
4758 * Inherited from caller.
4761 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4763 struct ata_port *ap = qc->ap;
4764 struct ata_device *dev = qc->dev;
4765 unsigned int ireason, bc_lo, bc_hi, bytes;
4766 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4768 /* Abuse qc->result_tf for temp storage of intermediate TF
4769 * here to save some kernel stack usage.
4770 * For normal completion, qc->result_tf is not relevant. For
4771 * error, qc->result_tf is later overwritten by ata_qc_complete().
4772 * So, the correctness of qc->result_tf is not affected.
4774 ap->ops->tf_read(ap, &qc->result_tf);
4775 ireason = qc->result_tf.nsect;
4776 bc_lo = qc->result_tf.lbam;
4777 bc_hi = qc->result_tf.lbah;
4778 bytes = (bc_hi << 8) | bc_lo;
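/* ATAPI interrupt reason decoding (standard bits, noted for
 * clarity): ireason bit 0 is CoD (command vs data) and bit 1 is IO
 * (set for device-to-host transfers); the byte count arrives in the
 * LBA mid/high register pair read back above.
 */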
4780 /* shall be cleared to zero, indicating xfer of data */
4781 if (ireason & (1 << 0))
4782 goto err_out;
4784 /* make sure transfer direction matches expected */
4785 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4786 if (do_write != i_write)
4787 goto err_out;
4789 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
4791 __atapi_pio_bytes(qc, bytes);
4793 return;
4795 err_out:
4796 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
4797 qc->err_mask |= AC_ERR_HSM;
4798 ap->hsm_task_state = HSM_ST_ERR;
4802 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4803 * @ap: the target ata_port
4807 * 1 if ok in workqueue, 0 otherwise.
4810 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
4812 if (qc->tf.flags & ATA_TFLAG_POLLING)
4813 return 1;
4815 if (ap->hsm_task_state == HSM_ST_FIRST) {
4816 if (qc->tf.protocol == ATA_PROT_PIO &&
4817 (qc->tf.flags & ATA_TFLAG_WRITE))
4818 return 1;
4820 if (is_atapi_taskfile(&qc->tf) &&
4821 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4822 return 1;
4823 }
4825 return 0;
4829 * ata_hsm_qc_complete - finish a qc running on standard HSM
4830 * @qc: Command to complete
4831 * @in_wq: 1 if called from workqueue, 0 otherwise
4833 * Finish @qc which is running on standard HSM.
4836 * If @in_wq is zero, spin_lock_irqsave(host lock).
4837 * Otherwise, none on entry and grabs host lock.
4839 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4841 struct ata_port *ap = qc->ap;
4842 unsigned long flags;
4844 if (ap->ops->error_handler) {
4846 spin_lock_irqsave(ap->lock, flags);
4848 /* EH might have kicked in while host lock is released. */
4851 qc = ata_qc_from_tag(ap, qc->tag);
4853 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4854 ap->ops->irq_on(ap);
4855 ata_qc_complete(qc);
4857 ata_port_freeze(ap);
4860 spin_unlock_irqrestore(ap->lock, flags);
4862 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4863 ata_qc_complete(qc);
4865 ata_port_freeze(ap);
4869 spin_lock_irqsave(ap->lock, flags);
4870 ap->ops->irq_on(ap);
4871 ata_qc_complete(qc);
4872 spin_unlock_irqrestore(ap->lock, flags);
4874 ata_qc_complete(qc);
4879 * ata_hsm_move - move the HSM to the next state.
4880 * @ap: the target ata_port
4882 * @status: current device status
4883 * @in_wq: 1 if called from workqueue, 0 otherwise
4886 * 1 when poll next status needed, 0 otherwise.
4888 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4889 u8 status, int in_wq)
4891 unsigned long flags = 0;
4894 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4896 /* Make sure ata_qc_issue_prot() does not throw things
4897 * like DMA polling into the workqueue. Notice that
4898 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4900 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
4903 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4904 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
4906 switch (ap->hsm_task_state) {
4908 /* Send first data block or PACKET CDB */
4910 /* If polling, we will stay in the work queue after
4911 * sending the data. Otherwise, interrupt handler
4912 * takes over after sending the data.
4914 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4916 /* check device status */
4917 if (unlikely((status & ATA_DRQ) == 0)) {
4918 /* handle BSY=0, DRQ=0 as error */
4919 if (likely(status & (ATA_ERR | ATA_DF)))
4920 /* device stops HSM for abort/error */
4921 qc->err_mask |= AC_ERR_DEV;
4923 /* HSM violation. Let EH handle this */
4924 qc->err_mask |= AC_ERR_HSM;
4926 ap->hsm_task_state = HSM_ST_ERR;
4930 /* Device should not ask for data transfer (DRQ=1)
4931 * when it finds something wrong.
4932 * We ignore DRQ here and stop the HSM by
4933 * changing hsm_task_state to HSM_ST_ERR and
4934 * let the EH abort the command or reset the device.
4936 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4937 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4938 "error, dev_stat 0x%X\n", status);
4939 qc->err_mask |= AC_ERR_HSM;
4940 ap->hsm_task_state = HSM_ST_ERR;
4944 /* Send the CDB (atapi) or the first data block (ata pio out).
4945 * During the state transition, interrupt handler shouldn't
4946 * be invoked before the data transfer is complete and
4947 * hsm_task_state is changed. Hence, the following locking.
4950 spin_lock_irqsave(ap->lock, flags);
4952 if (qc->tf.protocol == ATA_PROT_PIO) {
4953 /* PIO data out protocol.
4954 * send first data block.
4957 /* ata_pio_sectors() might change the state
4958 * to HSM_ST_LAST. So, the state is changed here
4959 * before ata_pio_sectors().
4961 ap->hsm_task_state = HSM_ST;
4962 ata_pio_sectors(qc);
4963 ata_altstatus(ap); /* flush */
4966 atapi_send_cdb(ap, qc);
4969 spin_unlock_irqrestore(ap->lock, flags);
4971 /* if polling, ata_pio_task() handles the rest.
4972 * otherwise, interrupt handler takes over from here.
4977 /* complete command or read/write the data register */
4978 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4979 /* ATAPI PIO protocol */
4980 if ((status & ATA_DRQ) == 0) {
4981 /* No more data to transfer or device error.
4982 * Device error will be tagged in HSM_ST_LAST.
4984 ap->hsm_task_state = HSM_ST_LAST;
4988 /* Device should not ask for data transfer (DRQ=1)
4989 * when it finds something wrong.
4990 * We ignore DRQ here and stop the HSM by
4991 * changing hsm_task_state to HSM_ST_ERR and
4992 * let the EH abort the command or reset the device.
4994 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4995 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4996 "device error, dev_stat 0x%X\n",
4998 qc->err_mask |= AC_ERR_HSM;
4999 ap->hsm_task_state = HSM_ST_ERR;
5003 atapi_pio_bytes(qc);
5005 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5006 /* bad ireason reported by device */
5010 /* ATA PIO protocol */
5011 if (unlikely((status & ATA_DRQ) == 0)) {
5012 /* handle BSY=0, DRQ=0 as error */
5013 if (likely(status & (ATA_ERR | ATA_DF)))
5014 /* device stops HSM for abort/error */
5015 qc->err_mask |= AC_ERR_DEV;
5017 /* HSM violation. Let EH handle this.
5018 * Phantom devices also trigger this
5019 * condition. Mark the NODEV hint.
5021 qc->err_mask |= AC_ERR_HSM | AC_ERR_NODEV_HINT;
5024 ap->hsm_task_state = HSM_ST_ERR;
5028 /* For PIO reads, some devices may ask for
5029 * data transfer (DRQ=1) along with ERR=1.
5030 * We respect DRQ here and transfer one
5031 * block of junk data before changing the
5032 * hsm_task_state to HSM_ST_ERR.
5034 * For PIO writes, ERR=1 DRQ=1 doesn't make
5035 * sense since the data block has been
5036 * transferred to the device.
5038 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5039 /* data might be corrupted */
5040 qc->err_mask |= AC_ERR_DEV;
5042 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5043 ata_pio_sectors(qc);
5045 status = ata_wait_idle(ap);
5048 if (status & (ATA_BUSY | ATA_DRQ))
5049 qc->err_mask |= AC_ERR_HSM;
5051 /* ata_pio_sectors() might change the
5052 * state to HSM_ST_LAST. So, the state
5053 * is changed after ata_pio_sectors().
5055 ap->hsm_task_state = HSM_ST_ERR;
5059 ata_pio_sectors(qc);
5061 if (ap->hsm_task_state == HSM_ST_LAST &&
5062 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5065 status = ata_wait_idle(ap);
5070 ata_altstatus(ap); /* flush */
5075 if (unlikely(!ata_ok(status))) {
5076 qc->err_mask |= __ac_err_mask(status);
5077 ap->hsm_task_state = HSM_ST_ERR;
5081 /* no more data to transfer */
5082 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5083 ap->print_id, qc->dev->devno, status);
5085 WARN_ON(qc->err_mask);
5087 ap->hsm_task_state = HSM_ST_IDLE;
5089 /* complete taskfile transaction */
5090 ata_hsm_qc_complete(qc, in_wq);
5096 /* make sure qc->err_mask is available to
5097 * know what's wrong and recover
5099 WARN_ON(qc->err_mask == 0);
5101 ap->hsm_task_state = HSM_ST_IDLE;
5103 /* complete taskfile transaction */
5104 ata_hsm_qc_complete(qc, in_wq);
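/* For reference, a successful PIO/ATAPI command walks the HSM as
 *
 *	HSM_ST_FIRST -> HSM_ST -> ... -> HSM_ST_LAST -> HSM_ST_IDLE
 *
 * with HSM_ST_ERR entered from any state on a device error or HSM
 * violation; both terminal states funnel through ata_hsm_qc_complete().
 */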
5116 static void ata_pio_task(struct work_struct *work)
5118 struct ata_port *ap =
5119 container_of(work, struct ata_port, port_task.work);
5120 struct ata_queued_cmd *qc = ap->port_task_data;
5125 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5128 * This is purely heuristic. This is a fast path.
5129 * Sometimes when we enter, BSY will be cleared in
5130 * a chk-status or two. If not, the drive is probably seeking
5131 * or something. Snooze for a couple msecs, then
5132 * chk-status again. If still busy, queue delayed work.
5134 status = ata_busy_wait(ap, ATA_BUSY, 5);
5135 if (status & ATA_BUSY) {
5137 status = ata_busy_wait(ap, ATA_BUSY, 10);
5138 if (status & ATA_BUSY) {
5139 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
5145 poll_next = ata_hsm_move(ap, qc, status, 1);
5147 /* another command or interrupt handler
5148 * may be running at this point.
5155 * ata_qc_new - Request an available ATA command, for queueing
5156 * @ap: Port associated with device @dev
5157 * @dev: Device from which we request an available command structure
5163 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5165 struct ata_queued_cmd *qc = NULL;
5168 /* no command while frozen */
5169 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5172 /* the last tag is reserved for internal command. */
5173 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5174 if (!test_and_set_bit(i, &ap->qc_allocated)) {
5175 qc = __ata_qc_from_tag(ap, i);
5186 * ata_qc_new_init - Request an available ATA command, and initialize it
5187 * @dev: Device from which we request an available command structure
5193 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5195 struct ata_port *ap = dev->link->ap;
5196 struct ata_queued_cmd *qc;
5198 qc = ata_qc_new(ap);
5211 * ata_qc_free - free unused ata_queued_cmd
5212 * @qc: Command to complete
5214 * Designed to free unused ata_queued_cmd object
5215 * in case something prevents using it.
5218 * spin_lock_irqsave(host lock)
5220 void ata_qc_free(struct ata_queued_cmd *qc)
5222 struct ata_port *ap = qc->ap;
5225 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5229 if (likely(ata_tag_valid(tag))) {
5230 qc->tag = ATA_TAG_POISON;
5231 clear_bit(tag, &ap->qc_allocated);
5235 void __ata_qc_complete(struct ata_queued_cmd *qc)
5237 struct ata_port *ap = qc->ap;
5238 struct ata_link *link = qc->dev->link;
5240 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5241 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5243 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5246 /* command should be marked inactive atomically with qc completion */
5247 if (qc->tf.protocol == ATA_PROT_NCQ)
5248 link->sactive &= ~(1 << qc->tag);
5250 link->active_tag = ATA_TAG_POISON;
5252 /* atapi: mark qc as inactive to prevent the interrupt handler
5253 * from completing the command twice later, before the error handler
5254 * is called. (when rc != 0 and atapi request sense is needed)
5256 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5257 ap->qc_active &= ~(1 << qc->tag);
5259 /* call completion callback */
5260 qc->complete_fn(qc);
5263 static void fill_result_tf(struct ata_queued_cmd *qc)
5265 struct ata_port *ap = qc->ap;
5267 qc->result_tf.flags = qc->tf.flags;
5268 ap->ops->tf_read(ap, &qc->result_tf);
5272 * ata_qc_complete - Complete an active ATA command
5273 * @qc: Command to complete
5276 * Indicate to the mid and upper layers that an ATA
5277 * command has completed, with either an ok or not-ok status.
5280 * spin_lock_irqsave(host lock)
5282 void ata_qc_complete(struct ata_queued_cmd *qc)
5284 struct ata_port *ap = qc->ap;
5286 /* XXX: New EH and old EH use different mechanisms to
5287 * synchronize EH with regular execution path.
5289 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5290 * Normal execution path is responsible for not accessing a
5291 * failed qc. libata core enforces the rule by returning NULL
5292 * from ata_qc_from_tag() for failed qcs.
5294 * Old EH depends on ata_qc_complete() nullifying completion
5295 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5296 * not synchronize with interrupt handler. Only PIO task is taken care of.
5299 if (ap->ops->error_handler) {
5300 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5302 if (unlikely(qc->err_mask))
5303 qc->flags |= ATA_QCFLAG_FAILED;
5305 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5306 if (!ata_tag_internal(qc->tag)) {
5307 /* always fill result TF for failed qc */
5309 ata_qc_schedule_eh(qc);
5314 /* read result TF if requested */
5315 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5318 __ata_qc_complete(qc);
5320 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5323 /* read result TF if failed or requested */
5324 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5327 __ata_qc_complete(qc);
5332 * ata_qc_complete_multiple - Complete multiple qcs successfully
5333 * @ap: port in question
5334 * @qc_active: new qc_active mask
5335 * @finish_qc: LLDD callback invoked before completing a qc
5337 * Complete in-flight commands. This function is meant to be
5338 * called from the low-level driver's interrupt routine to complete
5339 * requests normally. ap->qc_active and @qc_active are compared
5340 * and commands are completed accordingly.
5343 * spin_lock_irqsave(host lock)
5346 * Number of completed commands on success, -errno otherwise.
5348 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5349 void (*finish_qc)(struct ata_queued_cmd *))
5355 done_mask = ap->qc_active ^ qc_active;
5357 if (unlikely(done_mask & qc_active)) {
5358 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5359 "(%08x->%08x)\n", ap->qc_active, qc_active);
5363 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5364 struct ata_queued_cmd *qc;
5366 if (!(done_mask & (1 << i)))
5369 if ((qc = ata_qc_from_tag(ap, i))) {
5372 ata_qc_complete(qc);
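/* Illustrative sketch (hypothetical LLD code, not part of libata): an
 * NCQ-capable driver's interrupt handler could read its controller's
 * active-tag register and hand the new mask to
 * ata_qc_complete_multiple().  my_read_active_tags() is made up.
 */
#if 0
static void my_intr_complete(struct ata_port *ap)
{
	u32 qc_active = my_read_active_tags(ap);	/* hypothetical */

	if (ata_qc_complete_multiple(ap, qc_active, NULL) < 0)
		ata_port_freeze(ap);	/* bad transition, let EH recover */
}
#endif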
5380 static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5382 struct ata_port *ap = qc->ap;
5384 switch (qc->tf.protocol) {
5387 case ATA_PROT_ATAPI_DMA:
5390 case ATA_PROT_ATAPI:
5392 if (ap->flags & ATA_FLAG_PIO_DMA)
5405 * ata_qc_issue - issue taskfile to device
5406 * @qc: command to issue to device
5408 * Prepare an ATA command for submission to the device.
5409 * This includes mapping the data into a DMA-able
5410 * area, filling in the S/G table, and finally
5411 * writing the taskfile to hardware, starting the command.
5414 * spin_lock_irqsave(host lock)
5416 void ata_qc_issue(struct ata_queued_cmd *qc)
5418 struct ata_port *ap = qc->ap;
5419 struct ata_link *link = qc->dev->link;
5421 /* Make sure only one non-NCQ command is outstanding. The
5422 * check is skipped for old EH because it reuses active qc to
5423 * request ATAPI sense.
5425 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
5427 if (qc->tf.protocol == ATA_PROT_NCQ) {
5428 WARN_ON(link->sactive & (1 << qc->tag));
5429 link->sactive |= 1 << qc->tag;
5431 WARN_ON(link->sactive);
5432 link->active_tag = qc->tag;
5435 qc->flags |= ATA_QCFLAG_ACTIVE;
5436 ap->qc_active |= 1 << qc->tag;
5438 if (ata_should_dma_map(qc)) {
5439 if (qc->flags & ATA_QCFLAG_SG) {
5440 if (ata_sg_setup(qc))
5442 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5443 if (ata_sg_setup_one(qc))
5447 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5450 ap->ops->qc_prep(qc);
5452 qc->err_mask |= ap->ops->qc_issue(qc);
5453 if (unlikely(qc->err_mask))
5458 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5459 qc->err_mask |= AC_ERR_SYSTEM;
5461 ata_qc_complete(qc);
5465 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5466 * @qc: command to issue to device
5468 * Using various libata functions and hooks, this function
5469 * starts an ATA command. ATA commands are grouped into
5470 * classes called "protocols", and issuing each type of protocol
5471 * is slightly different.
5473 * May be used as the qc_issue() entry in ata_port_operations.
5476 * spin_lock_irqsave(host lock)
5479 * Zero on success, AC_ERR_* mask on failure
5482 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
5484 struct ata_port *ap = qc->ap;
5486 /* Use polling PIO if the LLD doesn't handle
5487 * interrupt-driven PIO and the ATAPI CDB interrupt.
5489 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5490 switch (qc->tf.protocol) {
5492 case ATA_PROT_NODATA:
5493 case ATA_PROT_ATAPI:
5494 case ATA_PROT_ATAPI_NODATA:
5495 qc->tf.flags |= ATA_TFLAG_POLLING;
5497 case ATA_PROT_ATAPI_DMA:
5498 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5499 /* see ata_dma_blacklisted() */
5507 /* select the device */
5508 ata_dev_select(ap, qc->dev->devno, 1, 0);
5510 /* start the command */
5511 switch (qc->tf.protocol) {
5512 case ATA_PROT_NODATA:
5513 if (qc->tf.flags & ATA_TFLAG_POLLING)
5514 ata_qc_set_polling(qc);
5516 ata_tf_to_host(ap, &qc->tf);
5517 ap->hsm_task_state = HSM_ST_LAST;
5519 if (qc->tf.flags & ATA_TFLAG_POLLING)
5520 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5525 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5527 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5528 ap->ops->bmdma_setup(qc); /* set up bmdma */
5529 ap->ops->bmdma_start(qc); /* initiate bmdma */
5530 ap->hsm_task_state = HSM_ST_LAST;
5534 if (qc->tf.flags & ATA_TFLAG_POLLING)
5535 ata_qc_set_polling(qc);
5537 ata_tf_to_host(ap, &qc->tf);
5539 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5540 /* PIO data out protocol */
5541 ap->hsm_task_state = HSM_ST_FIRST;
5542 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5544 /* always send first data block using
5545 * the ata_pio_task() codepath.
5548 /* PIO data in protocol */
5549 ap->hsm_task_state = HSM_ST;
5551 if (qc->tf.flags & ATA_TFLAG_POLLING)
5552 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5554 /* if polling, ata_pio_task() handles the rest.
5555 * otherwise, interrupt handler takes over from here.
5561 case ATA_PROT_ATAPI:
5562 case ATA_PROT_ATAPI_NODATA:
5563 if (qc->tf.flags & ATA_TFLAG_POLLING)
5564 ata_qc_set_polling(qc);
5566 ata_tf_to_host(ap, &qc->tf);
5568 ap->hsm_task_state = HSM_ST_FIRST;
5570 /* send cdb by polling if no cdb interrupt */
5571 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5572 (qc->tf.flags & ATA_TFLAG_POLLING))
5573 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5576 case ATA_PROT_ATAPI_DMA:
5577 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5579 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5580 ap->ops->bmdma_setup(qc); /* set up bmdma */
5581 ap->hsm_task_state = HSM_ST_FIRST;
5583 /* send cdb by polling if no cdb interrupt */
5584 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5585 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5590 return AC_ERR_SYSTEM;
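/* Summary of the dispatch above:
 *
 *	ATA_PROT_NODATA       ->  HSM_ST_LAST (via ata_pio_task() if polling)
 *	ATA_PROT_DMA          ->  bmdma setup/start, HSM_ST_LAST, irq driven
 *	ATA_PROT_PIO (write)  ->  HSM_ST_FIRST, first block from ata_pio_task()
 *	ATA_PROT_PIO (read)   ->  HSM_ST, irq driven or polled
 *	ATA_PROT_ATAPI*       ->  HSM_ST_FIRST, CDB polled unless ATA_DFLAG_CDB_INTR
 *	ATA_PROT_ATAPI_DMA    ->  bmdma setup, HSM_ST_FIRST, CDB polled unless CDB_INTR
 */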
5597 * ata_host_intr - Handle host interrupt for given (port, task)
5598 * @ap: Port on which interrupt arrived (possibly...)
5599 * @qc: Taskfile currently active in engine
5601 * Handle host interrupt for given queued command. Currently,
5602 * only DMA interrupts are handled. All other commands are
5603 * handled via polling with interrupts disabled (nIEN bit).
5606 * spin_lock_irqsave(host lock)
5609 * One if interrupt was handled, zero if not (shared irq).
5612 inline unsigned int ata_host_intr (struct ata_port *ap,
5613 struct ata_queued_cmd *qc)
5615 struct ata_eh_info *ehi = &ap->link.eh_info;
5616 u8 status, host_stat = 0;
5618 VPRINTK("ata%u: protocol %d task_state %d\n",
5619 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5621 /* Check whether we are expecting interrupt in this state */
5622 switch (ap->hsm_task_state) {
5624 /* Some pre-ATAPI-4 devices assert INTRQ
5625 * at this state when ready to receive CDB.
5628 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5629 * The flag was turned on only for atapi devices.
5630 * No need to check is_atapi_taskfile(&qc->tf) again.
5632 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5636 if (qc->tf.protocol == ATA_PROT_DMA ||
5637 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5638 /* check status of DMA engine */
5639 host_stat = ap->ops->bmdma_status(ap);
5640 VPRINTK("ata%u: host_stat 0x%X\n",
5641 ap->print_id, host_stat);
5643 /* if it's not our irq... */
5644 if (!(host_stat & ATA_DMA_INTR))
5647 /* before we do anything else, clear DMA-Start bit */
5648 ap->ops->bmdma_stop(qc);
5650 if (unlikely(host_stat & ATA_DMA_ERR)) {
5651 /* error when transferring data to/from memory */
5652 qc->err_mask |= AC_ERR_HOST_BUS;
5653 ap->hsm_task_state = HSM_ST_ERR;
5663 /* check altstatus */
5664 status = ata_altstatus(ap);
5665 if (status & ATA_BUSY)
5668 /* check main status, clearing INTRQ */
5669 status = ata_chk_status(ap);
5670 if (unlikely(status & ATA_BUSY))
5673 /* ack bmdma irq events */
5674 ap->ops->irq_clear(ap);
5676 ata_hsm_move(ap, qc, status, 0);
5678 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5679 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5680 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5682 return 1; /* irq handled */
5685 ap->stats.idle_irq++;
5688 if ((ap->stats.idle_irq % 1000) == 0) {
5689 ap->ops->irq_ack(ap, 0); /* debug trap */
5690 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5694 return 0; /* irq not handled */
5698 * ata_interrupt - Default ATA host interrupt handler
5699 * @irq: irq line (unused)
5700 * @dev_instance: pointer to our ata_host information structure
5702 * Default interrupt handler for PCI IDE devices. Calls
5703 * ata_host_intr() for each port that is not disabled.
5706 * Obtains host lock during operation.
5709 * IRQ_NONE or IRQ_HANDLED.
5712 irqreturn_t ata_interrupt (int irq, void *dev_instance)
5714 struct ata_host *host = dev_instance;
5716 unsigned int handled = 0;
5717 unsigned long flags;
5719 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5720 spin_lock_irqsave(&host->lock, flags);
5722 for (i = 0; i < host->n_ports; i++) {
5723 struct ata_port *ap;
5725 ap = host->ports[i];
5727 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
5728 struct ata_queued_cmd *qc;
5730 qc = ata_qc_from_tag(ap, ap->link.active_tag);
5731 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5732 (qc->flags & ATA_QCFLAG_ACTIVE))
5733 handled |= ata_host_intr(ap, qc);
5737 spin_unlock_irqrestore(&host->lock, flags);
5739 return IRQ_RETVAL(handled);
5743 * sata_scr_valid - test whether SCRs are accessible
5744 * @ap: ATA port to test SCR accessibility for
5746 * Test whether SCRs are accessible for @ap.
5752 * 1 if SCRs are accessible, 0 otherwise.
5754 int sata_scr_valid(struct ata_port *ap)
5756 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
5760 * sata_scr_read - read SCR register of the specified port
5761 * @ap: ATA port to read SCR for
5763 * @val: Place to store read value
5765 * Read SCR register @reg of @ap into *@val. This function is
5766 * guaranteed to succeed if the cable type of the port is SATA
5767 * and the port implements ->scr_read.
5773 * 0 on success, negative errno on failure.
5775 int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5777 if (sata_scr_valid(ap))
5778 return ap->ops->scr_read(ap, reg, val);
5783 * sata_scr_write - write SCR register of the specified port
5784 * @ap: ATA port to write SCR for
5785 * @reg: SCR to write
5786 * @val: value to write
5788 * Write @val to SCR register @reg of @ap. This function is
5789 * guaranteed to succeed if the cable type of the port is SATA
5790 * and the port implements ->scr_write.
5796 * 0 on success, negative errno on failure.
5798 int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5800 if (sata_scr_valid(ap))
5801 return ap->ops->scr_write(ap, reg, val);
5806 * sata_scr_write_flush - write SCR register of the specified port and flush
5807 * @ap: ATA port to write SCR for
5808 * @reg: SCR to write
5809 * @val: value to write
5811 * This function is identical to sata_scr_write() except that this
5812 * function performs flush after writing to the register.
5818 * 0 on success, negative errno on failure.
5820 int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5824 if (sata_scr_valid(ap)) {
5825 rc = ap->ops->scr_write(ap, reg, val);
5827 rc = ap->ops->scr_read(ap, reg, &val);
5834 * ata_port_online - test whether the given port is online
5835 * @ap: ATA port to test
5837 * Test whether @ap is online. Note that this function returns 0
5838 * if online status of @ap cannot be obtained, so
5839 * ata_port_online(ap) != !ata_port_offline(ap).
5845 * 1 if the port online status is available and online.
5847 int ata_port_online(struct ata_port *ap)
5851 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5857 * ata_port_offline - test whether the given port is offline
5858 * @ap: ATA port to test
5860 * Test whether @ap is offline. Note that this function returns
5861 * 0 if offline status of @ap cannot be obtained, so
5862 * ata_port_online(ap) != !ata_port_offline(ap).
5868 * 1 if the port offline status is available and offline.
5870 int ata_port_offline(struct ata_port *ap)
5874 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
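/* Illustrative sketch (not part of libata): decoding the SStatus
 * fields used above and in ata_host_register() below.  DET is bits
 * 3:0 (0x3 == device present, phy online), SPD bits 7:4, IPM bits 11:8.
 */
#if 0
static void my_dump_sstatus(struct ata_port *ap)
{
	u32 sstatus;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0)
		ata_port_printk(ap, KERN_DEBUG, "DET %x SPD %x IPM %x\n",
				sstatus & 0xf, (sstatus >> 4) & 0xf,
				(sstatus >> 8) & 0xf);
}
#endif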
5879 int ata_flush_cache(struct ata_device *dev)
5881 unsigned int err_mask;
5884 if (!ata_try_flush_cache(dev))
5887 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5888 cmd = ATA_CMD_FLUSH_EXT;
5890 cmd = ATA_CMD_FLUSH;
5892 err_mask = ata_do_simple_cmd(dev, cmd);
5894 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5902 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5903 unsigned int action, unsigned int ehi_flags,
5904 int wait)
5906 unsigned long flags;
5909 for (i = 0; i < host->n_ports; i++) {
5910 struct ata_port *ap = host->ports[i];
5912 /* Previous resume operation might still be in
5913 * progress. Wait for PM_PENDING to clear.
5915 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5916 ata_port_wait_eh(ap);
5917 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5920 /* request PM ops to EH */
5921 spin_lock_irqsave(ap->lock, flags);
5926 ap->pm_result = &rc;
5929 ap->pflags |= ATA_PFLAG_PM_PENDING;
5930 ap->link.eh_info.action |= action;
5931 ap->link.eh_info.flags |= ehi_flags;
5933 ata_port_schedule_eh(ap);
5935 spin_unlock_irqrestore(ap->lock, flags);
5937 /* wait and check result */
5939 ata_port_wait_eh(ap);
5940 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5950 * ata_host_suspend - suspend host
5951 * @host: host to suspend
5954 * Suspend @host. Actual operation is performed by EH. This
5955 * function requests EH to perform PM operations and waits for EH
5959 * Kernel thread context (may sleep).
5962 * 0 on success, -errno on failure.
5964 int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5968 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5970 host->dev->power.power_state = mesg;
5975 * ata_host_resume - resume host
5976 * @host: host to resume
5978 * Resume @host. Actual operation is performed by EH. This
5979 * function requests EH to perform PM operations and returns.
5980 * Note that all resume operations are performed in parallel.
5983 * Kernel thread context (may sleep).
5985 void ata_host_resume(struct ata_host *host)
5987 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5988 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5989 host->dev->power.power_state = PMSG_ON;
5994 * ata_port_start - Set port up for dma.
5995 * @ap: Port to initialize
5997 * Called just after data structures for each port are
5998 * initialized. Allocates space for PRD table.
6000 * May be used as the port_start() entry in ata_port_operations.
6003 * Inherited from caller.
6005 int ata_port_start(struct ata_port *ap)
6007 struct device *dev = ap->dev;
6010 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6015 rc = ata_pad_alloc(ap, dev);
6019 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6020 (unsigned long long)ap->prd_dma);
6025 * ata_dev_init - Initialize an ata_device structure
6026 * @dev: Device structure to initialize
6028 * Initialize @dev in preparation for probing.
6031 * Inherited from caller.
6033 void ata_dev_init(struct ata_device *dev)
6035 struct ata_link *link = dev->link;
6036 struct ata_port *ap = link->ap;
6037 unsigned long flags;
6039 /* SATA spd limit is bound to the first device */
6040 link->sata_spd_limit = link->hw_sata_spd_limit;
6043 /* High bits of dev->flags are used to record warm plug
6044 * requests which occur asynchronously. Synchronize using host lock.
6047 spin_lock_irqsave(ap->lock, flags);
6048 dev->flags &= ~ATA_DFLAG_INIT_MASK;
6050 spin_unlock_irqrestore(ap->lock, flags);
6052 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6053 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
6054 dev->pio_mask = UINT_MAX;
6055 dev->mwdma_mask = UINT_MAX;
6056 dev->udma_mask = UINT_MAX;
6060 * ata_port_alloc - allocate and initialize basic ATA port resources
6061 * @host: ATA host this allocated port belongs to
6063 * Allocate and initialize basic ATA port resources.
6066 * Allocate ATA port on success, NULL on failure.
6069 * Inherited from calling layer (may sleep).
6071 struct ata_port *ata_port_alloc(struct ata_host *host)
6073 struct ata_port *ap;
6078 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6082 ap->pflags |= ATA_PFLAG_INITIALIZING;
6083 ap->lock = &host->lock;
6084 ap->flags = ATA_FLAG_DISABLED;
6086 ap->ctl = ATA_DEVCTL_OBS;
6088 ap->dev = host->dev;
6090 ap->link.hw_sata_spd_limit = UINT_MAX;
6091 ap->link.active_tag = ATA_TAG_POISON;
6092 ap->last_ctl = 0xFF;
6094 #if defined(ATA_VERBOSE_DEBUG)
6095 /* turn on all debugging levels */
6096 ap->msg_enable = 0x00FF;
6097 #elif defined(ATA_DEBUG)
6098 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6100 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6103 INIT_DELAYED_WORK(&ap->port_task, NULL);
6104 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6105 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6106 INIT_LIST_HEAD(&ap->eh_done_q);
6107 init_waitqueue_head(&ap->eh_wait_q);
6108 init_timer_deferrable(&ap->fastdrain_timer);
6109 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6110 ap->fastdrain_timer.data = (unsigned long)ap;
6112 ap->cbl = ATA_CBL_NONE;
6116 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6117 struct ata_device *dev = &ap->link.device[i];
6118 dev->link = &ap->link;
6124 ap->stats.unhandled_irq = 1;
6125 ap->stats.idle_irq = 1;
6130 static void ata_host_release(struct device *gendev, void *res)
6132 struct ata_host *host = dev_get_drvdata(gendev);
6135 for (i = 0; i < host->n_ports; i++) {
6136 struct ata_port *ap = host->ports[i];
6141 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6142 ap->ops->port_stop(ap);
6145 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6146 host->ops->host_stop(host);
6148 for (i = 0; i < host->n_ports; i++) {
6149 struct ata_port *ap = host->ports[i];
6155 scsi_host_put(ap->scsi_host);
6158 host->ports[i] = NULL;
6161 dev_set_drvdata(gendev, NULL);
6165 * ata_host_alloc - allocate and init basic ATA host resources
6166 * @dev: generic device this host is associated with
6167 * @max_ports: maximum number of ATA ports associated with this host
6169 * Allocate and initialize basic ATA host resources. An LLD calls
6170 * this function to allocate a host, initializes it fully and then
6171 * attaches it using ata_host_register().
6173 * @max_ports ports are allocated and host->n_ports is
6174 * initialized to @max_ports. The caller is allowed to decrease
6175 * host->n_ports before calling ata_host_register(). The unused
6176 * ports will be automatically freed on registration.
6179 * Allocate ATA host on success, NULL on failure.
6182 * Inherited from calling layer (may sleep).
6184 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6186 struct ata_host *host;
6192 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6195 /* alloc a container for our list of ATA ports (buses) */
6196 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6198 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6202 devres_add(dev, host);
6203 dev_set_drvdata(dev, host);
6205 spin_lock_init(&host->lock);
6207 host->n_ports = max_ports;
6209 /* allocate ports bound to this host */
6210 for (i = 0; i < max_ports; i++) {
6211 struct ata_port *ap;
6213 ap = ata_port_alloc(host);
6218 host->ports[i] = ap;
6221 devres_remove_group(dev, NULL);
6225 devres_release_group(dev, NULL);
6230 * ata_host_alloc_pinfo - alloc host and init with port_info array
6231 * @dev: generic device this host is associated with
6232 * @ppi: array of ATA port_info to initialize host with
6233 * @n_ports: number of ATA ports attached to this host
6235 * Allocate ATA host and initialize with info from @ppi. If NULL
6236 * terminated, @ppi may contain fewer entries than @n_ports. The
6237 * last entry will be used for the remaining ports.
6240 * Allocate ATA host on success, NULL on failure.
6243 * Inherited from calling layer (may sleep).
6245 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6246 const struct ata_port_info * const * ppi,
6249 const struct ata_port_info *pi;
6250 struct ata_host *host;
6253 host = ata_host_alloc(dev, n_ports);
6257 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6258 struct ata_port *ap = host->ports[i];
6263 ap->pio_mask = pi->pio_mask;
6264 ap->mwdma_mask = pi->mwdma_mask;
6265 ap->udma_mask = pi->udma_mask;
6266 ap->flags |= pi->flags;
6267 ap->ops = pi->port_ops;
6269 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6270 host->ops = pi->port_ops;
6271 if (!host->private_data && pi->private_data)
6272 host->private_data = pi->private_data;
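/* Illustrative sketch (hypothetical LLD code): a typical caller builds
 * a NULL-terminated port_info array; the final entry is reused for any
 * remaining ports.  my_port_ops and the transfer masks are made up.
 */
#if 0
static const struct ata_port_info my_pinfo = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= 0x1f,			/* PIO0-4 */
	.udma_mask	= 0x3f,			/* UDMA0-5 */
	.port_ops	= &my_port_ops,		/* hypothetical */
};

static struct ata_host *my_alloc_host(struct device *dev)
{
	const struct ata_port_info *ppi[] = { &my_pinfo, NULL };

	return ata_host_alloc_pinfo(dev, ppi, 2);
}
#endif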
6279 * ata_host_start - start and freeze ports of an ATA host
6280 * @host: ATA host to start ports for
6282 * Start and then freeze ports of @host. Started status is
6283 * recorded in host->flags, so this function can be called
6284 * multiple times. Ports are guaranteed to get started only
6285 * once. If host->ops isn't initialized yet, it's set to the
6286 * first non-dummy port ops.
6289 * Inherited from calling layer (may sleep).
6292 * 0 if all ports are started successfully, -errno otherwise.
6294 int ata_host_start(struct ata_host *host)
6298 if (host->flags & ATA_HOST_STARTED)
6301 for (i = 0; i < host->n_ports; i++) {
6302 struct ata_port *ap = host->ports[i];
6304 if (!host->ops && !ata_port_is_dummy(ap))
6305 host->ops = ap->ops;
6307 if (ap->ops->port_start) {
6308 rc = ap->ops->port_start(ap);
6310 ata_port_printk(ap, KERN_ERR, "failed to "
6311 "start port (errno=%d)\n", rc);
6316 ata_eh_freeze_port(ap);
6319 host->flags |= ATA_HOST_STARTED;
6324 struct ata_port *ap = host->ports[i];
6326 if (ap->ops->port_stop)
6327 ap->ops->port_stop(ap);
6333 * ata_host_init - Initialize a host struct
6334 * @host: host to initialize
6335 * @dev: device host is attached to
6336 * @flags: host flags
6340 * PCI/etc. bus probe sem.
6343 /* KILLME - the only user left is ipr */
6344 void ata_host_init(struct ata_host *host, struct device *dev,
6345 unsigned long flags, const struct ata_port_operations *ops)
6347 spin_lock_init(&host->lock);
6349 host->flags = flags;
6354 * ata_host_register - register initialized ATA host
6355 * @host: ATA host to register
6356 * @sht: template for SCSI host
6358 * Register initialized ATA host. @host is allocated using
6359 * ata_host_alloc() and fully initialized by LLD. This function
6360 * starts ports, registers @host with ATA and SCSI layers and
6361 * probes registered devices.
6364 * Inherited from calling layer (may sleep).
6367 * 0 on success, -errno otherwise.
6369 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6373 /* host must have been started */
6374 if (!(host->flags & ATA_HOST_STARTED)) {
6375 dev_printk(KERN_ERR, host->dev,
6376 "BUG: trying to register unstarted host\n");
6381 /* Blow away unused ports. This happens when LLD can't
6382 * determine the exact number of ports to allocate at
6383 * allocation time.
6385 for (i = host->n_ports; host->ports[i]; i++)
6386 kfree(host->ports[i]);
6388 /* give ports names and add SCSI hosts */
6389 for (i = 0; i < host->n_ports; i++)
6390 host->ports[i]->print_id = ata_print_id++;
6392 rc = ata_scsi_add_hosts(host, sht);
6396 /* associate with ACPI nodes */
6397 ata_acpi_associate(host);
6399 /* set cable, sata_spd_limit and report */
6400 for (i = 0; i < host->n_ports; i++) {
6401 struct ata_port *ap = host->ports[i];
6404 unsigned long xfer_mask;
6406 /* set SATA cable type if still unset */
6407 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6408 ap->cbl = ATA_CBL_SATA;
6410 /* init sata_spd_limit to the current value */
6411 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
6412 int spd = (scontrol >> 4) & 0xf;
6414 ap->link.hw_sata_spd_limit &= (1 << spd) - 1;
6416 ap->link.sata_spd_limit = ap->link.hw_sata_spd_limit;
6418 /* report the secondary IRQ for second channel legacy */
6419 irq_line = host->irq;
6420 if (i == 1 && host->irq2)
6421 irq_line = host->irq2;
6423 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6426 /* print per-port info to dmesg */
6427 if (!ata_port_is_dummy(ap))
6428 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
6429 "ctl 0x%p bmdma 0x%p irq %d\n",
6430 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
6431 ata_mode_string(xfer_mask),
6432 ap->ioaddr.cmd_addr,
6433 ap->ioaddr.ctl_addr,
6434 ap->ioaddr.bmdma_addr,
6437 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6440 /* perform each probe synchronously */
6441 DPRINTK("probe begin\n");
6442 for (i = 0; i < host->n_ports; i++) {
6443 struct ata_port *ap = host->ports[i];
6447 if (ap->ops->error_handler) {
6448 struct ata_eh_info *ehi = &ap->link.eh_info;
6449 unsigned long flags;
6453 /* kick EH for boot probing */
6454 spin_lock_irqsave(ap->lock, flags);
6456 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
6457 ehi->action |= ATA_EH_SOFTRESET;
6458 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6460 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6461 ap->pflags |= ATA_PFLAG_LOADING;
6462 ata_port_schedule_eh(ap);
6464 spin_unlock_irqrestore(ap->lock, flags);
6466 /* wait for EH to finish */
6467 ata_port_wait_eh(ap);
6469 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6470 rc = ata_bus_probe(ap);
6471 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6474 /* FIXME: do something useful here?
6475 * Current libata behavior will
6476 * tear down everything when
6477 * the module is removed
6478 * or the h/w is unplugged.
6484 /* probes are done, now scan each port's disk(s) */
6485 DPRINTK("host probe begin\n");
6486 for (i = 0; i < host->n_ports; i++) {
6487 struct ata_port *ap = host->ports[i];
6489 ata_scsi_scan_host(ap, 1);
6496 * ata_host_activate - start host, request IRQ and register it
6497 * @host: target ATA host
6498 * @irq: IRQ to request
6499 * @irq_handler: irq_handler used when requesting IRQ
6500 * @irq_flags: irq_flags used when requesting IRQ
6501 * @sht: scsi_host_template to use when registering the host
6503 * After allocating an ATA host and initializing it, most libata
6504 * LLDs perform three steps to activate the host - start host,
6505 * request IRQ and register it. This helper takes the necessary
6506 * arguments and performs the three steps in one go.
6509 * Inherited from calling layer (may sleep).
6512 * 0 on success, -errno otherwise.
6514 int ata_host_activate(struct ata_host *host, int irq,
6515 irq_handler_t irq_handler, unsigned long irq_flags,
6516 struct scsi_host_template *sht)
6520 rc = ata_host_start(host);
6524 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6525 dev_driver_string(host->dev), host);
6529 /* Used to print device info at probe */
6532 rc = ata_host_register(host, sht);
6533 /* if failed, just free the IRQ and leave ports alone */
6535 devm_free_irq(host->dev, irq, host);
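/* Illustrative sketch (hypothetical LLD code): the usual probe-time
 * sequence is allocate (e.g. ata_host_alloc_pinfo() above), fill in
 * each port's ioaddr, then one ata_host_activate() call.  my_sht is a
 * made-up scsi_host_template.
 */
#if 0
static int my_probe_activate(struct ata_host *host, int irq)
{
	return ata_host_activate(host, irq, ata_interrupt, IRQF_SHARED,
				 &my_sht);	/* hypothetical */
}
#endif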
6541 * ata_port_detach - Detach ATA port in preparation for device removal
6542 * @ap: ATA port to be detached
6544 * Detach all ATA devices and the associated SCSI devices of @ap;
6545 * then, remove the associated SCSI host. @ap is guaranteed to
6546 * be quiescent on return from this function.
6549 * Kernel thread context (may sleep).
6551 void ata_port_detach(struct ata_port *ap)
6553 unsigned long flags;
6556 if (!ap->ops->error_handler)
6559 /* tell EH we're leaving & flush EH */
6560 spin_lock_irqsave(ap->lock, flags);
6561 ap->pflags |= ATA_PFLAG_UNLOADING;
6562 spin_unlock_irqrestore(ap->lock, flags);
6564 ata_port_wait_eh(ap);
6566 /* EH is now guaranteed to see UNLOADING, so no new device
6567 * will be attached. Disable all existing devices.
6569 spin_lock_irqsave(ap->lock, flags);
6571 for (i = 0; i < ATA_MAX_DEVICES; i++)
6572 ata_dev_disable(&ap->link.device[i]);
6574 spin_unlock_irqrestore(ap->lock, flags);
6576 /* Final freeze & EH. All in-flight commands are aborted. EH
6577 * will be skipped and retries will be terminated with bad
6580 spin_lock_irqsave(ap->lock, flags);
6581 ata_port_freeze(ap); /* won't be thawed */
6582 spin_unlock_irqrestore(ap->lock, flags);
6584 ata_port_wait_eh(ap);
6585 cancel_rearming_delayed_work(&ap->hotplug_task);
6588 /* remove the associated SCSI host */
6589 scsi_remove_host(ap->scsi_host);
6593 * ata_host_detach - Detach all ports of an ATA host
6594 * @host: Host to detach
6596 * Detach all ports of @host.
6599 * Kernel thread context (may sleep).
6601 void ata_host_detach(struct ata_host *host)
6605 for (i = 0; i < host->n_ports; i++)
6606 ata_port_detach(host->ports[i]);
6610 * ata_std_ports - initialize ioaddr with standard port offsets.
6611 * @ioaddr: IO address structure to be initialized
6613 * Utility function which initializes data_addr, error_addr,
6614 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6615 * device_addr, status_addr, and command_addr to standard offsets
6616 * relative to cmd_addr.
6618 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6621 void ata_std_ports(struct ata_ioports *ioaddr)
6623 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6624 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6625 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6626 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6627 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6628 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6629 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6630 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6631 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6632 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
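/* Illustrative sketch (hypothetical LLD code): a driver maps its
 * command and control blocks, sets the two base addresses and lets
 * ata_std_ports() derive the taskfile register addresses.
 */
#if 0
static void my_setup_ioaddr(struct ata_ioports *ioaddr,
			    void __iomem *cmd, void __iomem *ctl)
{
	ioaddr->cmd_addr = cmd;
	ioaddr->ctl_addr = ioaddr->altstatus_addr = ctl;
	ata_std_ports(ioaddr);		/* fills data/error/.../command */
}
#endif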
6639 * ata_pci_remove_one - PCI layer callback for device removal
6640 * @pdev: PCI device that was removed
6642 * PCI layer indicates to libata via this hook that hot-unplug or
6643 * module unload event has occurred. Detach all ports. Resource
6644 * release is handled via devres.
6647 * Inherited from PCI layer (may sleep).
6649 void ata_pci_remove_one(struct pci_dev *pdev)
6651 struct device *dev = pci_dev_to_dev(pdev);
6652 struct ata_host *host = dev_get_drvdata(dev);
6654 ata_host_detach(host);
6657 /* move to PCI subsystem */
6658 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6660 unsigned long tmp = 0;
6662 switch (bits->width) {
6665 pci_read_config_byte(pdev, bits->reg, &tmp8);
6671 pci_read_config_word(pdev, bits->reg, &tmp16);
6677 pci_read_config_dword(pdev, bits->reg, &tmp32);
6688 return (tmp == bits->val) ? 1 : 0;
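/* Illustrative sketch (hypothetical values, not any particular chip):
 * the classic use of pci_test_config_bits() is testing an IDE
 * channel-enable bit in PCI config space, e.g. from a prereset hook.
 */
#if 0
static const struct pci_bits my_enable_bit = {
	0x41, 1, 0x80, 0x80	/* reg, width (bytes), mask, val */
};

static int my_channel_enabled(struct pci_dev *pdev)
{
	return pci_test_config_bits(pdev, &my_enable_bit);
}
#endif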
6692 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6694 pci_save_state(pdev);
6695 pci_disable_device(pdev);
6697 if (mesg.event == PM_EVENT_SUSPEND)
6698 pci_set_power_state(pdev, PCI_D3hot);
6701 int ata_pci_device_do_resume(struct pci_dev *pdev)
6705 pci_set_power_state(pdev, PCI_D0);
6706 pci_restore_state(pdev);
6708 rc = pcim_enable_device(pdev);
6710 dev_printk(KERN_ERR, &pdev->dev,
6711 "failed to enable device after resume (%d)\n", rc);
6715 pci_set_master(pdev);
6719 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6721 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6724 rc = ata_host_suspend(host, mesg);
6728 ata_pci_device_do_suspend(pdev, mesg);
6733 int ata_pci_device_resume(struct pci_dev *pdev)
6735 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6738 rc = ata_pci_device_do_resume(pdev);
6740 ata_host_resume(host);
6743 #endif /* CONFIG_PM */
6745 #endif /* CONFIG_PCI */
6748 static int __init ata_init(void)
6750 ata_probe_timeout *= HZ;
6751 ata_wq = create_workqueue("ata");
6755 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6757 destroy_workqueue(ata_wq);
6761 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6765 static void __exit ata_exit(void)
6767 destroy_workqueue(ata_wq);
6768 destroy_workqueue(ata_aux_wq);
6771 subsys_initcall(ata_init);
6772 module_exit(ata_exit);
6774 static unsigned long ratelimit_time;
6775 static DEFINE_SPINLOCK(ata_ratelimit_lock);
6777 int ata_ratelimit(void)
6780 unsigned long flags;
6782 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6784 if (time_after(jiffies, ratelimit_time)) {
6786 ratelimit_time = jiffies + (HZ/5);
6790 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
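/* Illustrative usage sketch: gate a potentially noisy message so that
 * at most one message per HZ/5 window (see above) reaches the log.
 */
#if 0
static void my_warn_spurious(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
}
#endif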
6796 * ata_wait_register - wait until register value changes
6797 * @reg: IO-mapped register
6798 * @mask: Mask to apply to read register value
6799 * @val: Wait condition
6800 * @interval_msec: polling interval in milliseconds
6801 * @timeout_msec: timeout in milliseconds
6803 * Waiting for some bits of register to change is a common
6804 * operation for ATA controllers. This function reads 32bit LE
6805 * IO-mapped register @reg and tests for the following condition.
6807 * (*@reg & mask) != val
6809 * If the condition is met, it returns; otherwise, the process is
6810 * repeated after @interval_msec until timeout.
6813 * Kernel thread context (may sleep)
6816 * The final register value.
6818 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6819 unsigned long interval_msec,
6820 unsigned long timeout_msec)
6822 unsigned long timeout;
6825 tmp = ioread32(reg);
6827 /* Calculate timeout _after_ the first read to make sure
6828 * preceding writes reach the controller before starting to
6829 * eat away the timeout.
6831 timeout = jiffies + (timeout_msec * HZ) / 1000;
6833 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6834 msleep(interval_msec);
6835 tmp = ioread32(reg);
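/* Illustrative sketch (hypothetical register/bit names): wait up to a
 * second, polling every 10ms, for a ready bit to assert.  Note the
 * inverted sense: ata_wait_register() spins while (reg & mask) == val,
 * so pass the "still not ready" value as @val.
 */
#if 0
static int my_wait_ready(void __iomem *mmio)
{
	u32 tmp = ata_wait_register(mmio + MY_CTL_REG,	/* hypothetical */
				    MY_CTL_READY, 0, 10, 1000);

	return (tmp & MY_CTL_READY) ? 0 : -EBUSY;
}
#endif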
6844 static void ata_dummy_noret(struct ata_port *ap) { }
6845 static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6846 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6848 static u8 ata_dummy_check_status(struct ata_port *ap)
6853 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6855 return AC_ERR_SYSTEM;
6858 const struct ata_port_operations ata_dummy_port_ops = {
6859 .port_disable = ata_port_disable,
6860 .check_status = ata_dummy_check_status,
6861 .check_altstatus = ata_dummy_check_status,
6862 .dev_select = ata_noop_dev_select,
6863 .qc_prep = ata_noop_qc_prep,
6864 .qc_issue = ata_dummy_qc_issue,
6865 .freeze = ata_dummy_noret,
6866 .thaw = ata_dummy_noret,
6867 .error_handler = ata_dummy_noret,
6868 .post_internal_cmd = ata_dummy_qc_noret,
6869 .irq_clear = ata_dummy_noret,
6870 .port_start = ata_dummy_ret0,
6871 .port_stop = ata_dummy_noret,
6874 const struct ata_port_info ata_dummy_port_info = {
6875 .port_ops = &ata_dummy_port_ops,
6879 * libata is essentially a library of internal helper functions for
6880 * low-level ATA host controller drivers. As such, the API/ABI is
6881 * likely to change as new drivers are added and updated.
6882 * Do not depend on ABI/API stability.
6885 EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6886 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6887 EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6888 EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6889 EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6890 EXPORT_SYMBOL_GPL(ata_std_bios_param);
6891 EXPORT_SYMBOL_GPL(ata_std_ports);
6892 EXPORT_SYMBOL_GPL(ata_host_init);
6893 EXPORT_SYMBOL_GPL(ata_host_alloc);
6894 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6895 EXPORT_SYMBOL_GPL(ata_host_start);
6896 EXPORT_SYMBOL_GPL(ata_host_register);
6897 EXPORT_SYMBOL_GPL(ata_host_activate);
6898 EXPORT_SYMBOL_GPL(ata_host_detach);
6899 EXPORT_SYMBOL_GPL(ata_sg_init);
6900 EXPORT_SYMBOL_GPL(ata_sg_init_one);
6901 EXPORT_SYMBOL_GPL(ata_hsm_move);
6902 EXPORT_SYMBOL_GPL(ata_qc_complete);
6903 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6904 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6905 EXPORT_SYMBOL_GPL(ata_tf_load);
6906 EXPORT_SYMBOL_GPL(ata_tf_read);
6907 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6908 EXPORT_SYMBOL_GPL(ata_std_dev_select);
6909 EXPORT_SYMBOL_GPL(sata_print_link_status);
6910 EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6911 EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6912 EXPORT_SYMBOL_GPL(ata_check_status);
6913 EXPORT_SYMBOL_GPL(ata_altstatus);
6914 EXPORT_SYMBOL_GPL(ata_exec_command);
6915 EXPORT_SYMBOL_GPL(ata_port_start);
6916 EXPORT_SYMBOL_GPL(ata_sff_port_start);
6917 EXPORT_SYMBOL_GPL(ata_interrupt);
6918 EXPORT_SYMBOL_GPL(ata_do_set_mode);
6919 EXPORT_SYMBOL_GPL(ata_data_xfer);
6920 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6921 EXPORT_SYMBOL_GPL(ata_qc_prep);
6922 EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
6923 EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6924 EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6925 EXPORT_SYMBOL_GPL(ata_bmdma_start);
6926 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6927 EXPORT_SYMBOL_GPL(ata_bmdma_status);
6928 EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6929 EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6930 EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6931 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6932 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6933 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6934 EXPORT_SYMBOL_GPL(ata_port_probe);
6935 EXPORT_SYMBOL_GPL(ata_dev_disable);
6936 EXPORT_SYMBOL_GPL(sata_set_spd);
6937 EXPORT_SYMBOL_GPL(sata_phy_debounce);
6938 EXPORT_SYMBOL_GPL(sata_phy_resume);
6939 EXPORT_SYMBOL_GPL(sata_phy_reset);
6940 EXPORT_SYMBOL_GPL(__sata_phy_reset);
6941 EXPORT_SYMBOL_GPL(ata_bus_reset);
6942 EXPORT_SYMBOL_GPL(ata_std_prereset);
6943 EXPORT_SYMBOL_GPL(ata_std_softreset);
6944 EXPORT_SYMBOL_GPL(sata_port_hardreset);
6945 EXPORT_SYMBOL_GPL(sata_std_hardreset);
6946 EXPORT_SYMBOL_GPL(ata_std_postreset);
6947 EXPORT_SYMBOL_GPL(ata_dev_classify);
6948 EXPORT_SYMBOL_GPL(ata_dev_pair);
6949 EXPORT_SYMBOL_GPL(ata_port_disable);
6950 EXPORT_SYMBOL_GPL(ata_ratelimit);
6951 EXPORT_SYMBOL_GPL(ata_wait_register);
6952 EXPORT_SYMBOL_GPL(ata_busy_sleep);
6953 EXPORT_SYMBOL_GPL(ata_wait_ready);
6954 EXPORT_SYMBOL_GPL(ata_port_queue_task);
6955 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6956 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6957 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6958 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6959 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6960 EXPORT_SYMBOL_GPL(ata_host_intr);
6961 EXPORT_SYMBOL_GPL(sata_scr_valid);
6962 EXPORT_SYMBOL_GPL(sata_scr_read);
6963 EXPORT_SYMBOL_GPL(sata_scr_write);
6964 EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6965 EXPORT_SYMBOL_GPL(ata_port_online);
6966 EXPORT_SYMBOL_GPL(ata_port_offline);
6968 EXPORT_SYMBOL_GPL(ata_host_suspend);
6969 EXPORT_SYMBOL_GPL(ata_host_resume);
6970 #endif /* CONFIG_PM */
6971 EXPORT_SYMBOL_GPL(ata_id_string);
6972 EXPORT_SYMBOL_GPL(ata_id_c_string);
6973 EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6974 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6976 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6977 EXPORT_SYMBOL_GPL(ata_timing_compute);
6978 EXPORT_SYMBOL_GPL(ata_timing_merge);
6981 EXPORT_SYMBOL_GPL(pci_test_config_bits);
6982 EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
6983 EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
6984 EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
6985 EXPORT_SYMBOL_GPL(ata_pci_init_one);
6986 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6988 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6989 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6990 EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6991 EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6992 #endif /* CONFIG_PM */
6993 EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6994 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6995 #endif /* CONFIG_PCI */
6997 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6998 EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6999 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
7000 EXPORT_SYMBOL_GPL(ata_eng_timeout);
7001 EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
7002 EXPORT_SYMBOL_GPL(ata_port_abort);
7003 EXPORT_SYMBOL_GPL(ata_port_freeze);
7004 EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7005 EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
7006 EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7007 EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
7008 EXPORT_SYMBOL_GPL(ata_do_eh);
7009 EXPORT_SYMBOL_GPL(ata_irq_on);
7010 EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
7011 EXPORT_SYMBOL_GPL(ata_irq_ack);
7012 EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
7013 EXPORT_SYMBOL_GPL(ata_dev_try_classify);
7015 EXPORT_SYMBOL_GPL(ata_cable_40wire);
7016 EXPORT_SYMBOL_GPL(ata_cable_80wire);
7017 EXPORT_SYMBOL_GPL(ata_cable_unknown);
7018 EXPORT_SYMBOL_GPL(ata_cable_sata);