/*
 *  ahci.c - AHCI SATA support
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2004-2005 Red Hat, Inc.
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  AHCI hardware documentation:
 *  http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
 *  http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
 *
 */
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/device.h>
44 #include <linux/dmi.h>
45 #include <linux/gfp.h>
46 #include <scsi/scsi_host.h>
47 #include <scsi/scsi_cmnd.h>
48 #include <linux/libata.h>
50 #define DRV_NAME "ahci"
51 #define DRV_VERSION "3.0"
53 /* Enclosure Management Control */
54 #define EM_CTRL_MSG_TYPE 0x000f0000
56 /* Enclosure Management LED Message Type */
57 #define EM_MSG_LED_HBA_PORT 0x0000000f
58 #define EM_MSG_LED_PMP_SLOT 0x0000ff00
59 #define EM_MSG_LED_VALUE 0xffff0000
60 #define EM_MSG_LED_VALUE_ACTIVITY 0x00070000
61 #define EM_MSG_LED_VALUE_OFF 0xfff80000
62 #define EM_MSG_LED_VALUE_ON 0x00010000
64 static int ahci_skip_host_reset;
65 static int ahci_ignore_sss;
67 module_param_named(skip_host_reset, ahci_skip_host_reset, int, 0444);
68 MODULE_PARM_DESC(skip_host_reset, "skip global host reset (0=don't skip, 1=skip)");
70 module_param_named(ignore_sss, ahci_ignore_sss, int, 0444);
71 MODULE_PARM_DESC(ignore_sss, "Ignore staggered spinup flag (0=don't ignore, 1=ignore)");
73 static int ahci_enable_alpm(struct ata_port *ap,
75 static void ahci_disable_alpm(struct ata_port *ap);
76 static ssize_t ahci_led_show(struct ata_port *ap, char *buf);
77 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
79 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
85 AHCI_MAX_SG = 168, /* hardware max is 64K */
86 AHCI_DMA_BOUNDARY = 0xffffffff,
89 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
91 AHCI_CMD_TBL_CDB = 0x40,
92 AHCI_CMD_TBL_HDR_SZ = 0x80,
93 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
94 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
95 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
97 AHCI_PORT_PRIV_FBS_DMA_SZ = AHCI_CMD_SLOT_SZ +
99 (AHCI_RX_FIS_SZ * 16),
100 AHCI_IRQ_ON_SG = (1 << 31),
101 AHCI_CMD_ATAPI = (1 << 5),
102 AHCI_CMD_WRITE = (1 << 6),
103 AHCI_CMD_PREFETCH = (1 << 7),
104 AHCI_CMD_RESET = (1 << 8),
105 AHCI_CMD_CLR_BUSY = (1 << 10),
107 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
108 RX_FIS_SDB = 0x58, /* offset of SDB FIS data */
109 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
112 board_ahci_vt8251 = 1,
113 board_ahci_ign_iferr = 2,
114 board_ahci_sb600 = 3,
116 board_ahci_sb700 = 5, /* for SB700 and SB800 */
117 board_ahci_mcp65 = 6,
118 board_ahci_nopmp = 7,
119 board_ahci_yesncq = 8,
120 board_ahci_nosntf = 9,
122 /* global controller registers */
123 HOST_CAP = 0x00, /* host capabilities */
124 HOST_CTL = 0x04, /* global host control */
125 HOST_IRQ_STAT = 0x08, /* interrupt status */
126 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
127 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
128 HOST_EM_LOC = 0x1c, /* Enclosure Management location */
129 HOST_EM_CTL = 0x20, /* Enclosure Management Control */
130 HOST_CAP2 = 0x24, /* host capabilities, extended */
133 HOST_RESET = (1 << 0), /* reset controller; self-clear */
134 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
135 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
138 HOST_CAP_SXS = (1 << 5), /* Supports External SATA */
139 HOST_CAP_EMS = (1 << 6), /* Enclosure Management support */
140 HOST_CAP_CCC = (1 << 7), /* Command Completion Coalescing */
141 HOST_CAP_PART = (1 << 13), /* Partial state capable */
142 HOST_CAP_SSC = (1 << 14), /* Slumber state capable */
143 HOST_CAP_PIO_MULTI = (1 << 15), /* PIO multiple DRQ support */
144 HOST_CAP_FBS = (1 << 16), /* FIS-based switching support */
145 HOST_CAP_PMP = (1 << 17), /* Port Multiplier support */
146 HOST_CAP_ONLY = (1 << 18), /* Supports AHCI mode only */
147 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
148 HOST_CAP_LED = (1 << 25), /* Supports activity LED */
149 HOST_CAP_ALPM = (1 << 26), /* Aggressive Link PM support */
150 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
151 HOST_CAP_MPS = (1 << 28), /* Mechanical presence switch */
152 HOST_CAP_SNTF = (1 << 29), /* SNotification register */
153 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
154 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
157 HOST_CAP2_BOH = (1 << 0), /* BIOS/OS handoff supported */
158 HOST_CAP2_NVMHCI = (1 << 1), /* NVMHCI supported */
159 HOST_CAP2_APST = (1 << 2), /* Automatic partial to slumber */
161 /* registers for each SATA port */
162 PORT_LST_ADDR = 0x00, /* command list DMA addr */
163 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
164 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
165 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
166 PORT_IRQ_STAT = 0x10, /* interrupt status */
167 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
168 PORT_CMD = 0x18, /* port command */
169 PORT_TFDATA = 0x20, /* taskfile data */
170 PORT_SIG = 0x24, /* device TF signature */
171 PORT_CMD_ISSUE = 0x38, /* command issue */
172 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
173 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
174 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
175 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
176 PORT_SCR_NTF = 0x3c, /* SATA phy register: SNotification */
177 PORT_FBS = 0x40, /* FIS-based Switching */
179 /* PORT_IRQ_{STAT,MASK} bits */
180 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
181 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
182 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
183 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
184 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
185 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
186 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
187 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
189 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
190 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
191 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
192 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
193 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
194 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
195 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
196 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
197 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
199 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
205 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
207 PORT_IRQ_HBUS_DATA_ERR,
208 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
209 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
210 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
213 PORT_CMD_ASP = (1 << 27), /* Aggressive Slumber/Partial */
214 PORT_CMD_ALPE = (1 << 26), /* Aggressive Link PM enable */
215 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
216 PORT_CMD_FBSCP = (1 << 22), /* FBS Capable Port */
217 PORT_CMD_PMP = (1 << 17), /* PMP attached */
218 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
219 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
220 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
221 PORT_CMD_CLO = (1 << 3), /* Command list override */
222 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
223 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
224 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
226 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
227 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
228 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
229 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
231 PORT_FBS_DWE_OFFSET = 16, /* FBS device with error offset */
232 PORT_FBS_ADO_OFFSET = 12, /* FBS active dev optimization offset */
233 PORT_FBS_DEV_OFFSET = 8, /* FBS device to issue offset */
234 PORT_FBS_DEV_MASK = (0xf << PORT_FBS_DEV_OFFSET), /* FBS.DEV */
235 PORT_FBS_SDE = (1 << 2), /* FBS single device error */
236 PORT_FBS_DEC = (1 << 1), /* FBS device error clear */
237 PORT_FBS_EN = (1 << 0), /* Enable FBS */
239 /* hpriv->flags bits */
240 AHCI_HFLAG_NO_NCQ = (1 << 0),
241 AHCI_HFLAG_IGN_IRQ_IF_ERR = (1 << 1), /* ignore IRQ_IF_ERR */
242 AHCI_HFLAG_IGN_SERR_INTERNAL = (1 << 2), /* ignore SERR_INTERNAL */
243 AHCI_HFLAG_32BIT_ONLY = (1 << 3), /* force 32bit */
244 AHCI_HFLAG_MV_PATA = (1 << 4), /* PATA port */
245 AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */
246 AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
247 AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
248 AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
249 AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
250 AHCI_HFLAG_NO_SUSPEND = (1 << 10), /* don't suspend */
251 AHCI_HFLAG_SRST_TOUT_IS_OFFLINE = (1 << 11), /* treat SRST timeout as
253 AHCI_HFLAG_NO_SNTF = (1 << 12), /* no sntf */
257 AHCI_FLAG_COMMON = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
258 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
259 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN |
262 ICH_MAP = 0x90, /* ICH MAP register */
269 EM_CTL_RST = (1 << 9), /* Reset */
270 EM_CTL_TM = (1 << 8), /* Transmit Message */
271 EM_CTL_ALHD = (1 << 26), /* Activity LED */
274 struct ahci_cmd_hdr {
289 struct ahci_em_priv {
290 enum sw_activity blink_policy;
291 struct timer_list timer;
292 unsigned long saved_activity;
293 unsigned long activity;
294 unsigned long led_state;
297 struct ahci_host_priv {
298 void __iomem * mmio; /* bus-independant mem map */
299 unsigned int flags; /* AHCI_HFLAG_* */
300 u32 cap; /* cap to use */
301 u32 cap2; /* cap2 to use */
302 u32 port_map; /* port map to use */
303 u32 saved_cap; /* saved initial cap */
304 u32 saved_cap2; /* saved initial cap2 */
305 u32 saved_port_map; /* saved initial port_map */
306 u32 em_loc; /* enclosure management location */
309 struct ahci_port_priv {
310 struct ata_link *active_link;
311 struct ahci_cmd_hdr *cmd_slot;
312 dma_addr_t cmd_slot_dma;
314 dma_addr_t cmd_tbl_dma;
316 dma_addr_t rx_fis_dma;
317 /* for NCQ spurious interrupt analysis */
318 unsigned int ncq_saw_d2h:1;
319 unsigned int ncq_saw_dmas:1;
320 unsigned int ncq_saw_sdb:1;
321 u32 intr_mask; /* interrupts to enable */
322 bool fbs_supported; /* set iff FBS is supported */
323 bool fbs_enabled; /* set iff FBS is enabled */
324 int fbs_last_dev; /* save FBS.DEV of last FIS */
325 /* enclosure management info per PM slot */
326 struct ahci_em_priv em_priv[EM_MAX_SLOTS];
329 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
330 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
331 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
332 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
333 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
334 static int ahci_port_start(struct ata_port *ap);
335 static void ahci_port_stop(struct ata_port *ap);
336 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc);
337 static void ahci_qc_prep(struct ata_queued_cmd *qc);
338 static void ahci_freeze(struct ata_port *ap);
339 static void ahci_thaw(struct ata_port *ap);
340 static void ahci_enable_fbs(struct ata_port *ap);
341 static void ahci_disable_fbs(struct ata_port *ap);
342 static void ahci_pmp_attach(struct ata_port *ap);
343 static void ahci_pmp_detach(struct ata_port *ap);
344 static int ahci_softreset(struct ata_link *link, unsigned int *class,
345 unsigned long deadline);
346 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
347 unsigned long deadline);
348 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
349 unsigned long deadline);
350 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
351 unsigned long deadline);
352 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
353 unsigned long deadline);
354 static void ahci_postreset(struct ata_link *link, unsigned int *class);
355 static void ahci_error_handler(struct ata_port *ap);
356 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
357 static int ahci_port_resume(struct ata_port *ap);
358 static void ahci_dev_config(struct ata_device *dev);
359 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
362 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg);
363 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg);
364 static int ahci_pci_device_resume(struct pci_dev *pdev);
366 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf);
367 static ssize_t ahci_activity_store(struct ata_device *dev,
368 enum sw_activity val);
369 static void ahci_init_sw_activity(struct ata_link *link);
371 static ssize_t ahci_show_host_caps(struct device *dev,
372 struct device_attribute *attr, char *buf);
373 static ssize_t ahci_show_host_cap2(struct device *dev,
374 struct device_attribute *attr, char *buf);
375 static ssize_t ahci_show_host_version(struct device *dev,
376 struct device_attribute *attr, char *buf);
377 static ssize_t ahci_show_port_cmd(struct device *dev,
378 struct device_attribute *attr, char *buf);
380 static DEVICE_ATTR(ahci_host_caps, S_IRUGO, ahci_show_host_caps, NULL);
381 static DEVICE_ATTR(ahci_host_cap2, S_IRUGO, ahci_show_host_cap2, NULL);
382 static DEVICE_ATTR(ahci_host_version, S_IRUGO, ahci_show_host_version, NULL);
383 static DEVICE_ATTR(ahci_port_cmd, S_IRUGO, ahci_show_port_cmd, NULL);
385 static struct device_attribute *ahci_shost_attrs[] = {
386 &dev_attr_link_power_management_policy,
387 &dev_attr_em_message_type,
388 &dev_attr_em_message,
389 &dev_attr_ahci_host_caps,
390 &dev_attr_ahci_host_cap2,
391 &dev_attr_ahci_host_version,
392 &dev_attr_ahci_port_cmd,
396 static struct device_attribute *ahci_sdev_attrs[] = {
397 &dev_attr_sw_activity,
398 &dev_attr_unload_heads,
402 static struct scsi_host_template ahci_sht = {
403 ATA_NCQ_SHT(DRV_NAME),
404 .can_queue = AHCI_MAX_CMDS - 1,
405 .sg_tablesize = AHCI_MAX_SG,
406 .dma_boundary = AHCI_DMA_BOUNDARY,
407 .shost_attrs = ahci_shost_attrs,
408 .sdev_attrs = ahci_sdev_attrs,
411 static struct ata_port_operations ahci_ops = {
412 .inherits = &sata_pmp_port_ops,
414 .qc_defer = ahci_pmp_qc_defer,
415 .qc_prep = ahci_qc_prep,
416 .qc_issue = ahci_qc_issue,
417 .qc_fill_rtf = ahci_qc_fill_rtf,
419 .freeze = ahci_freeze,
421 .softreset = ahci_softreset,
422 .hardreset = ahci_hardreset,
423 .postreset = ahci_postreset,
424 .pmp_softreset = ahci_softreset,
425 .error_handler = ahci_error_handler,
426 .post_internal_cmd = ahci_post_internal_cmd,
427 .dev_config = ahci_dev_config,
429 .scr_read = ahci_scr_read,
430 .scr_write = ahci_scr_write,
431 .pmp_attach = ahci_pmp_attach,
432 .pmp_detach = ahci_pmp_detach,
434 .enable_pm = ahci_enable_alpm,
435 .disable_pm = ahci_disable_alpm,
436 .em_show = ahci_led_show,
437 .em_store = ahci_led_store,
438 .sw_activity_show = ahci_activity_show,
439 .sw_activity_store = ahci_activity_store,
441 .port_suspend = ahci_port_suspend,
442 .port_resume = ahci_port_resume,
444 .port_start = ahci_port_start,
445 .port_stop = ahci_port_stop,
448 static struct ata_port_operations ahci_vt8251_ops = {
449 .inherits = &ahci_ops,
450 .hardreset = ahci_vt8251_hardreset,
453 static struct ata_port_operations ahci_p5wdh_ops = {
454 .inherits = &ahci_ops,
455 .hardreset = ahci_p5wdh_hardreset,
458 static struct ata_port_operations ahci_sb600_ops = {
459 .inherits = &ahci_ops,
460 .softreset = ahci_sb600_softreset,
461 .pmp_softreset = ahci_sb600_softreset,
464 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
466 static const struct ata_port_info ahci_port_info[] = {
469 .flags = AHCI_FLAG_COMMON,
470 .pio_mask = ATA_PIO4,
471 .udma_mask = ATA_UDMA6,
472 .port_ops = &ahci_ops,
474 [board_ahci_vt8251] =
476 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_PMP),
477 .flags = AHCI_FLAG_COMMON,
478 .pio_mask = ATA_PIO4,
479 .udma_mask = ATA_UDMA6,
480 .port_ops = &ahci_vt8251_ops,
482 [board_ahci_ign_iferr] =
484 AHCI_HFLAGS (AHCI_HFLAG_IGN_IRQ_IF_ERR),
485 .flags = AHCI_FLAG_COMMON,
486 .pio_mask = ATA_PIO4,
487 .udma_mask = ATA_UDMA6,
488 .port_ops = &ahci_ops,
492 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
493 AHCI_HFLAG_NO_MSI | AHCI_HFLAG_SECT255 |
494 AHCI_HFLAG_32BIT_ONLY),
495 .flags = AHCI_FLAG_COMMON,
496 .pio_mask = ATA_PIO4,
497 .udma_mask = ATA_UDMA6,
498 .port_ops = &ahci_sb600_ops,
502 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ | AHCI_HFLAG_NO_MSI |
503 AHCI_HFLAG_MV_PATA | AHCI_HFLAG_NO_PMP),
504 .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
505 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
506 .pio_mask = ATA_PIO4,
507 .udma_mask = ATA_UDMA6,
508 .port_ops = &ahci_ops,
510 [board_ahci_sb700] = /* for SB700 and SB800 */
512 AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
513 .flags = AHCI_FLAG_COMMON,
514 .pio_mask = ATA_PIO4,
515 .udma_mask = ATA_UDMA6,
516 .port_ops = &ahci_sb600_ops,
520 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
521 .flags = AHCI_FLAG_COMMON,
522 .pio_mask = ATA_PIO4,
523 .udma_mask = ATA_UDMA6,
524 .port_ops = &ahci_ops,
528 AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
529 .flags = AHCI_FLAG_COMMON,
530 .pio_mask = ATA_PIO4,
531 .udma_mask = ATA_UDMA6,
532 .port_ops = &ahci_ops,
534 [board_ahci_yesncq] =
536 AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
537 .flags = AHCI_FLAG_COMMON,
538 .pio_mask = ATA_PIO4,
539 .udma_mask = ATA_UDMA6,
540 .port_ops = &ahci_ops,
542 [board_ahci_nosntf] =
544 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
545 .flags = AHCI_FLAG_COMMON,
546 .pio_mask = ATA_PIO4,
547 .udma_mask = ATA_UDMA6,
548 .port_ops = &ahci_ops,
552 static const struct pci_device_id ahci_pci_tbl[] = {
554 { PCI_VDEVICE(INTEL, 0x2652), board_ahci }, /* ICH6 */
555 { PCI_VDEVICE(INTEL, 0x2653), board_ahci }, /* ICH6M */
556 { PCI_VDEVICE(INTEL, 0x27c1), board_ahci }, /* ICH7 */
557 { PCI_VDEVICE(INTEL, 0x27c5), board_ahci }, /* ICH7M */
558 { PCI_VDEVICE(INTEL, 0x27c3), board_ahci }, /* ICH7R */
559 { PCI_VDEVICE(AL, 0x5288), board_ahci_ign_iferr }, /* ULi M5288 */
560 { PCI_VDEVICE(INTEL, 0x2681), board_ahci }, /* ESB2 */
561 { PCI_VDEVICE(INTEL, 0x2682), board_ahci }, /* ESB2 */
562 { PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
563 { PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
564 { PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
565 { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
566 { PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
567 { PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
568 { PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
569 { PCI_VDEVICE(INTEL, 0x2922), board_ahci }, /* ICH9 */
570 { PCI_VDEVICE(INTEL, 0x2923), board_ahci }, /* ICH9 */
571 { PCI_VDEVICE(INTEL, 0x2924), board_ahci }, /* ICH9 */
572 { PCI_VDEVICE(INTEL, 0x2925), board_ahci }, /* ICH9 */
573 { PCI_VDEVICE(INTEL, 0x2927), board_ahci }, /* ICH9 */
574 { PCI_VDEVICE(INTEL, 0x2929), board_ahci }, /* ICH9M */
575 { PCI_VDEVICE(INTEL, 0x292a), board_ahci }, /* ICH9M */
576 { PCI_VDEVICE(INTEL, 0x292b), board_ahci }, /* ICH9M */
577 { PCI_VDEVICE(INTEL, 0x292c), board_ahci }, /* ICH9M */
578 { PCI_VDEVICE(INTEL, 0x292f), board_ahci }, /* ICH9M */
579 { PCI_VDEVICE(INTEL, 0x294d), board_ahci }, /* ICH9 */
580 { PCI_VDEVICE(INTEL, 0x294e), board_ahci }, /* ICH9M */
581 { PCI_VDEVICE(INTEL, 0x502a), board_ahci }, /* Tolapai */
582 { PCI_VDEVICE(INTEL, 0x502b), board_ahci }, /* Tolapai */
583 { PCI_VDEVICE(INTEL, 0x3a05), board_ahci }, /* ICH10 */
584 { PCI_VDEVICE(INTEL, 0x3a22), board_ahci }, /* ICH10 */
585 { PCI_VDEVICE(INTEL, 0x3a25), board_ahci }, /* ICH10 */
586 { PCI_VDEVICE(INTEL, 0x3b22), board_ahci }, /* PCH AHCI */
587 { PCI_VDEVICE(INTEL, 0x3b23), board_ahci }, /* PCH AHCI */
588 { PCI_VDEVICE(INTEL, 0x3b24), board_ahci }, /* PCH RAID */
589 { PCI_VDEVICE(INTEL, 0x3b25), board_ahci }, /* PCH RAID */
590 { PCI_VDEVICE(INTEL, 0x3b29), board_ahci }, /* PCH AHCI */
591 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
592 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
593 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
594 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
595 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
596 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
597 { PCI_VDEVICE(INTEL, 0x1c05), board_ahci }, /* CPT RAID */
598 { PCI_VDEVICE(INTEL, 0x1c06), board_ahci }, /* CPT RAID */
599 { PCI_VDEVICE(INTEL, 0x1c07), board_ahci }, /* CPT RAID */
601 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
602 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
603 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr },
606 { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */
607 { PCI_VDEVICE(ATI, 0x4390), board_ahci_sb700 }, /* ATI SB700/800 */
608 { PCI_VDEVICE(ATI, 0x4391), board_ahci_sb700 }, /* ATI SB700/800 */
609 { PCI_VDEVICE(ATI, 0x4392), board_ahci_sb700 }, /* ATI SB700/800 */
610 { PCI_VDEVICE(ATI, 0x4393), board_ahci_sb700 }, /* ATI SB700/800 */
611 { PCI_VDEVICE(ATI, 0x4394), board_ahci_sb700 }, /* ATI SB700/800 */
612 { PCI_VDEVICE(ATI, 0x4395), board_ahci_sb700 }, /* ATI SB700/800 */
615 { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */
616 /* AMD is using RAID class only for ahci controllers */
617 { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
618 PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci },
621 { PCI_VDEVICE(VIA, 0x3349), board_ahci_vt8251 }, /* VIA VT8251 */
622 { PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
625 { PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
626 { PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
627 { PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
628 { PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
629 { PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
630 { PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
631 { PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
632 { PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
633 { PCI_VDEVICE(NVIDIA, 0x0550), board_ahci_yesncq }, /* MCP67 */
634 { PCI_VDEVICE(NVIDIA, 0x0551), board_ahci_yesncq }, /* MCP67 */
635 { PCI_VDEVICE(NVIDIA, 0x0552), board_ahci_yesncq }, /* MCP67 */
636 { PCI_VDEVICE(NVIDIA, 0x0553), board_ahci_yesncq }, /* MCP67 */
637 { PCI_VDEVICE(NVIDIA, 0x0554), board_ahci_yesncq }, /* MCP67 */
638 { PCI_VDEVICE(NVIDIA, 0x0555), board_ahci_yesncq }, /* MCP67 */
639 { PCI_VDEVICE(NVIDIA, 0x0556), board_ahci_yesncq }, /* MCP67 */
640 { PCI_VDEVICE(NVIDIA, 0x0557), board_ahci_yesncq }, /* MCP67 */
641 { PCI_VDEVICE(NVIDIA, 0x0558), board_ahci_yesncq }, /* MCP67 */
642 { PCI_VDEVICE(NVIDIA, 0x0559), board_ahci_yesncq }, /* MCP67 */
643 { PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq }, /* MCP67 */
644 { PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq }, /* MCP67 */
645 { PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq }, /* Linux ID */
646 { PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq }, /* Linux ID */
647 { PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq }, /* Linux ID */
648 { PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq }, /* Linux ID */
649 { PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq }, /* Linux ID */
650 { PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq }, /* Linux ID */
651 { PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq }, /* Linux ID */
652 { PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq }, /* Linux ID */
653 { PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq }, /* Linux ID */
654 { PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq }, /* Linux ID */
655 { PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq }, /* Linux ID */
656 { PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq }, /* Linux ID */
657 { PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq }, /* Linux ID */
658 { PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq }, /* Linux ID */
659 { PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq }, /* Linux ID */
660 { PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq }, /* Linux ID */
661 { PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq }, /* MCP73 */
662 { PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq }, /* MCP73 */
663 { PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq }, /* MCP73 */
664 { PCI_VDEVICE(NVIDIA, 0x07f3), board_ahci_yesncq }, /* MCP73 */
665 { PCI_VDEVICE(NVIDIA, 0x07f4), board_ahci_yesncq }, /* MCP73 */
666 { PCI_VDEVICE(NVIDIA, 0x07f5), board_ahci_yesncq }, /* MCP73 */
667 { PCI_VDEVICE(NVIDIA, 0x07f6), board_ahci_yesncq }, /* MCP73 */
668 { PCI_VDEVICE(NVIDIA, 0x07f7), board_ahci_yesncq }, /* MCP73 */
669 { PCI_VDEVICE(NVIDIA, 0x07f8), board_ahci_yesncq }, /* MCP73 */
670 { PCI_VDEVICE(NVIDIA, 0x07f9), board_ahci_yesncq }, /* MCP73 */
671 { PCI_VDEVICE(NVIDIA, 0x07fa), board_ahci_yesncq }, /* MCP73 */
672 { PCI_VDEVICE(NVIDIA, 0x07fb), board_ahci_yesncq }, /* MCP73 */
673 { PCI_VDEVICE(NVIDIA, 0x0ad0), board_ahci }, /* MCP77 */
674 { PCI_VDEVICE(NVIDIA, 0x0ad1), board_ahci }, /* MCP77 */
675 { PCI_VDEVICE(NVIDIA, 0x0ad2), board_ahci }, /* MCP77 */
676 { PCI_VDEVICE(NVIDIA, 0x0ad3), board_ahci }, /* MCP77 */
677 { PCI_VDEVICE(NVIDIA, 0x0ad4), board_ahci }, /* MCP77 */
678 { PCI_VDEVICE(NVIDIA, 0x0ad5), board_ahci }, /* MCP77 */
679 { PCI_VDEVICE(NVIDIA, 0x0ad6), board_ahci }, /* MCP77 */
680 { PCI_VDEVICE(NVIDIA, 0x0ad7), board_ahci }, /* MCP77 */
681 { PCI_VDEVICE(NVIDIA, 0x0ad8), board_ahci }, /* MCP77 */
682 { PCI_VDEVICE(NVIDIA, 0x0ad9), board_ahci }, /* MCP77 */
683 { PCI_VDEVICE(NVIDIA, 0x0ada), board_ahci }, /* MCP77 */
684 { PCI_VDEVICE(NVIDIA, 0x0adb), board_ahci }, /* MCP77 */
685 { PCI_VDEVICE(NVIDIA, 0x0ab4), board_ahci }, /* MCP79 */
686 { PCI_VDEVICE(NVIDIA, 0x0ab5), board_ahci }, /* MCP79 */
687 { PCI_VDEVICE(NVIDIA, 0x0ab6), board_ahci }, /* MCP79 */
688 { PCI_VDEVICE(NVIDIA, 0x0ab7), board_ahci }, /* MCP79 */
689 { PCI_VDEVICE(NVIDIA, 0x0ab8), board_ahci }, /* MCP79 */
690 { PCI_VDEVICE(NVIDIA, 0x0ab9), board_ahci }, /* MCP79 */
691 { PCI_VDEVICE(NVIDIA, 0x0aba), board_ahci }, /* MCP79 */
692 { PCI_VDEVICE(NVIDIA, 0x0abb), board_ahci }, /* MCP79 */
693 { PCI_VDEVICE(NVIDIA, 0x0abc), board_ahci }, /* MCP79 */
694 { PCI_VDEVICE(NVIDIA, 0x0abd), board_ahci }, /* MCP79 */
695 { PCI_VDEVICE(NVIDIA, 0x0abe), board_ahci }, /* MCP79 */
696 { PCI_VDEVICE(NVIDIA, 0x0abf), board_ahci }, /* MCP79 */
697 { PCI_VDEVICE(NVIDIA, 0x0d84), board_ahci }, /* MCP89 */
698 { PCI_VDEVICE(NVIDIA, 0x0d85), board_ahci }, /* MCP89 */
699 { PCI_VDEVICE(NVIDIA, 0x0d86), board_ahci }, /* MCP89 */
700 { PCI_VDEVICE(NVIDIA, 0x0d87), board_ahci }, /* MCP89 */
701 { PCI_VDEVICE(NVIDIA, 0x0d88), board_ahci }, /* MCP89 */
702 { PCI_VDEVICE(NVIDIA, 0x0d89), board_ahci }, /* MCP89 */
703 { PCI_VDEVICE(NVIDIA, 0x0d8a), board_ahci }, /* MCP89 */
704 { PCI_VDEVICE(NVIDIA, 0x0d8b), board_ahci }, /* MCP89 */
705 { PCI_VDEVICE(NVIDIA, 0x0d8c), board_ahci }, /* MCP89 */
706 { PCI_VDEVICE(NVIDIA, 0x0d8d), board_ahci }, /* MCP89 */
707 { PCI_VDEVICE(NVIDIA, 0x0d8e), board_ahci }, /* MCP89 */
708 { PCI_VDEVICE(NVIDIA, 0x0d8f), board_ahci }, /* MCP89 */
711 { PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
712 { PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
713 { PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
716 { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
717 { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
720 { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */
722 /* Generic, PCI class code for AHCI */
723 { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
724 PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci },
726 { } /* terminate list */
730 static struct pci_driver ahci_pci_driver = {
732 .id_table = ahci_pci_tbl,
733 .probe = ahci_init_one,
734 .remove = ata_pci_remove_one,
736 .suspend = ahci_pci_device_suspend,
737 .resume = ahci_pci_device_resume,
741 static int ahci_em_messages = 1;
742 module_param(ahci_em_messages, int, 0444);
743 /* add other LED protocol types when they become supported */
744 MODULE_PARM_DESC(ahci_em_messages,
745 "Set AHCI Enclosure Management Message type (0 = disabled, 1 = LED");
747 #if defined(CONFIG_PATA_MARVELL) || defined(CONFIG_PATA_MARVELL_MODULE)
748 static int marvell_enable;
750 static int marvell_enable = 1;
752 module_param(marvell_enable, int, 0644);
753 MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
756 static inline int ahci_nr_ports(u32 cap)
758 return (cap & 0x1f) + 1;
761 static inline void __iomem *__ahci_port_base(struct ata_host *host,
762 unsigned int port_no)
764 struct ahci_host_priv *hpriv = host->private_data;
765 void __iomem *mmio = hpriv->mmio;
767 return mmio + 0x100 + (port_no * 0x80);
770 static inline void __iomem *ahci_port_base(struct ata_port *ap)
772 return __ahci_port_base(ap->host, ap->port_no);
775 static void ahci_enable_ahci(void __iomem *mmio)
780 /* turn on AHCI_EN */
781 tmp = readl(mmio + HOST_CTL);
782 if (tmp & HOST_AHCI_EN)
785 /* Some controllers need AHCI_EN to be written multiple times.
786 * Try a few times before giving up.
788 for (i = 0; i < 5; i++) {
790 writel(tmp, mmio + HOST_CTL);
791 tmp = readl(mmio + HOST_CTL); /* flush && sanity check */
792 if (tmp & HOST_AHCI_EN)
800 static ssize_t ahci_show_host_caps(struct device *dev,
801 struct device_attribute *attr, char *buf)
803 struct Scsi_Host *shost = class_to_shost(dev);
804 struct ata_port *ap = ata_shost_to_port(shost);
805 struct ahci_host_priv *hpriv = ap->host->private_data;
807 return sprintf(buf, "%x\n", hpriv->cap);
810 static ssize_t ahci_show_host_cap2(struct device *dev,
811 struct device_attribute *attr, char *buf)
813 struct Scsi_Host *shost = class_to_shost(dev);
814 struct ata_port *ap = ata_shost_to_port(shost);
815 struct ahci_host_priv *hpriv = ap->host->private_data;
817 return sprintf(buf, "%x\n", hpriv->cap2);
820 static ssize_t ahci_show_host_version(struct device *dev,
821 struct device_attribute *attr, char *buf)
823 struct Scsi_Host *shost = class_to_shost(dev);
824 struct ata_port *ap = ata_shost_to_port(shost);
825 struct ahci_host_priv *hpriv = ap->host->private_data;
826 void __iomem *mmio = hpriv->mmio;
828 return sprintf(buf, "%x\n", readl(mmio + HOST_VERSION));
831 static ssize_t ahci_show_port_cmd(struct device *dev,
832 struct device_attribute *attr, char *buf)
834 struct Scsi_Host *shost = class_to_shost(dev);
835 struct ata_port *ap = ata_shost_to_port(shost);
836 void __iomem *port_mmio = ahci_port_base(ap);
838 return sprintf(buf, "%x\n", readl(port_mmio + PORT_CMD));
/**
 *	ahci_save_initial_config - Save and fixup initial config values
 *	@pdev: target PCI device
 *	@hpriv: host private area to store config values
 *
 *	Some registers containing configuration info might be setup by
 *	BIOS and might be cleared on reset.  This function saves the
 *	initial values of those registers into @hpriv such that they
 *	can be restored after controller reset.
 *
 *	If inconsistent, config values are fixed up by this function.
 *
 *	LOCKING:
 *	None.
 */
856 static void ahci_save_initial_config(struct pci_dev *pdev,
857 struct ahci_host_priv *hpriv)
859 void __iomem *mmio = hpriv->mmio;
860 u32 cap, cap2, vers, port_map;
864 /* make sure AHCI mode is enabled before accessing CAP */
865 ahci_enable_ahci(mmio);
867 /* Values prefixed with saved_ are written back to host after
868 * reset. Values without are used for driver operation.
870 hpriv->saved_cap = cap = readl(mmio + HOST_CAP);
871 hpriv->saved_port_map = port_map = readl(mmio + HOST_PORTS_IMPL);
873 /* CAP2 register is only defined for AHCI 1.2 and later */
874 vers = readl(mmio + HOST_VERSION);
875 if ((vers >> 16) > 1 ||
876 ((vers >> 16) == 1 && (vers & 0xFFFF) >= 0x200))
877 hpriv->saved_cap2 = cap2 = readl(mmio + HOST_CAP2);
879 hpriv->saved_cap2 = cap2 = 0;
881 /* some chips have errata preventing 64bit use */
882 if ((cap & HOST_CAP_64) && (hpriv->flags & AHCI_HFLAG_32BIT_ONLY)) {
883 dev_printk(KERN_INFO, &pdev->dev,
884 "controller can't do 64bit DMA, forcing 32bit\n");
888 if ((cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_NO_NCQ)) {
889 dev_printk(KERN_INFO, &pdev->dev,
890 "controller can't do NCQ, turning off CAP_NCQ\n");
891 cap &= ~HOST_CAP_NCQ;
894 if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
895 dev_printk(KERN_INFO, &pdev->dev,
896 "controller can do NCQ, turning on CAP_NCQ\n");
900 if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
901 dev_printk(KERN_INFO, &pdev->dev,
902 "controller can't do PMP, turning off CAP_PMP\n");
903 cap &= ~HOST_CAP_PMP;
906 if ((cap & HOST_CAP_SNTF) && (hpriv->flags & AHCI_HFLAG_NO_SNTF)) {
907 dev_printk(KERN_INFO, &pdev->dev,
908 "controller can't do SNTF, turning off CAP_SNTF\n");
909 cap &= ~HOST_CAP_SNTF;
912 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
914 dev_printk(KERN_INFO, &pdev->dev,
915 "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
921 * Temporary Marvell 6145 hack: PATA port presence
922 * is asserted through the standard AHCI port
923 * presence register, as bit 4 (counting from 0)
925 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
926 if (pdev->device == 0x6121)
930 dev_printk(KERN_ERR, &pdev->dev,
931 "MV_AHCI HACK: port_map %x -> %x\n",
934 dev_printk(KERN_ERR, &pdev->dev,
935 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
940 /* cross check port_map and cap.n_ports */
944 for (i = 0; i < AHCI_MAX_PORTS; i++)
945 if (port_map & (1 << i))
948 /* If PI has more ports than n_ports, whine, clear
949 * port_map and let it be generated from n_ports.
951 if (map_ports > ahci_nr_ports(cap)) {
952 dev_printk(KERN_WARNING, &pdev->dev,
953 "implemented port map (0x%x) contains more "
954 "ports than nr_ports (%u), using nr_ports\n",
955 port_map, ahci_nr_ports(cap));
960 /* fabricate port_map from cap.nr_ports */
962 port_map = (1 << ahci_nr_ports(cap)) - 1;
963 dev_printk(KERN_WARNING, &pdev->dev,
964 "forcing PORTS_IMPL to 0x%x\n", port_map);
966 /* write the fixed up value to the PI register */
967 hpriv->saved_port_map = port_map;
970 /* record values to use during operation */
973 hpriv->port_map = port_map;
977 * ahci_restore_initial_config - Restore initial config
978 * @host: target ATA host
980 * Restore initial config stored by ahci_save_initial_config().
985 static void ahci_restore_initial_config(struct ata_host *host)
987 struct ahci_host_priv *hpriv = host->private_data;
988 void __iomem *mmio = hpriv->mmio;
990 writel(hpriv->saved_cap, mmio + HOST_CAP);
/* CAP2 only exists on AHCI 1.2+; a saved value of 0 means "not present" */
991 if (hpriv->saved_cap2)
992 writel(hpriv->saved_cap2, mmio + HOST_CAP2);
993 writel(hpriv->saved_port_map, mmio + HOST_PORTS_IMPL)
994 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
/* Map a libata SCR register index to its AHCI port MMIO offset.
 * SCR_NOTIFICATION is only valid when the HBA advertises SNTF support.
 * NOTE(review): the "return 0" fallthrough for invalid sc_reg is elided in
 * this excerpt.
 */
997 static unsigned ahci_scr_offset(struct ata_port *ap, unsigned int sc_reg)
999 static const int offset[] = {
1000 [SCR_STATUS] = PORT_SCR_STAT,
1001 [SCR_CONTROL] = PORT_SCR_CTL,
1002 [SCR_ERROR] = PORT_SCR_ERR,
1003 [SCR_ACTIVE] = PORT_SCR_ACT,
1004 [SCR_NOTIFICATION] = PORT_SCR_NTF,
1006 struct ahci_host_priv *hpriv = ap->host->private_data;
1008 if (sc_reg < ARRAY_SIZE(offset) &&
1009 (sc_reg != SCR_NOTIFICATION || (hpriv->cap & HOST_CAP_SNTF)))
1010 return offset[sc_reg];
/* Read an SCR register through its MMIO offset; offset 0 (invalid) is
 * handled by the elided error path in the full file. */
1014 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
1016 void __iomem *port_mmio = ahci_port_base(link->ap);
1017 int offset = ahci_scr_offset(link->ap, sc_reg);
1020 *val = readl(port_mmio + offset);
/* Write an SCR register through its MMIO offset (counterpart of
 * ahci_scr_read above). */
1026 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
1028 void __iomem *port_mmio = ahci_port_base(link->ap);
1029 int offset = ahci_scr_offset(link->ap, sc_reg);
1032 writel(val, port_mmio + offset);
/* Start the port's DMA engine by setting PORT_CMD_START (read-modify-write). */
1038 static void ahci_start_engine(struct ata_port *ap)
1040 void __iomem *port_mmio = ahci_port_base(ap);
1044 tmp = readl(port_mmio + PORT_CMD);
1045 tmp |= PORT_CMD_START;
1046 writel(tmp, port_mmio + PORT_CMD);
1047 readl(port_mmio + PORT_CMD); /* flush */
/* Stop the port's DMA engine: clear PORT_CMD_START and wait for the HBA to
 * drop PORT_CMD_LIST_ON (up to 500ms per the AHCI spec). Returns non-zero
 * (elided here) if the engine fails to stop. */
1050 static int ahci_stop_engine(struct ata_port *ap)
1052 void __iomem *port_mmio = ahci_port_base(ap);
1055 tmp = readl(port_mmio + PORT_CMD);
1057 /* check if the HBA is idle */
1058 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
1061 /* setting HBA to idle */
1062 tmp &= ~PORT_CMD_START;
1063 writel(tmp, port_mmio + PORT_CMD);
1065 /* wait for engine to stop. This could be as long as 500 msec */
1066 tmp = ata_wait_register(port_mmio + PORT_CMD,
1067 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
1068 if (tmp & PORT_CMD_LIST_ON)
/* Program the command-list and received-FIS DMA addresses into the port and
 * enable FIS reception. High dwords are written only on 64-bit capable HBAs;
 * the (x >> 16) >> 16 form avoids UB when dma_addr_t is 32 bits wide. */
1074 static void ahci_start_fis_rx(struct ata_port *ap)
1076 void __iomem *port_mmio = ahci_port_base(ap);
1077 struct ahci_host_priv *hpriv = ap->host->private_data;
1078 struct ahci_port_priv *pp = ap->private_data;
1081 /* set FIS registers */
1082 if (hpriv->cap & HOST_CAP_64)
1083 writel((pp->cmd_slot_dma >> 16) >> 16,
1084 port_mmio + PORT_LST_ADDR_HI);
1085 writel(pp->cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
1087 if (hpriv->cap & HOST_CAP_64)
1088 writel((pp->rx_fis_dma >> 16) >> 16,
1089 port_mmio + PORT_FIS_ADDR_HI);
1090 writel(pp->rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
1092 /* enable FIS reception */
1093 tmp = readl(port_mmio + PORT_CMD);
1094 tmp |= PORT_CMD_FIS_RX;
1095 writel(tmp, port_mmio + PORT_CMD);
1098 readl(port_mmio + PORT_CMD); /* flush posted write */
/* Disable FIS reception and wait for PORT_CMD_FIS_ON to clear. Returns
 * non-zero (elided here) on timeout. */
1101 static int ahci_stop_fis_rx(struct ata_port *ap)
1103 void __iomem *port_mmio = ahci_port_base(ap);
1106 /* disable FIS reception */
1107 tmp = readl(port_mmio + PORT_CMD);
1108 tmp &= ~PORT_CMD_FIS_RX;
1109 writel(tmp, port_mmio + PORT_CMD);
1111 /* wait for completion, spec says 500ms, give it 1000 */
1112 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
1113 PORT_CMD_FIS_ON, 10, 1000);
1114 if (tmp & PORT_CMD_FIS_ON)
/* Spin up the device (if staggered spin-up is supported) and force the
 * interface into the ACTIVE interface-communication-control state. */
1120 static void ahci_power_up(struct ata_port *ap)
1122 struct ahci_host_priv *hpriv = ap->host->private_data;
1123 void __iomem *port_mmio = ahci_port_base(ap);
1126 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1128 /* spin up device */
1129 if (hpriv->cap & HOST_CAP_SSS) {
1130 cmd |= PORT_CMD_SPIN_UP;
1131 writel(cmd, port_mmio + PORT_CMD);
1135 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
/* Turn off aggressive link power management on this port: clear ALPE/ASP,
 * force the link back to ACTIVE, drop the stale PhyRdy state left over from
 * low-power transitions, and re-enable PhyRdy (hotplug) interrupts. */
1138 static void ahci_disable_alpm(struct ata_port *ap)
1140 struct ahci_host_priv *hpriv = ap->host->private_data;
1141 void __iomem *port_mmio = ahci_port_base(ap);
1143 struct ahci_port_priv *pp = ap->private_data;
1145 /* IPM bits should be disabled by libata-core */
1146 /* get the existing command bits */
1147 cmd = readl(port_mmio + PORT_CMD);
1149 /* disable ALPM and ASP */
1150 cmd &= ~PORT_CMD_ASP;
1151 cmd &= ~PORT_CMD_ALPE;
1153 /* force the interface back to active */
1154 cmd |= PORT_CMD_ICC_ACTIVE;
1156 /* write out new cmd value */
1157 writel(cmd, port_mmio + PORT_CMD);
1158 cmd = readl(port_mmio + PORT_CMD); /* readback flushes the write */
1160 /* wait 10ms to be sure we've come out of any low power state */
1163 /* clear out any PhyRdy stuff from interrupt status */
1164 writel(PORT_IRQ_PHYRDY, port_mmio + PORT_IRQ_STAT);
1166 /* go ahead and clean out PhyRdy Change from Serror too */
1167 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
1170 * Clear flag to indicate that we should ignore all PhyRdy
1173 hpriv->flags &= ~AHCI_HFLAG_NO_HOTPLUG;
1176 * Enable interrupts on Phy Ready.
1178 pp->intr_mask |= PORT_IRQ_PHYRDY;
1179 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1182 * don't change the link pm policy - we can be called
1183 * just to turn off link pm temporarily
/* Enable aggressive link power management per @policy. MAX_PERFORMANCE (and
 * first-time NOT_AVAILABLE) routes to ahci_disable_alpm() instead. PhyRdy
 * interrupts are masked while ALPM is active since low-power transitions
 * raise spurious ones. Returns non-zero (elided) if the HBA lacks ALPM. */
1187 static int ahci_enable_alpm(struct ata_port *ap,
1188 enum link_pm policy)
1190 struct ahci_host_priv *hpriv = ap->host->private_data;
1191 void __iomem *port_mmio = ahci_port_base(ap);
1193 struct ahci_port_priv *pp = ap->private_data;
1196 /* Make sure the host is capable of link power management */
1197 if (!(hpriv->cap & HOST_CAP_ALPM))
1201 case MAX_PERFORMANCE:
1204 * if we came here with NOT_AVAILABLE,
1205 * it just means this is the first time we
1206 * have tried to enable - default to max performance,
1207 * and let the user go to lower power modes on request.
1209 ahci_disable_alpm(ap);
1212 /* configure HBA to enter SLUMBER */
1216 /* configure HBA to enter PARTIAL */
1224 * Disable interrupts on Phy Ready. This keeps us from
1225 * getting woken up due to spurious phy ready interrupts
1226 * TBD - Hot plug should be done via polling now, is
1227 * that even supported?
1229 pp->intr_mask &= ~PORT_IRQ_PHYRDY;
1230 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
1233 * Set a flag to indicate that we should ignore all PhyRdy
1234 * state changes since these can happen now whenever we
1237 hpriv->flags |= AHCI_HFLAG_NO_HOTPLUG;
1239 /* get the existing command bits */
1240 cmd = readl(port_mmio + PORT_CMD);
1243 * Set ASP based on Policy
1248 * Setting this bit will instruct the HBA to aggressively
1249 * enter a lower power link state when it's appropriate and
1250 * based on the value set above for ASP
1252 cmd |= PORT_CMD_ALPE;
1254 /* write out new cmd value */
1255 writel(cmd, port_mmio + PORT_CMD);
1256 cmd = readl(port_mmio + PORT_CMD); /* readback flushes the write */
1258 /* IPM bits should be set by libata-core */
/* Put the port into listen mode: DET=0 in PxSCTL, then clear SUD in PxCMD.
 * Only meaningful when the HBA supports staggered spin-up (HOST_CAP_SSS). */
1263 static void ahci_power_down(struct ata_port *ap)
1265 struct ahci_host_priv *hpriv = ap->host->private_data;
1266 void __iomem *port_mmio = ahci_port_base(ap);
1269 if (!(hpriv->cap & HOST_CAP_SSS))
1272 /* put device into listen mode, first set PxSCTL.DET to 0 */
1273 scontrol = readl(port_mmio + PORT_SCR_CTL);
1275 writel(scontrol, port_mmio + PORT_SCR_CTL);
1277 /* then set PxCMD.SUD to 0 */
1278 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
1279 cmd &= ~PORT_CMD_SPIN_UP;
1280 writel(cmd, port_mmio + PORT_CMD);
/* Bring a port fully online: enable FIS RX, start the DMA engine, then (for
 * enclosure-management ports) replay each link's saved LED state, retrying
 * while the EM transmit hardware is still busy, and finally arm software
 * activity blinking where enabled. */
1284 static void ahci_start_port(struct ata_port *ap)
1286 struct ahci_port_priv *pp = ap->private_data;
1287 struct ata_link *link;
1288 struct ahci_em_priv *emp;
1292 /* enable FIS reception */
1293 ahci_start_fis_rx(ap);
1296 ahci_start_engine(ap);
1299 if (ap->flags & ATA_FLAG_EM) {
1300 ata_for_each_link(link, ap, EDGE) {
1301 emp = &pp->em_priv[link->pmp];
1303 /* EM Transmit bit maybe busy during init */
1304 for (i = 0; i < EM_MAX_RETRY; i++) {
1305 rc = ahci_transmit_led_message(ap,
1316 if (ap->flags & ATA_FLAG_SW_ACTIVITY)
1317 ata_for_each_link(link, ap, EDGE)
1318 ahci_init_sw_activity(link);
/* Quiesce a port: stop the DMA engine, then stop FIS reception. On failure
 * *emsg describes which step failed; the returned rc propagates the error. */
1322 static int ahci_deinit_port(struct ata_port *ap, const char **emsg)
1327 rc = ahci_stop_engine(ap);
1329 *emsg = "failed to stop engine";
1333 /* disable FIS reception */
1334 rc = ahci_stop_fis_rx(ap);
1336 *emsg = "failed stop FIS RX";
/* Perform a global HBA reset (unless ahci_skip_host_reset), re-enable AHCI
 * mode, restore BIOS-era config, and on Intel parts make sure the PCS port
 * enable bits at config offset 0x92 cover every implemented port. */
1343 static int ahci_reset_controller(struct ata_host *host)
1345 struct pci_dev *pdev = to_pci_dev(host->dev);
1346 struct ahci_host_priv *hpriv = host->private_data;
1347 void __iomem *mmio = hpriv->mmio;
1350 /* we must be in AHCI mode, before using anything
1351 * AHCI-specific, such as HOST_RESET.
1353 ahci_enable_ahci(mmio);
1355 /* global controller reset */
1356 if (!ahci_skip_host_reset) {
1357 tmp = readl(mmio + HOST_CTL);
1358 if ((tmp & HOST_RESET) == 0) {
1359 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1360 readl(mmio + HOST_CTL); /* flush */
1364 * to perform host reset, OS should set HOST_RESET
1365 * and poll until this bit is read to be "0".
1366 * reset must complete within 1 second, or
1367 * the hardware should be considered fried.
1369 tmp = ata_wait_register(mmio + HOST_CTL, HOST_RESET,
1370 HOST_RESET, 10, 1000);
1372 if (tmp & HOST_RESET) {
1373 dev_printk(KERN_ERR, host->dev,
1374 "controller reset failed (0x%x)\n", tmp);
1378 /* turn on AHCI mode */
1379 ahci_enable_ahci(mmio);
1381 /* Some registers might be cleared on reset. Restore
1384 ahci_restore_initial_config(host);
1386 dev_printk(KERN_INFO, host->dev,
1387 "skipping global host reset\n");
1389 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1393 pci_read_config_word(pdev, 0x92, &tmp16);
1394 if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1395 tmp16 |= hpriv->port_map;
1396 pci_write_config_word(pdev, 0x92, tmp16);
/* Record I/O activity for software LED blinking; kick the blink timer if it
 * is not already pending (fires in 10ms via ahci_sw_activity_blink). */
1403 static void ahci_sw_activity(struct ata_link *link)
1405 struct ata_port *ap = link->ap;
1406 struct ahci_port_priv *pp = ap->private_data;
1407 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1409 if (!(link->flags & ATA_LFLAG_SW_ACTIVITY))
1413 if (!timer_pending(&emp->timer))
1414 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10));
/* Timer callback: toggle the activity LED while I/O is ongoing (re-arming
 * the timer every 100ms), or settle it to the configured idle state once
 * activity stops, then transmit the resulting EM LED message. */
1417 static void ahci_sw_activity_blink(unsigned long arg)
1419 struct ata_link *link = (struct ata_link *)arg;
1420 struct ata_port *ap = link->ap;
1421 struct ahci_port_priv *pp = ap->private_data;
1422 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1423 unsigned long led_message = emp->led_state;
1424 u32 activity_led_state;
1425 unsigned long flags;
/* keep the LED value bits; re-encode port number and PMP slot */
1427 led_message &= EM_MSG_LED_VALUE;
1428 led_message |= ap->port_no | (link->pmp << 8);
1430 /* check to see if we've had activity. If so,
1431 * toggle state of LED and reset timer. If not,
1432 * turn LED to desired idle state.
1434 spin_lock_irqsave(ap->lock, flags);
1435 if (emp->saved_activity != emp->activity) {
1436 emp->saved_activity = emp->activity;
1437 /* get the current LED state */
1438 activity_led_state = led_message & EM_MSG_LED_VALUE_ON;
1440 if (activity_led_state)
1441 activity_led_state = 0;
1443 activity_led_state = 1;
1445 /* clear old state */
1446 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1449 led_message |= (activity_led_state << 16);
1450 mod_timer(&emp->timer, jiffies + msecs_to_jiffies(100));
1452 /* switch to idle */
1453 led_message &= ~EM_MSG_LED_VALUE_ACTIVITY;
1454 if (emp->blink_policy == BLINK_OFF)
1455 led_message |= (1 << 16);
1457 spin_unlock_irqrestore(ap->lock, flags);
1458 ahci_transmit_led_message(ap, led_message, 4);
/* Initialize per-link software-activity state and its blink timer; mark the
 * link for SW activity handling if a blink policy has been configured. */
1461 static void ahci_init_sw_activity(struct ata_link *link)
1463 struct ata_port *ap = link->ap;
1464 struct ahci_port_priv *pp = ap->private_data;
1465 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1467 /* init activity stats, setup timer */
1468 emp->saved_activity = emp->activity = 0;
1469 setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link);
1471 /* check our blink policy and set flag for link if it's enabled */
1472 if (emp->blink_policy)
1473 link->flags |= ATA_LFLAG_SW_ACTIVITY;
/* Reset the enclosure-management message logic; bails out (elided error
 * path) if a transmit or reset is already in flight. */
1476 static int ahci_reset_em(struct ata_host *host)
1478 struct ahci_host_priv *hpriv = host->private_data;
1479 void __iomem *mmio = hpriv->mmio;
1482 em_ctl = readl(mmio + HOST_EM_CTL);
1483 if ((em_ctl & EM_CTL_TM) || (em_ctl & EM_CTL_RST))
1486 writel(em_ctl | EM_CTL_RST, mmio + HOST_EM_CTL);
/* Build and transmit a 4-byte LED enclosure-management message for the slot
 * encoded in @state, under ap->lock. Fails (elided -EBUSY path) if the EM
 * hardware is still transmitting a previous message. The new LED state is
 * cached in emp->led_state so it can be replayed after port restart. */
1490 static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
1493 struct ahci_host_priv *hpriv = ap->host->private_data;
1494 struct ahci_port_priv *pp = ap->private_data;
1495 void __iomem *mmio = hpriv->mmio;
1497 u32 message[] = {0, 0};
1498 unsigned long flags;
1500 struct ahci_em_priv *emp;
1502 /* get the slot number from the message */
1503 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1504 if (pmp < EM_MAX_SLOTS)
1505 emp = &pp->em_priv[pmp];
1509 spin_lock_irqsave(ap->lock, flags);
1512 * if we are still busy transmitting a previous message,
1515 em_ctl = readl(mmio + HOST_EM_CTL);
1516 if (em_ctl & EM_CTL_TM) {
1517 spin_unlock_irqrestore(ap->lock, flags);
1522 * create message header - this is all zero except for
1523 * the message size, which is 4 bytes.
1525 message[0] |= (4 << 8);
1527 /* ignore 0:4 of byte zero, fill in port info yourself */
1528 message[1] = ((state & ~EM_MSG_LED_HBA_PORT) | ap->port_no);
1530 /* write message to EM_LOC */
1531 writel(message[0], mmio + hpriv->em_loc);
1532 writel(message[1], mmio + hpriv->em_loc+4);
1534 /* save off new led state for port/slot */
1535 emp->led_state = state;
1538 * tell hardware to transmit the message
1540 writel(em_ctl | EM_CTL_TM, mmio + HOST_EM_CTL);
1542 spin_unlock_irqrestore(ap->lock, flags);
/* sysfs show: print each link's cached LED state.
 * NOTE(review): every iteration writes to the start of buf, so later links
 * overwrite earlier output even though rc accumulates lengths — looks like
 * it should be sprintf(buf + rc, ...); confirm against upstream history. */
1546 static ssize_t ahci_led_show(struct ata_port *ap, char *buf)
1548 struct ahci_port_priv *pp = ap->private_data;
1549 struct ata_link *link;
1550 struct ahci_em_priv *emp;
1553 ata_for_each_link(link, ap, EDGE) {
1554 emp = &pp->em_priv[link->pmp];
1555 rc += sprintf(buf, "%lx\n", emp->led_state);
/* sysfs store: parse a numeric LED state, locate the slot it targets, mask
 * off activity bits when software-activity blinking owns them, and transmit
 * the message to the enclosure-management hardware. */
1560 static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
1565 struct ahci_port_priv *pp = ap->private_data;
1566 struct ahci_em_priv *emp;
1568 state = simple_strtoul(buf, NULL, 0);
1570 /* get the slot number from the message */
1571 pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
1572 if (pmp < EM_MAX_SLOTS)
1573 emp = &pp->em_priv[pmp];
1577 /* mask off the activity bits if we are in sw_activity
1578 * mode, user should turn off sw_activity before setting
1579 * activity led through em_message
1581 if (emp->blink_policy)
1582 state &= ~EM_MSG_LED_VALUE_ACTIVITY;
1584 return ahci_transmit_led_message(ap, state, size);
/* Configure the software-activity LED policy for @dev's link: disabling
 * turns the LED off and clears the link flag; enabling sets the flag and,
 * for BLINK_OFF policy, leaves the LED on while idle. The chosen policy is
 * cached in emp->blink_policy. */
1587 static ssize_t ahci_activity_store(struct ata_device *dev, enum sw_activity val)
1589 struct ata_link *link = dev->link;
1590 struct ata_port *ap = link->ap;
1591 struct ahci_port_priv *pp = ap->private_data;
1592 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1593 u32 port_led_state = emp->led_state;
1595 /* save the desired Activity LED behavior */
1598 link->flags &= ~(ATA_LFLAG_SW_ACTIVITY);
1600 /* set the LED to OFF */
1601 port_led_state &= EM_MSG_LED_VALUE_OFF;
1602 port_led_state |= (ap->port_no | (link->pmp << 8));
1603 ahci_transmit_led_message(ap, port_led_state, 4);
1605 link->flags |= ATA_LFLAG_SW_ACTIVITY;
1606 if (val == BLINK_OFF) {
1607 /* set LED to ON for idle */
1608 port_led_state &= EM_MSG_LED_VALUE_OFF;
1609 port_led_state |= (ap->port_no | (link->pmp << 8));
1610 port_led_state |= EM_MSG_LED_VALUE_ON; /* check this */
1611 ahci_transmit_led_message(ap, port_led_state, 4);
1614 emp->blink_policy = val;
/* sysfs show: report the cached software-activity blink policy for the
 * device's link. */
1618 static ssize_t ahci_activity_show(struct ata_device *dev, char *buf)
1620 struct ata_link *link = dev->link;
1621 struct ata_port *ap = link->ap;
1622 struct ahci_port_priv *pp = ap->private_data;
1623 struct ahci_em_priv *emp = &pp->em_priv[link->pmp];
1625 /* display the saved value of activity behavior for this
1628 return sprintf(buf, "%d\n", emp->blink_policy);
/* Put one port into a known state during controller init: deactivate it,
 * clear SError and the port's IRQ status (write-1-to-clear registers), and
 * ack the port's bit in the global HOST_IRQ_STAT. */
1631 static void ahci_port_init(struct pci_dev *pdev, struct ata_port *ap,
1632 int port_no, void __iomem *mmio,
1633 void __iomem *port_mmio)
1635 const char *emsg = NULL;
1639 /* make sure port is not active */
1640 rc = ahci_deinit_port(ap, &emsg);
1642 dev_printk(KERN_WARNING, &pdev->dev,
1643 "%s (%d)\n", emsg, rc);
1646 tmp = readl(port_mmio + PORT_SCR_ERR);
1647 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1648 writel(tmp, port_mmio + PORT_SCR_ERR);
1650 /* clear port IRQ */
1651 tmp = readl(port_mmio + PORT_IRQ_STAT);
1652 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1654 writel(tmp, port_mmio + PORT_IRQ_STAT);
1656 writel(1 << port_no, mmio + HOST_IRQ_STAT);
/* Initialize the whole controller: quiesce the Marvell PATA shadow port if
 * present (its index depends on the chip model), init every non-dummy port,
 * then enable global interrupts via HOST_IRQ_EN. */
1659 static void ahci_init_controller(struct ata_host *host)
1661 struct ahci_host_priv *hpriv = host->private_data;
1662 struct pci_dev *pdev = to_pci_dev(host->dev);
1663 void __iomem *mmio = hpriv->mmio;
1665 void __iomem *port_mmio;
1669 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
1670 if (pdev->device == 0x6121)
1674 port_mmio = __ahci_port_base(host, mv);
1676 writel(0, port_mmio + PORT_IRQ_MASK);
1678 /* clear port IRQ */
1679 tmp = readl(port_mmio + PORT_IRQ_STAT);
1680 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1682 writel(tmp, port_mmio + PORT_IRQ_STAT);
1685 for (i = 0; i < host->n_ports; i++) {
1686 struct ata_port *ap = host->ports[i];
1688 port_mmio = ahci_port_base(ap);
1689 if (ata_port_is_dummy(ap))
1692 ahci_port_init(pdev, ap, i, mmio, port_mmio);
1695 tmp = readl(mmio + HOST_CTL);
1696 VPRINTK("HOST_CTL 0x%x\n", tmp);
1697 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1698 tmp = readl(mmio + HOST_CTL);
1699 VPRINTK("HOST_CTL 0x%x\n", tmp);
/* Per-device config hook: cap transfers at 255 sectors on SB600-class HBAs
 * (AHCI_HFLAG_SECT255 errata). */
1702 static void ahci_dev_config(struct ata_device *dev)
1704 struct ahci_host_priv *hpriv = dev->link->ap->host->private_data;
1706 if (hpriv->flags & AHCI_HFLAG_SECT255) {
1707 dev->max_sectors = 255;
1708 ata_dev_printk(dev, KERN_INFO,
1709 "SB600 AHCI: limiting to 255 sectors per cmd\n");
/* Classify the attached device by unpacking the signature from PORT_SIG
 * into taskfile registers and handing it to ata_dev_classify(). */
1713 static unsigned int ahci_dev_classify(struct ata_port *ap)
1715 void __iomem *port_mmio = ahci_port_base(ap);
1716 struct ata_taskfile tf;
1719 tmp = readl(port_mmio + PORT_SIG);
1720 tf.lbah = (tmp >> 24) & 0xff;
1721 tf.lbam = (tmp >> 16) & 0xff;
1722 tf.lbal = (tmp >> 8) & 0xff;
1723 tf.nsect = (tmp) & 0xff;
1725 return ata_dev_classify(&tf);
/* Fill command-list slot @tag: options word, cleared status, and the split
 * low/high DMA address of the slot's command table. The (x >> 16) >> 16
 * form avoids a 32-bit-shift warning when dma_addr_t is 32 bits. */
1728 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
1731 dma_addr_t cmd_tbl_dma;
1733 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
1735 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
1736 pp->cmd_slot[tag].status = 0;
1737 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
1738 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
/* Recover a wedged port: stop the engine, and if the device is busy (or a
 * PMP is attached, per AHCI-1.3 9.2) issue a Command List Override, waiting
 * for PORT_CMD_CLO to self-clear; finally restart the engine. Requires
 * HOST_CAP_CLO for the override path (elided error return otherwise). */
1741 static int ahci_kick_engine(struct ata_port *ap)
1743 void __iomem *port_mmio = ahci_port_base(ap);
1744 struct ahci_host_priv *hpriv = ap->host->private_data;
1745 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1750 rc = ahci_stop_engine(ap);
1755 * always do CLO if PMP is attached (AHCI-1.3 9.2)
1757 busy = status & (ATA_BUSY | ATA_DRQ);
1758 if (!busy && !sata_pmp_attached(ap)) {
1763 if (!(hpriv->cap & HOST_CAP_CLO)) {
1769 tmp = readl(port_mmio + PORT_CMD);
1770 tmp |= PORT_CMD_CLO;
1771 writel(tmp, port_mmio + PORT_CMD);
1774 tmp = ata_wait_register(port_mmio + PORT_CMD,
1775 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
1776 if (tmp & PORT_CMD_CLO)
1779 /* restart engine */
1781 ahci_start_engine(ap);
/* Issue a single FIS in slot 0 and, if @timeout_msec is set, poll
 * PORT_CMD_ISSUE until the HBA clears it; kicks the engine on timeout
 * (elided error return). Used by the softreset paths below. */
1785 static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
1786 struct ata_taskfile *tf, int is_cmd, u16 flags,
1787 unsigned long timeout_msec)
1789 const u32 cmd_fis_len = 5; /* five dwords */
1790 struct ahci_port_priv *pp = ap->private_data;
1791 void __iomem *port_mmio = ahci_port_base(ap);
1792 u8 *fis = pp->cmd_tbl;
1795 /* prep the command */
1796 ata_tf_to_fis(tf, pmp, is_cmd, fis);
1797 ahci_fill_cmd_slot(pp, 0, cmd_fis_len | flags | (pmp << 12));
1800 writel(1, port_mmio + PORT_CMD_ISSUE);
1803 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1,
1806 ahci_kick_engine(ap);
1810 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
/* Common SRST implementation (AHCI-1.1 10.4.1): kick the engine, send the
 * first D2H register FIS with SRST set (bounded by @deadline), then the
 * second with SRST cleared, wait for readiness via @check_ready, and
 * classify the device. Controllers flagged SRST_TOUT_IS_OFFLINE treat a
 * readiness timeout as "no device" instead of an error. */
1815 static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
1816 int pmp, unsigned long deadline,
1817 int (*check_ready)(struct ata_link *link))
1819 struct ata_port *ap = link->ap;
1820 struct ahci_host_priv *hpriv = ap->host->private_data;
1821 const char *reason = NULL;
1822 unsigned long now, msecs;
1823 struct ata_taskfile tf;
1828 /* prepare for SRST (AHCI-1.1 10.4.1) */
1829 rc = ahci_kick_engine(ap);
1830 if (rc && rc != -EOPNOTSUPP)
1831 ata_link_printk(link, KERN_WARNING,
1832 "failed to reset engine (errno=%d)\n", rc);
1834 ata_tf_init(link->device, &tf);
1836 /* issue the first D2H Register FIS */
1839 if (time_after(now, deadline))
1840 msecs = jiffies_to_msecs(deadline - now);
1843 if (ahci_exec_polled_cmd(ap, pmp, &tf, 0,
1844 AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY, msecs)) {
1846 reason = "1st FIS failed";
1850 /* spec says at least 5us, but be generous and sleep for 1ms */
1853 /* issue the second D2H Register FIS */
1854 tf.ctl &= ~ATA_SRST;
1855 ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
1857 /* wait for link to become ready */
1858 rc = ata_wait_after_reset(link, deadline, check_ready);
1859 if (rc == -EBUSY && hpriv->flags & AHCI_HFLAG_SRST_TOUT_IS_OFFLINE) {
1861 * Workaround for cases where link online status can't
1862 * be trusted. Treat device readiness timeout as link
1865 ata_link_printk(link, KERN_INFO,
1866 "device not ready, treating as offline\n");
1867 *class = ATA_DEV_NONE;
1869 /* link occupied, -ENODEV too is an error */
1870 reason = "device not ready";
1873 *class = ahci_dev_classify(ap);
1875 DPRINTK("EXIT, class=%u\n", *class);
1879 ata_link_printk(link, KERN_ERR, "softreset failed (%s)\n", reason);
/* Readiness predicate: evaluate the low byte of PORT_TFDATA (ATA status). */
1883 static int ahci_check_ready(struct ata_link *link)
1885 void __iomem *port_mmio = ahci_port_base(link->ap);
1886 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1888 return ata_check_ready(status);
/* Default softreset: delegate to ahci_do_softreset() with the PMP target
 * chosen by sata_srst_pmp() and the standard readiness check. */
1891 static int ahci_softreset(struct ata_link *link, unsigned int *class,
1892 unsigned long deadline)
1894 int pmp = sata_srst_pmp(link);
1898 return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
/* SB600 readiness check: short-circuit on PORT_IRQ_BAD_PMP (HW bug makes
 * TFDATA unreliable in that case) before falling back to the ATA status. */
1901 static int ahci_sb600_check_ready(struct ata_link *link)
1903 void __iomem *port_mmio = ahci_port_base(link->ap);
1904 u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
1905 u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
1908 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
1909 * which can save timeout delay.
1911 if (irq_status & PORT_IRQ_BAD_PMP)
1914 return ata_check_ready(status);
/* SB600 softreset: run the common softreset with the SB600 readiness check;
 * if it failed because the chip raised BAD_PMP with a plain device behind a
 * PMP-enabled port, retry the softreset against PMP 0 as a workaround. */
1917 static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
1918 unsigned long deadline)
1920 struct ata_port *ap = link->ap;
1921 void __iomem *port_mmio = ahci_port_base(ap);
1922 int pmp = sata_srst_pmp(link);
1928 rc = ahci_do_softreset(link, class, pmp, deadline,
1929 ahci_sb600_check_ready);
1932 * Soft reset fails on some ATI chips with IPMS set when PMP
1933 * is enabled but SATA HDD/ODD is connected to SATA port,
1934 * do soft reset again to port 0.
1937 irq_sts = readl(port_mmio + PORT_IRQ_STAT);
1938 if (irq_sts & PORT_IRQ_BAD_PMP) {
1939 ata_link_printk(link, KERN_WARNING,
1940 "applying SB600 PMP SRST workaround "
1942 rc = ahci_do_softreset(link, class, 0, deadline,
/* Standard hardreset: stop the engine, clear the D2H RX area (so the post-
 * reset signature FIS can be waited for), drive the PHY reset via
 * sata_link_hardreset(), restart the engine, and classify if online. */
1950 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
1951 unsigned long deadline)
1953 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
1954 struct ata_port *ap = link->ap;
1955 struct ahci_port_priv *pp = ap->private_data;
1956 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
1957 struct ata_taskfile tf;
1963 ahci_stop_engine(ap);
1965 /* clear D2H reception area to properly wait for D2H FIS */
1966 ata_tf_init(link->device, &tf);
1968 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
1970 rc = sata_link_hardreset(link, timing, deadline, &online,
1973 ahci_start_engine(ap);
1976 *class = ahci_dev_classify(ap);
1978 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
/* VT8251 hardreset: same PHY reset sequence, but the chip never clears BSY
 * on signature FIS reception, so request a follow-up softreset (-EAGAIN)
 * whenever the link came up online. */
1982 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
1983 unsigned long deadline)
1985 struct ata_port *ap = link->ap;
1991 ahci_stop_engine(ap);
1993 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
1994 deadline, &online, NULL);
1996 ahci_start_engine(ap);
1998 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
2000 /* vt8251 doesn't clear BSY on signature FIS reception,
2001 * request follow-up softreset.
2003 return online ? -EAGAIN : rc;
/* ASUS P5W-DH Deluxe hardreset workaround: after the PHY reset, the board's
 * SIMG4726 pseudo-device may never send a signature FIS, so only wait 2s
 * for !BSY and fall back to CLO + direct IDENTIFY when it stays busy. */
2006 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
2007 unsigned long deadline)
2009 struct ata_port *ap = link->ap;
2010 struct ahci_port_priv *pp = ap->private_data;
2011 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2012 struct ata_taskfile tf;
2016 ahci_stop_engine(ap);
2018 /* clear D2H reception area to properly wait for D2H FIS */
2019 ata_tf_init(link->device, &tf);
2021 ata_tf_to_fis(&tf, 0, 0, d2h_fis);
2023 rc = sata_link_hardreset(link, sata_ehc_deb_timing(&link->eh_context),
2024 deadline, &online, NULL);
2026 ahci_start_engine(ap);
2028 /* The pseudo configuration device on SIMG4726 attached to
2029 * ASUS P5W-DH Deluxe doesn't send signature FIS after
2030 * hardreset if no device is attached to the first downstream
2031 * port && the pseudo device locks up on SRST w/ PMP==0. To
2032 * work around this, wait for !BSY only briefly. If BSY isn't
2033 * cleared, perform CLO and proceed to IDENTIFY (achieved by
2034 * ATA_LFLAG_NO_SRST and ATA_LFLAG_ASSUME_ATA).
2036 * Wait for two seconds. Devices attached to downstream port
2037 * which can't process the following IDENTIFY after this will
2038 * have to be reset again. For most cases, this should
2039 * suffice while making probing snappish enough.
2042 rc = ata_wait_after_reset(link, jiffies + 2 * HZ,
2045 ahci_kick_engine(ap);
/* Post-reset hook: run the standard postreset, then sync the port's
 * PORT_CMD_ATAPI bit with the device class, writing PORT_CMD only when the
 * bit actually changes. */
2050 static void ahci_postreset(struct ata_link *link, unsigned int *class)
2052 struct ata_port *ap = link->ap;
2053 void __iomem *port_mmio = ahci_port_base(ap);
2056 ata_std_postreset(link, class);
2058 /* Make sure port's ATAPI bit is set appropriately */
2059 new_tmp = tmp = readl(port_mmio + PORT_CMD);
2060 if (*class == ATA_DEV_ATAPI)
2061 new_tmp |= PORT_CMD_ATAPI;
2063 new_tmp &= ~PORT_CMD_ATAPI;
2064 if (new_tmp != tmp) {
2065 writel(new_tmp, port_mmio + PORT_CMD);
2066 readl(port_mmio + PORT_CMD); /* flush */
/* Translate the qc's DMA-mapped scatterlist into the AHCI PRD table that
 * follows the command-table header. flags_size holds byte count minus one,
 * per the AHCI PRDT format; returns the element count (elided here). */
2070 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
2072 struct scatterlist *sg;
2073 struct ahci_sg *ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
2079 * Next, the S/G list.
2081 for_each_sg(qc->sg, sg, qc->n_elem, si) {
2082 dma_addr_t addr = sg_dma_address(sg);
2083 u32 sg_len = sg_dma_len(sg);
2085 ahci_sg[si].addr = cpu_to_le32(addr & 0xffffffff);
2086 ahci_sg[si].addr_hi = cpu_to_le32((addr >> 16) >> 16);
2087 ahci_sg[si].flags_size = cpu_to_le32(sg_len - 1);
/* qc_defer hook: with no PMP, or with FIS-based switching enabled, the
 * standard defer logic suffices; otherwise use the PMP command-switching
 * defer which serializes commands across PMP links. */
2093 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc)
2095 struct ata_port *ap = qc->ap;
2096 struct ahci_port_priv *pp = ap->private_data;
2098 if (!sata_pmp_attached(ap) || pp->fbs_enabled)
2099 return ata_std_qc_defer(qc);
2101 return sata_pmp_qc_defer_cmd_switch(qc);
/* Build the command table for @qc: H2D command FIS, CDB for ATAPI, PRD
 * entries for DMA-mapped commands, then fill the command-list slot with the
 * assembled options word (FIS length, element count, PMP, direction). */
2104 static void ahci_qc_prep(struct ata_queued_cmd *qc)
2106 struct ata_port *ap = qc->ap;
2107 struct ahci_port_priv *pp = ap->private_data;
2108 int is_atapi = ata_is_atapi(qc->tf.protocol);
2111 const u32 cmd_fis_len = 5; /* five dwords */
2112 unsigned int n_elem;
2115 * Fill in command table information. First, the header,
2116 * a SATA Register - Host to Device command FIS.
2118 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
2120 ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
/* ATAPI: zero the 32-byte CDB area, then copy the actual CDB in */
2122 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
2123 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
2127 if (qc->flags & ATA_QCFLAG_DMAMAP)
2128 n_elem = ahci_fill_sg(qc, cmd_tbl);
2131 * Fill in command slot information.
2133 opts = cmd_fis_len | n_elem << 16 | (qc->dev->link->pmp << 12);
2134 if (qc->tf.flags & ATA_TFLAG_WRITE)
2135 opts |= AHCI_CMD_WRITE;
2137 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
2139 ahci_fill_cmd_slot(pp, qc->tag, opts);
/* Clear a FIS-based-switching device error by setting PORT_FBS_DEC and
 * polling until the HBA clears it; the wait time is unspecified by the
 * AHCI spec, hence the bounded retry loop. Only valid with FBS enabled. */
2142 static void ahci_fbs_dec_intr(struct ata_port *ap)
2144 struct ahci_port_priv *pp = ap->private_data;
2145 void __iomem *port_mmio = ahci_port_base(ap);
2146 u32 fbs = readl(port_mmio + PORT_FBS);
2150 BUG_ON(!pp->fbs_enabled);
2152 /* time to wait for DEC is not specified by AHCI spec,
2153 * add a retry loop for safety.
2155 writel(fbs | PORT_FBS_DEC, port_mmio + PORT_FBS);
2156 fbs = readl(port_mmio + PORT_FBS);
2157 while ((fbs & PORT_FBS_DEC) && retries--) {
2159 fbs = readl(port_mmio + PORT_FBS);
2162 if (fbs & PORT_FBS_DEC)
2163 dev_printk(KERN_ERR, ap->host->dev,
2164 "failed to clear device error\n");
2167 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
2169 struct ahci_host_priv *hpriv = ap->host->private_data;
2170 struct ahci_port_priv *pp = ap->private_data;
2171 struct ata_eh_info *host_ehi = &ap->link.eh_info;
2172 struct ata_link *link = NULL;
2173 struct ata_queued_cmd *active_qc;
2174 struct ata_eh_info *active_ehi;
2175 bool fbs_need_dec = false;
2178 /* determine active link with error */
2179 if (pp->fbs_enabled) {
2180 void __iomem *port_mmio = ahci_port_base(ap);
2181 u32 fbs = readl(port_mmio + PORT_FBS);
2182 int pmp = fbs >> PORT_FBS_DWE_OFFSET;
2184 if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) &&
2185 ata_link_online(&ap->pmp_link[pmp])) {
2186 link = &ap->pmp_link[pmp];
2187 fbs_need_dec = true;
2191 ata_for_each_link(link, ap, EDGE)
2192 if (ata_link_active(link))
2198 active_qc = ata_qc_from_tag(ap, link->active_tag);
2199 active_ehi = &link->eh_info;
2201 /* record irq stat */
2202 ata_ehi_clear_desc(host_ehi);
2203 ata_ehi_push_desc(host_ehi, "irq_stat 0x%08x", irq_stat);
2205 /* AHCI needs SError cleared; otherwise, it might lock up */
2206 ahci_scr_read(&ap->link, SCR_ERROR, &serror);
2207 ahci_scr_write(&ap->link, SCR_ERROR, serror);
2208 host_ehi->serror |= serror;
2210 /* some controllers set IRQ_IF_ERR on device errors, ignore it */
2211 if (hpriv->flags & AHCI_HFLAG_IGN_IRQ_IF_ERR)
2212 irq_stat &= ~PORT_IRQ_IF_ERR;
2214 if (irq_stat & PORT_IRQ_TF_ERR) {
2215 /* If qc is active, charge it; otherwise, the active
2216 * link. There's no active qc on NCQ errors. It will
2217 * be determined by EH by reading log page 10h.
2220 active_qc->err_mask |= AC_ERR_DEV;
2222 active_ehi->err_mask |= AC_ERR_DEV;
2224 if (hpriv->flags & AHCI_HFLAG_IGN_SERR_INTERNAL)
2225 host_ehi->serror &= ~SERR_INTERNAL;
2228 if (irq_stat & PORT_IRQ_UNK_FIS) {
2229 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
2231 active_ehi->err_mask |= AC_ERR_HSM;
2232 active_ehi->action |= ATA_EH_RESET;
2233 ata_ehi_push_desc(active_ehi,
2234 "unknown FIS %08x %08x %08x %08x" ,
2235 unk[0], unk[1], unk[2], unk[3]);
2238 if (sata_pmp_attached(ap) && (irq_stat & PORT_IRQ_BAD_PMP)) {
2239 active_ehi->err_mask |= AC_ERR_HSM;
2240 active_ehi->action |= ATA_EH_RESET;
2241 ata_ehi_push_desc(active_ehi, "incorrect PMP");
2244 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
2245 host_ehi->err_mask |= AC_ERR_HOST_BUS;
2246 host_ehi->action |= ATA_EH_RESET;
2247 ata_ehi_push_desc(host_ehi, "host bus error");
2250 if (irq_stat & PORT_IRQ_IF_ERR) {
2252 active_ehi->err_mask |= AC_ERR_DEV;
2254 host_ehi->err_mask |= AC_ERR_ATA_BUS;
2255 host_ehi->action |= ATA_EH_RESET;
2258 ata_ehi_push_desc(host_ehi, "interface fatal error");
2261 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
2262 ata_ehi_hotplugged(host_ehi);
2263 ata_ehi_push_desc(host_ehi, "%s",
2264 irq_stat & PORT_IRQ_CONNECT ?
2265 "connection status changed" : "PHY RDY changed");
2268 /* okay, let's hand over to EH */
2270 if (irq_stat & PORT_IRQ_FREEZE)
2271 ata_port_freeze(ap);
2272 else if (fbs_need_dec) {
2273 ata_link_abort(link);
2274 ahci_fbs_dec_intr(ap);
2279 static void ahci_port_intr(struct ata_port *ap)
2281 void __iomem *port_mmio = ahci_port_base(ap);
2282 struct ata_eh_info *ehi = &ap->link.eh_info;
2283 struct ahci_port_priv *pp = ap->private_data;
2284 struct ahci_host_priv *hpriv = ap->host->private_data;
2285 int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
2286 u32 status, qc_active = 0;
2289 status = readl(port_mmio + PORT_IRQ_STAT);
2290 writel(status, port_mmio + PORT_IRQ_STAT);
2292 /* ignore BAD_PMP while resetting */
2293 if (unlikely(resetting))
2294 status &= ~PORT_IRQ_BAD_PMP;
2296 /* If we are getting PhyRdy, this is
2297 * just a power state change, we should
2298 * clear out this, plus the PhyRdy/Comm
2299 * Wake bits from Serror
2301 if ((hpriv->flags & AHCI_HFLAG_NO_HOTPLUG) &&
2302 (status & PORT_IRQ_PHYRDY)) {
2303 status &= ~PORT_IRQ_PHYRDY;
2304 ahci_scr_write(&ap->link, SCR_ERROR, ((1 << 16) | (1 << 18)));
2307 if (unlikely(status & PORT_IRQ_ERROR)) {
2308 ahci_error_intr(ap, status);
2312 if (status & PORT_IRQ_SDB_FIS) {
2313 /* If SNotification is available, leave notification
2314 * handling to sata_async_notification(). If not,
2315 * emulate it by snooping SDB FIS RX area.
2317 * Snooping FIS RX area is probably cheaper than
2318 * poking SNotification but some constrollers which
2319 * implement SNotification, ICH9 for example, don't
2320 * store AN SDB FIS into receive area.
2322 if (hpriv->cap & HOST_CAP_SNTF)
2323 sata_async_notification(ap);
2325 /* If the 'N' bit in word 0 of the FIS is set,
2326 * we just received asynchronous notification.
2327 * Tell libata about it.
2329 * Lack of SNotification should not appear in
2330 * ahci 1.2, so the workaround is unnecessary
2331 * when FBS is enabled.
2333 if (pp->fbs_enabled)
2336 const __le32 *f = pp->rx_fis + RX_FIS_SDB;
2337 u32 f0 = le32_to_cpu(f[0]);
2339 sata_async_notification(ap);
2344 /* pp->active_link is not reliable once FBS is enabled, both
2345 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
2346 * NCQ and non-NCQ commands may be in flight at the same time.
2348 if (pp->fbs_enabled) {
2349 if (ap->qc_active) {
2350 qc_active = readl(port_mmio + PORT_SCR_ACT);
2351 qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
2354 /* pp->active_link is valid iff any command is in flight */
2355 if (ap->qc_active && pp->active_link->sactive)
2356 qc_active = readl(port_mmio + PORT_SCR_ACT);
2358 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
2361 rc = ata_qc_complete_multiple(ap, qc_active);
2363 /* while resetting, invalid completions are expected */
2364 if (unlikely(rc < 0 && !resetting)) {
2365 ehi->err_mask |= AC_ERR_HSM;
2366 ehi->action |= ATA_EH_RESET;
2367 ata_port_freeze(ap);
2371 static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
2373 struct ata_host *host = dev_instance;
2374 struct ahci_host_priv *hpriv;
2375 unsigned int i, handled = 0;
2377 u32 irq_stat, irq_masked;
2381 hpriv = host->private_data;
2384 /* sigh. 0xffffffff is a valid return from h/w */
2385 irq_stat = readl(mmio + HOST_IRQ_STAT);
2389 irq_masked = irq_stat & hpriv->port_map;
2391 spin_lock(&host->lock);
2393 for (i = 0; i < host->n_ports; i++) {
2394 struct ata_port *ap;
2396 if (!(irq_masked & (1 << i)))
2399 ap = host->ports[i];
2402 VPRINTK("port %u\n", i);
2404 VPRINTK("port %u (no irq)\n", i);
2405 if (ata_ratelimit())
2406 dev_printk(KERN_WARNING, host->dev,
2407 "interrupt on disabled port %u\n", i);
2413 /* HOST_IRQ_STAT behaves as level triggered latch meaning that
2414 * it should be cleared after all the port events are cleared;
2415 * otherwise, it will raise a spurious interrupt after each
2416 * valid one. Please read section 10.6.2 of ahci 1.1 for more
2419 * Also, use the unmasked value to clear interrupt as spurious
2420 * pending event on a dummy port might cause screaming IRQ.
2422 writel(irq_stat, mmio + HOST_IRQ_STAT);
2424 spin_unlock(&host->lock);
2428 return IRQ_RETVAL(handled);
2431 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
2433 struct ata_port *ap = qc->ap;
2434 void __iomem *port_mmio = ahci_port_base(ap);
2435 struct ahci_port_priv *pp = ap->private_data;
2437 /* Keep track of the currently active link. It will be used
2438 * in completion path to determine whether NCQ phase is in
2441 pp->active_link = qc->dev->link;
2443 if (qc->tf.protocol == ATA_PROT_NCQ)
2444 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
2446 if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
2447 u32 fbs = readl(port_mmio + PORT_FBS);
2448 fbs &= ~(PORT_FBS_DEV_MASK | PORT_FBS_DEC);
2449 fbs |= qc->dev->link->pmp << PORT_FBS_DEV_OFFSET;
2450 writel(fbs, port_mmio + PORT_FBS);
2451 pp->fbs_last_dev = qc->dev->link->pmp;
2454 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
2456 ahci_sw_activity(qc->dev->link);
2461 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
2463 struct ahci_port_priv *pp = qc->ap->private_data;
2464 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
2466 if (pp->fbs_enabled)
2467 d2h_fis += qc->dev->link->pmp * AHCI_RX_FIS_SZ;
2469 ata_tf_from_fis(d2h_fis, &qc->result_tf);
2473 static void ahci_freeze(struct ata_port *ap)
2475 void __iomem *port_mmio = ahci_port_base(ap);
2478 writel(0, port_mmio + PORT_IRQ_MASK);
2481 static void ahci_thaw(struct ata_port *ap)
2483 struct ahci_host_priv *hpriv = ap->host->private_data;
2484 void __iomem *mmio = hpriv->mmio;
2485 void __iomem *port_mmio = ahci_port_base(ap);
2487 struct ahci_port_priv *pp = ap->private_data;
2490 tmp = readl(port_mmio + PORT_IRQ_STAT);
2491 writel(tmp, port_mmio + PORT_IRQ_STAT);
2492 writel(1 << ap->port_no, mmio + HOST_IRQ_STAT);
2494 /* turn IRQ back on */
2495 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2498 static void ahci_error_handler(struct ata_port *ap)
2500 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
2501 /* restart engine */
2502 ahci_stop_engine(ap);
2503 ahci_start_engine(ap);
2506 sata_pmp_error_handler(ap);
2509 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
2511 struct ata_port *ap = qc->ap;
2513 /* make DMA engine forget about the failed command */
2514 if (qc->flags & ATA_QCFLAG_FAILED)
2515 ahci_kick_engine(ap);
2518 static void ahci_enable_fbs(struct ata_port *ap)
2520 struct ahci_port_priv *pp = ap->private_data;
2521 void __iomem *port_mmio = ahci_port_base(ap);
2525 if (!pp->fbs_supported)
2528 fbs = readl(port_mmio + PORT_FBS);
2529 if (fbs & PORT_FBS_EN) {
2530 pp->fbs_enabled = true;
2531 pp->fbs_last_dev = -1; /* initialization */
2535 rc = ahci_stop_engine(ap);
2539 writel(fbs | PORT_FBS_EN, port_mmio + PORT_FBS);
2540 fbs = readl(port_mmio + PORT_FBS);
2541 if (fbs & PORT_FBS_EN) {
2542 dev_printk(KERN_INFO, ap->host->dev, "FBS is enabled.\n");
2543 pp->fbs_enabled = true;
2544 pp->fbs_last_dev = -1; /* initialization */
2546 dev_printk(KERN_ERR, ap->host->dev, "Failed to enable FBS\n");
2548 ahci_start_engine(ap);
2551 static void ahci_disable_fbs(struct ata_port *ap)
2553 struct ahci_port_priv *pp = ap->private_data;
2554 void __iomem *port_mmio = ahci_port_base(ap);
2558 if (!pp->fbs_supported)
2561 fbs = readl(port_mmio + PORT_FBS);
2562 if ((fbs & PORT_FBS_EN) == 0) {
2563 pp->fbs_enabled = false;
2567 rc = ahci_stop_engine(ap);
2571 writel(fbs & ~PORT_FBS_EN, port_mmio + PORT_FBS);
2572 fbs = readl(port_mmio + PORT_FBS);
2573 if (fbs & PORT_FBS_EN)
2574 dev_printk(KERN_ERR, ap->host->dev, "Failed to disable FBS\n");
2576 dev_printk(KERN_INFO, ap->host->dev, "FBS is disabled.\n");
2577 pp->fbs_enabled = false;
2580 ahci_start_engine(ap);
2583 static void ahci_pmp_attach(struct ata_port *ap)
2585 void __iomem *port_mmio = ahci_port_base(ap);
2586 struct ahci_port_priv *pp = ap->private_data;
2589 cmd = readl(port_mmio + PORT_CMD);
2590 cmd |= PORT_CMD_PMP;
2591 writel(cmd, port_mmio + PORT_CMD);
2593 ahci_enable_fbs(ap);
2595 pp->intr_mask |= PORT_IRQ_BAD_PMP;
2596 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
2599 static void ahci_pmp_detach(struct ata_port *ap)
2601 void __iomem *port_mmio = ahci_port_base(ap);
2602 struct ahci_port_priv *pp = ap->private_data;
2605 ahci_disable_fbs(ap);
2607 cmd = readl(port_mmio + PORT_CMD);
2608 cmd &= ~PORT_CMD_PMP;
2609 writel(cmd, port_mmio + PORT_CMD);
2611 pp->intr_mask &= ~PORT_IRQ_BAD_PMP;
2612 writel(pp->intr_mask, port_mmio + PORT_IRQ_MASK);
/*
 * Resume the port: power it up, (re)start it, and sync the PMP state.
 * NOTE(review): the listing drops the line before ahci_start_port();
 * upstream calls ahci_power_up(ap) there — confirm against full source.
 */
static int ahci_port_resume(struct ata_port *ap)
{
	ahci_power_up(ap);
	ahci_start_port(ap);

	if (sata_pmp_attached(ap))
		ahci_pmp_attach(ap);
	else
		ahci_pmp_detach(ap);

	return 0;
}
2629 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
2631 const char *emsg = NULL;
2634 rc = ahci_deinit_port(ap, &emsg);
2636 ahci_power_down(ap);
2638 ata_port_printk(ap, KERN_ERR, "%s (%d)\n", emsg, rc);
2639 ahci_start_port(ap);
2645 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
2647 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2648 struct ahci_host_priv *hpriv = host->private_data;
2649 void __iomem *mmio = hpriv->mmio;
2652 if (mesg.event & PM_EVENT_SUSPEND &&
2653 hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
2654 dev_printk(KERN_ERR, &pdev->dev,
2655 "BIOS update required for suspend/resume\n");
2659 if (mesg.event & PM_EVENT_SLEEP) {
2660 /* AHCI spec rev1.1 section 8.3.3:
2661 * Software must disable interrupts prior to requesting a
2662 * transition of the HBA to D3 state.
2664 ctl = readl(mmio + HOST_CTL);
2665 ctl &= ~HOST_IRQ_EN;
2666 writel(ctl, mmio + HOST_CTL);
2667 readl(mmio + HOST_CTL); /* flush */
2670 return ata_pci_device_suspend(pdev, mesg);
2673 static int ahci_pci_device_resume(struct pci_dev *pdev)
2675 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2678 rc = ata_pci_device_do_resume(pdev);
2682 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2683 rc = ahci_reset_controller(host);
2687 ahci_init_controller(host);
2690 ata_host_resume(host);
2696 static int ahci_port_start(struct ata_port *ap)
2698 struct ahci_host_priv *hpriv = ap->host->private_data;
2699 struct device *dev = ap->host->dev;
2700 struct ahci_port_priv *pp;
2703 size_t dma_sz, rx_fis_sz;
2705 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2709 /* check FBS capability */
2710 if ((hpriv->cap & HOST_CAP_FBS) && sata_pmp_supported(ap)) {
2711 void __iomem *port_mmio = ahci_port_base(ap);
2712 u32 cmd = readl(port_mmio + PORT_CMD);
2713 if (cmd & PORT_CMD_FBSCP)
2714 pp->fbs_supported = true;
2716 dev_printk(KERN_WARNING, dev,
2717 "The port is not capable of FBS\n");
2720 if (pp->fbs_supported) {
2721 dma_sz = AHCI_PORT_PRIV_FBS_DMA_SZ;
2722 rx_fis_sz = AHCI_RX_FIS_SZ * 16;
2724 dma_sz = AHCI_PORT_PRIV_DMA_SZ;
2725 rx_fis_sz = AHCI_RX_FIS_SZ;
2728 mem = dmam_alloc_coherent(dev, dma_sz, &mem_dma, GFP_KERNEL);
2731 memset(mem, 0, dma_sz);
2734 * First item in chunk of DMA memory: 32-slot command table,
2735 * 32 bytes each in size
2738 pp->cmd_slot_dma = mem_dma;
2740 mem += AHCI_CMD_SLOT_SZ;
2741 mem_dma += AHCI_CMD_SLOT_SZ;
2744 * Second item: Received-FIS area
2747 pp->rx_fis_dma = mem_dma;
2750 mem_dma += rx_fis_sz;
2753 * Third item: data area for storing a single command
2754 * and its scatter-gather table
2757 pp->cmd_tbl_dma = mem_dma;
2760 * Save off initial list of interrupts to be enabled.
2761 * This could be changed later
2763 pp->intr_mask = DEF_PORT_IRQ;
2765 ap->private_data = pp;
2767 /* engage engines, captain */
2768 return ahci_port_resume(ap);
2771 static void ahci_port_stop(struct ata_port *ap)
2773 const char *emsg = NULL;
2776 /* de-initialize port */
2777 rc = ahci_deinit_port(ap, &emsg);
2779 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
2782 static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
2787 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
2788 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2790 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2792 dev_printk(KERN_ERR, &pdev->dev,
2793 "64-bit DMA enable failed\n");
2798 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2800 dev_printk(KERN_ERR, &pdev->dev,
2801 "32-bit DMA enable failed\n");
2804 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2806 dev_printk(KERN_ERR, &pdev->dev,
2807 "32-bit consistent DMA enable failed\n");
2814 static void ahci_print_info(struct ata_host *host)
2816 struct ahci_host_priv *hpriv = host->private_data;
2817 struct pci_dev *pdev = to_pci_dev(host->dev);
2818 void __iomem *mmio = hpriv->mmio;
2819 u32 vers, cap, cap2, impl, speed;
2820 const char *speed_s;
2824 vers = readl(mmio + HOST_VERSION);
2827 impl = hpriv->port_map;
2829 speed = (cap >> 20) & 0xf;
2832 else if (speed == 2)
2834 else if (speed == 3)
2839 pci_read_config_word(pdev, 0x0a, &cc);
2840 if (cc == PCI_CLASS_STORAGE_IDE)
2842 else if (cc == PCI_CLASS_STORAGE_SATA)
2844 else if (cc == PCI_CLASS_STORAGE_RAID)
2849 dev_printk(KERN_INFO, &pdev->dev,
2850 "AHCI %02x%02x.%02x%02x "
2851 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
2854 (vers >> 24) & 0xff,
2855 (vers >> 16) & 0xff,
2859 ((cap >> 8) & 0x1f) + 1,
2865 dev_printk(KERN_INFO, &pdev->dev,
2872 cap & HOST_CAP_64 ? "64bit " : "",
2873 cap & HOST_CAP_NCQ ? "ncq " : "",
2874 cap & HOST_CAP_SNTF ? "sntf " : "",
2875 cap & HOST_CAP_MPS ? "ilck " : "",
2876 cap & HOST_CAP_SSS ? "stag " : "",
2877 cap & HOST_CAP_ALPM ? "pm " : "",
2878 cap & HOST_CAP_LED ? "led " : "",
2879 cap & HOST_CAP_CLO ? "clo " : "",
2880 cap & HOST_CAP_ONLY ? "only " : "",
2881 cap & HOST_CAP_PMP ? "pmp " : "",
2882 cap & HOST_CAP_FBS ? "fbs " : "",
2883 cap & HOST_CAP_PIO_MULTI ? "pio " : "",
2884 cap & HOST_CAP_SSC ? "slum " : "",
2885 cap & HOST_CAP_PART ? "part " : "",
2886 cap & HOST_CAP_CCC ? "ccc " : "",
2887 cap & HOST_CAP_EMS ? "ems " : "",
2888 cap & HOST_CAP_SXS ? "sxs " : "",
2889 cap2 & HOST_CAP2_APST ? "apst " : "",
2890 cap2 & HOST_CAP2_NVMHCI ? "nvmp " : "",
2891 cap2 & HOST_CAP2_BOH ? "boh " : ""
2895 /* On ASUS P5W DH Deluxe, the second port of PCI device 00:1f.2 is
2896 * hardwired to on-board SIMG 4726. The chipset is ICH8 and doesn't
2897 * support PMP and the 4726 either directly exports the device
2898 * attached to the first downstream port or acts as a hardware storage
2899 * controller and emulate a single ATA device (can be RAID 0/1 or some
2900 * other configuration).
2902 * When there's no device attached to the first downstream port of the
2903 * 4726, "Config Disk" appears, which is a pseudo ATA device to
2904 * configure the 4726. However, ATA emulation of the device is very
2905 * lame. It doesn't send signature D2H Reg FIS after the initial
2906 * hardreset, pukes on SRST w/ PMP==0 and has bunch of other issues.
2908 * The following function works around the problem by always using
2909 * hardreset on the port and not depending on receiving signature FIS
2910 * afterward. If signature FIS isn't received soon, ATA class is
2911 * assumed without follow-up softreset.
2913 static void ahci_p5wdh_workaround(struct ata_host *host)
2915 static struct dmi_system_id sysids[] = {
2917 .ident = "P5W DH Deluxe",
2919 DMI_MATCH(DMI_SYS_VENDOR,
2920 "ASUSTEK COMPUTER INC"),
2921 DMI_MATCH(DMI_PRODUCT_NAME, "P5W DH Deluxe"),
2926 struct pci_dev *pdev = to_pci_dev(host->dev);
2928 if (pdev->bus->number == 0 && pdev->devfn == PCI_DEVFN(0x1f, 2) &&
2929 dmi_check_system(sysids)) {
2930 struct ata_port *ap = host->ports[1];
2932 dev_printk(KERN_INFO, &pdev->dev, "enabling ASUS P5W DH "
2933 "Deluxe on-board SIMG4726 workaround\n");
2935 ap->ops = &ahci_p5wdh_ops;
2936 ap->link.flags |= ATA_LFLAG_NO_SRST | ATA_LFLAG_ASSUME_ATA;
2940 /* only some SB600 ahci controllers can do 64bit DMA */
2941 static bool ahci_sb600_enable_64bit(struct pci_dev *pdev)
2943 static const struct dmi_system_id sysids[] = {
2945 * The oldest version known to be broken is 0901 and
2946 * working is 1501 which was released on 2007-10-26.
2947 * Enable 64bit DMA on 1501 and anything newer.
2949 * Please read bko#9412 for more info.
2952 .ident = "ASUS M2A-VM",
2954 DMI_MATCH(DMI_BOARD_VENDOR,
2955 "ASUSTeK Computer INC."),
2956 DMI_MATCH(DMI_BOARD_NAME, "M2A-VM"),
2958 .driver_data = "20071026", /* yyyymmdd */
2961 * All BIOS versions for the MSI K9A2 Platinum (MS-7376)
2962 * support 64bit DMA.
2964 * BIOS versions earlier than 1.5 had the Manufacturer DMI
2965 * fields as "MICRO-STAR INTERANTIONAL CO.,LTD".
2966 * This spelling mistake was fixed in BIOS version 1.5, so
2967 * 1.5 and later have the Manufacturer as
2968 * "MICRO-STAR INTERNATIONAL CO.,LTD".
2969 * So try to match on DMI_BOARD_VENDOR of "MICRO-STAR INTER".
2971 * BIOS versions earlier than 1.9 had a Board Product Name
2972 * DMI field of "MS-7376". This was changed to be
2973 * "K9A2 Platinum (MS-7376)" in version 1.9, but we can still
2974 * match on DMI_BOARD_NAME of "MS-7376".
2977 .ident = "MSI K9A2 Platinum",
2979 DMI_MATCH(DMI_BOARD_VENDOR,
2980 "MICRO-STAR INTER"),
2981 DMI_MATCH(DMI_BOARD_NAME, "MS-7376"),
2986 const struct dmi_system_id *match;
2987 int year, month, date;
2990 match = dmi_first_match(sysids);
2991 if (pdev->bus->number != 0 || pdev->devfn != PCI_DEVFN(0x12, 0) ||
2995 if (!match->driver_data)
2998 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
2999 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
3001 if (strcmp(buf, match->driver_data) >= 0)
3004 dev_printk(KERN_WARNING, &pdev->dev, "%s: BIOS too old, "
3005 "forcing 32bit DMA, update BIOS\n", match->ident);
3010 dev_printk(KERN_WARNING, &pdev->dev, "%s: enabling 64bit DMA\n",
3015 static bool ahci_broken_system_poweroff(struct pci_dev *pdev)
3017 static const struct dmi_system_id broken_systems[] = {
3019 .ident = "HP Compaq nx6310",
3021 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3022 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq nx6310"),
3024 /* PCI slot number of the controller */
3025 .driver_data = (void *)0x1FUL,
3028 .ident = "HP Compaq 6720s",
3030 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3031 DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6720s"),
3033 /* PCI slot number of the controller */
3034 .driver_data = (void *)0x1FUL,
3037 { } /* terminate list */
3039 const struct dmi_system_id *dmi = dmi_first_match(broken_systems);
3042 unsigned long slot = (unsigned long)dmi->driver_data;
3043 /* apply the quirk only to on-board controllers */
3044 return slot == PCI_SLOT(pdev->devfn);
3050 static bool ahci_broken_suspend(struct pci_dev *pdev)
3052 static const struct dmi_system_id sysids[] = {
3054 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
3055 * to the harddisk doesn't become online after
3056 * resuming from STR. Warn and fail suspend.
3058 * http://bugzilla.kernel.org/show_bug.cgi?id=12276
3060 * Use dates instead of versions to match as HP is
3061 * apparently recycling both product and version
3064 * http://bugzilla.kernel.org/show_bug.cgi?id=15462
3069 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3070 DMI_MATCH(DMI_PRODUCT_NAME,
3071 "HP Pavilion dv4 Notebook PC"),
3073 .driver_data = "20090105", /* F.30 */
3078 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3079 DMI_MATCH(DMI_PRODUCT_NAME,
3080 "HP Pavilion dv5 Notebook PC"),
3082 .driver_data = "20090506", /* F.16 */
3087 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3088 DMI_MATCH(DMI_PRODUCT_NAME,
3089 "HP Pavilion dv6 Notebook PC"),
3091 .driver_data = "20090423", /* F.21 */
3096 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
3097 DMI_MATCH(DMI_PRODUCT_NAME,
3098 "HP HDX18 Notebook PC"),
3100 .driver_data = "20090430", /* F.23 */
3103 * Acer eMachines G725 has the same problem. BIOS
3104 * V1.03 is known to be broken. V3.04 is known to
3105 * work. Inbetween, there are V1.06, V2.06 and V3.03
3106 * that we don't have much idea about. For now,
3107 * blacklist anything older than V3.04.
3109 * http://bugzilla.kernel.org/show_bug.cgi?id=15104
3114 DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
3115 DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
3117 .driver_data = "20091216", /* V3.04 */
3119 { } /* terminate list */
3121 const struct dmi_system_id *dmi = dmi_first_match(sysids);
3122 int year, month, date;
3125 if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
3128 dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
3129 snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
3131 return strcmp(buf, dmi->driver_data) < 0;
3134 static bool ahci_broken_online(struct pci_dev *pdev)
3136 #define ENCODE_BUSDEVFN(bus, slot, func) \
3137 (void *)(unsigned long)(((bus) << 8) | PCI_DEVFN((slot), (func)))
3138 static const struct dmi_system_id sysids[] = {
3140 * There are several gigabyte boards which use
3141 * SIMG5723s configured as hardware RAID. Certain
3142 * 5723 firmware revisions shipped there keep the link
3143 * online but fail to answer properly to SRST or
3144 * IDENTIFY when no device is attached downstream
3145 * causing libata to retry quite a few times leading
3146 * to excessive detection delay.
3148 * As these firmwares respond to the second reset try
3149 * with invalid device signature, considering unknown
3150 * sig as offline works around the problem acceptably.
3153 .ident = "EP45-DQ6",
3155 DMI_MATCH(DMI_BOARD_VENDOR,
3156 "Gigabyte Technology Co., Ltd."),
3157 DMI_MATCH(DMI_BOARD_NAME, "EP45-DQ6"),
3159 .driver_data = ENCODE_BUSDEVFN(0x0a, 0x00, 0),
3162 .ident = "EP45-DS5",
3164 DMI_MATCH(DMI_BOARD_VENDOR,
3165 "Gigabyte Technology Co., Ltd."),
3166 DMI_MATCH(DMI_BOARD_NAME, "EP45-DS5"),
3168 .driver_data = ENCODE_BUSDEVFN(0x03, 0x00, 0),
3170 { } /* terminate list */
3172 #undef ENCODE_BUSDEVFN
3173 const struct dmi_system_id *dmi = dmi_first_match(sysids);
3179 val = (unsigned long)dmi->driver_data;
3181 return pdev->bus->number == (val >> 8) && pdev->devfn == (val & 0xff);
#ifdef CONFIG_ATA_ACPI
/*
 * Apply extra ACPI _GTF filters on DMI-matched systems whose firmware
 * issues harmful SATA-enable commands (e.g. FPDMA non-zero offset on the
 * Acer Aspire 3810T).
 */
static void ahci_gtf_filter_workaround(struct ata_host *host)
{
	static const struct dmi_system_id sysids[] = {
		/*
		 * Aspire 3810T issues a bunch of SATA enable commands
		 * via _GTF including an invalid one and one which is
		 * rejected by the device.  Among the successful ones
		 * is FPDMA non-zero offset enable which when enabled
		 * only on the drive side leads to NCQ command
		 * failures.  Filter it out.
		 */
		{
			.ident = "Aspire 3810T",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3810T"),
			},
			.driver_data = (void *)ATA_ACPI_FILTER_FPDMA_OFFSET,
		},
		{ }
	};
	const struct dmi_system_id *dmi = dmi_first_match(sysids);
	unsigned int filter;
	int i;

	if (!dmi)
		return;

	filter = (unsigned long)dmi->driver_data;
	dev_printk(KERN_INFO, host->dev,
		   "applying extra ACPI _GTF filter 0x%x for %s\n",
		   filter, dmi->ident);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;
		struct ata_device *dev;

		ata_for_each_link(link, ap, EDGE)
			ata_for_each_dev(dev, link, ALL)
				dev->gtf_filter |= filter;
	}
}
#else
static inline void ahci_gtf_filter_workaround(struct ata_host *host)
{}
#endif
3233 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3235 static int printed_version;
3236 unsigned int board_id = ent->driver_data;
3237 struct ata_port_info pi = ahci_port_info[board_id];
3238 const struct ata_port_info *ppi[] = { &pi, NULL };
3239 struct device *dev = &pdev->dev;
3240 struct ahci_host_priv *hpriv;
3241 struct ata_host *host;
3246 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
3248 if (!printed_version++)
3249 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
3251 /* The AHCI driver can only drive the SATA ports, the PATA driver
3252 can drive them all so if both drivers are selected make sure
3253 AHCI stays out of the way */
3254 if (pdev->vendor == PCI_VENDOR_ID_MARVELL && !marvell_enable)
3257 /* Promise's PDC42819 is a SAS/SATA controller that has an AHCI mode.
3258 * At the moment, we can only use the AHCI mode. Let the users know
3259 * that for SAS drives they're out of luck.
3261 if (pdev->vendor == PCI_VENDOR_ID_PROMISE)
3262 dev_printk(KERN_INFO, &pdev->dev, "PDC42819 "
3263 "can only drive SATA devices with this driver\n");
3265 /* acquire resources */
3266 rc = pcim_enable_device(pdev);
3270 /* AHCI controllers often implement SFF compatible interface.
3271 * Grab all PCI BARs just in case.
3273 rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
3275 pcim_pin_device(pdev);
3279 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
3280 (pdev->device == 0x2652 || pdev->device == 0x2653)) {
3283 /* ICH6s share the same PCI ID for both piix and ahci
3284 * modes. Enabling ahci mode while MAP indicates
3285 * combined mode is a bad idea. Yield to ata_piix.
3287 pci_read_config_byte(pdev, ICH_MAP, &map);
3289 dev_printk(KERN_INFO, &pdev->dev, "controller is in "
3290 "combined mode, can't enable AHCI mode\n");
3295 hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
3298 hpriv->flags |= (unsigned long)pi.private_data;
3300 /* MCP65 revision A1 and A2 can't do MSI */
3301 if (board_id == board_ahci_mcp65 &&
3302 (pdev->revision == 0xa1 || pdev->revision == 0xa2))
3303 hpriv->flags |= AHCI_HFLAG_NO_MSI;
3305 /* SB800 does NOT need the workaround to ignore SERR_INTERNAL */
3306 if (board_id == board_ahci_sb700 && pdev->revision >= 0x40)
3307 hpriv->flags &= ~AHCI_HFLAG_IGN_SERR_INTERNAL;
3309 /* only some SB600s can do 64bit DMA */
3310 if (ahci_sb600_enable_64bit(pdev))
3311 hpriv->flags &= ~AHCI_HFLAG_32BIT_ONLY;
3313 if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
3316 hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
3318 /* save initial config */
3319 ahci_save_initial_config(pdev, hpriv);
3322 if (hpriv->cap & HOST_CAP_NCQ) {
3323 pi.flags |= ATA_FLAG_NCQ;
3324 /* Auto-activate optimization is supposed to be supported on
3325 all AHCI controllers indicating NCQ support, but it seems
3326 to be broken at least on some NVIDIA MCP79 chipsets.
3327 Until we get info on which NVIDIA chipsets don't have this
3328 issue, if any, disable AA on all NVIDIA AHCIs. */
3329 if (pdev->vendor != PCI_VENDOR_ID_NVIDIA)
3330 pi.flags |= ATA_FLAG_FPDMA_AA;
3333 if (hpriv->cap & HOST_CAP_PMP)
3334 pi.flags |= ATA_FLAG_PMP;
3336 if (ahci_em_messages && (hpriv->cap & HOST_CAP_EMS)) {
3338 void __iomem *mmio = hpriv->mmio;
3339 u32 em_loc = readl(mmio + HOST_EM_LOC);
3340 u32 em_ctl = readl(mmio + HOST_EM_CTL);
3342 messages = (em_ctl & EM_CTRL_MSG_TYPE) >> 16;
3344 /* we only support LED message type right now */
3345 if ((messages & 0x01) && (ahci_em_messages == 1)) {
3347 hpriv->em_loc = ((em_loc >> 16) * 4);
3348 pi.flags |= ATA_FLAG_EM;
3349 if (!(em_ctl & EM_CTL_ALHD))
3350 pi.flags |= ATA_FLAG_SW_ACTIVITY;
3354 if (ahci_broken_system_poweroff(pdev)) {
3355 pi.flags |= ATA_FLAG_NO_POWEROFF_SPINDOWN;
3356 dev_info(&pdev->dev,
3357 "quirky BIOS, skipping spindown on poweroff\n");
3360 if (ahci_broken_suspend(pdev)) {
3361 hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
3362 dev_printk(KERN_WARNING, &pdev->dev,
3363 "BIOS update required for suspend/resume\n");
3366 if (ahci_broken_online(pdev)) {
3367 hpriv->flags |= AHCI_HFLAG_SRST_TOUT_IS_OFFLINE;
3368 dev_info(&pdev->dev,
3369 "online status unreliable, applying workaround\n");
3372 /* CAP.NP sometimes indicate the index of the last enabled
3373 * port, at other times, that of the last possible port, so
3374 * determining the maximum port number requires looking at
3375 * both CAP.NP and port_map.
3377 n_ports = max(ahci_nr_ports(hpriv->cap), fls(hpriv->port_map));
3379 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
3382 host->private_data = hpriv;
3384 if (!(hpriv->cap & HOST_CAP_SSS) || ahci_ignore_sss)
3385 host->flags |= ATA_HOST_PARALLEL_SCAN;
3387 printk(KERN_INFO "ahci: SSS flag set, parallel bus scan disabled\n");
3389 if (pi.flags & ATA_FLAG_EM)
3390 ahci_reset_em(host);
3392 for (i = 0; i < host->n_ports; i++) {
3393 struct ata_port *ap = host->ports[i];
3395 ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
3396 ata_port_pbar_desc(ap, AHCI_PCI_BAR,
3397 0x100 + ap->port_no * 0x80, "port");
3399 /* set initial link pm policy */
3400 ap->pm_policy = NOT_AVAILABLE;
3402 /* set enclosure management message type */
3403 if (ap->flags & ATA_FLAG_EM)
3404 ap->em_message_type = ahci_em_messages;
3407 /* disabled/not-implemented port */
3408 if (!(hpriv->port_map & (1 << i)))
3409 ap->ops = &ata_dummy_port_ops;
3412 /* apply workaround for ASUS P5W DH Deluxe mainboard */
3413 ahci_p5wdh_workaround(host);
3415 /* apply gtf filter quirk */
3416 ahci_gtf_filter_workaround(host);
3418 /* initialize adapter */
3419 rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
3423 rc = ahci_reset_controller(host);
3427 ahci_init_controller(host);
3428 ahci_print_info(host);
3430 pci_set_master(pdev);
3431 return ata_host_activate(host, pdev->irq, ahci_interrupt, IRQF_SHARED,
3435 static int __init ahci_init(void)
3437 return pci_register_driver(&ahci_pci_driver);
3440 static void __exit ahci_exit(void)
3442 pci_unregister_driver(&ahci_pci_driver);
3446 MODULE_AUTHOR("Jeff Garzik");
3447 MODULE_DESCRIPTION("AHCI SATA low-level driver");
3448 MODULE_LICENSE("GPL");
3449 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
3450 MODULE_VERSION(DRV_VERSION);
3452 module_init(ahci_init);
3453 module_exit(ahci_exit);