2 * ahci.c - AHCI SATA support
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
8 * Copyright 2004-2005 Red Hat, Inc.
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2, or (at your option)
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program; see the file COPYING. If not, write to
23 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 * libata documentation is available via 'make {ps|pdf}docs',
27 * as Documentation/DocBook/libata.*
29 * AHCI hardware documentation:
30 * http://www.intel.com/technology/serialata/pdf/rev1_0.pdf
31 * http://www.intel.com/technology/serialata/pdf/rev1_1.pdf
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/pci.h>
38 #include <linux/init.h>
39 #include <linux/blkdev.h>
40 #include <linux/delay.h>
41 #include <linux/interrupt.h>
42 #include <linux/sched.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/device.h>
45 #include <scsi/scsi_host.h>
46 #include <scsi/scsi_cmnd.h>
47 #include <linux/libata.h>
50 #define DRV_NAME "ahci"
51 #define DRV_VERSION "2.0"
56 AHCI_MAX_SG = 168, /* hardware max is 64K */
57 AHCI_DMA_BOUNDARY = 0xffffffff,
58 AHCI_USE_CLUSTERING = 0,
61 AHCI_CMD_SLOT_SZ = AHCI_MAX_CMDS * AHCI_CMD_SZ,
63 AHCI_CMD_TBL_CDB = 0x40,
64 AHCI_CMD_TBL_HDR_SZ = 0x80,
65 AHCI_CMD_TBL_SZ = AHCI_CMD_TBL_HDR_SZ + (AHCI_MAX_SG * 16),
66 AHCI_CMD_TBL_AR_SZ = AHCI_CMD_TBL_SZ * AHCI_MAX_CMDS,
67 AHCI_PORT_PRIV_DMA_SZ = AHCI_CMD_SLOT_SZ + AHCI_CMD_TBL_AR_SZ +
69 AHCI_IRQ_ON_SG = (1 << 31),
70 AHCI_CMD_ATAPI = (1 << 5),
71 AHCI_CMD_WRITE = (1 << 6),
72 AHCI_CMD_PREFETCH = (1 << 7),
73 AHCI_CMD_RESET = (1 << 8),
74 AHCI_CMD_CLR_BUSY = (1 << 10),
76 RX_FIS_D2H_REG = 0x40, /* offset of D2H Register FIS data */
77 RX_FIS_UNK = 0x60, /* offset of Unknown FIS data */
80 board_ahci_vt8251 = 1,
82 /* global controller registers */
83 HOST_CAP = 0x00, /* host capabilities */
84 HOST_CTL = 0x04, /* global host control */
85 HOST_IRQ_STAT = 0x08, /* interrupt status */
86 HOST_PORTS_IMPL = 0x0c, /* bitmap of implemented ports */
87 HOST_VERSION = 0x10, /* AHCI spec. version compliancy */
90 HOST_RESET = (1 << 0), /* reset controller; self-clear */
91 HOST_IRQ_EN = (1 << 1), /* global IRQ enable */
92 HOST_AHCI_EN = (1 << 31), /* AHCI enabled */
95 HOST_CAP_SSC = (1 << 14), /* Slumber capable */
96 HOST_CAP_CLO = (1 << 24), /* Command List Override support */
97 HOST_CAP_SSS = (1 << 27), /* Staggered Spin-up */
98 HOST_CAP_NCQ = (1 << 30), /* Native Command Queueing */
99 HOST_CAP_64 = (1 << 31), /* PCI DAC (64-bit DMA) support */
101 /* registers for each SATA port */
102 PORT_LST_ADDR = 0x00, /* command list DMA addr */
103 PORT_LST_ADDR_HI = 0x04, /* command list DMA addr hi */
104 PORT_FIS_ADDR = 0x08, /* FIS rx buf addr */
105 PORT_FIS_ADDR_HI = 0x0c, /* FIS rx buf addr hi */
106 PORT_IRQ_STAT = 0x10, /* interrupt status */
107 PORT_IRQ_MASK = 0x14, /* interrupt enable/disable mask */
108 PORT_CMD = 0x18, /* port command */
109 PORT_TFDATA = 0x20, /* taskfile data */
110 PORT_SIG = 0x24, /* device TF signature */
111 PORT_CMD_ISSUE = 0x38, /* command issue */
112 PORT_SCR = 0x28, /* SATA phy register block */
113 PORT_SCR_STAT = 0x28, /* SATA phy register: SStatus */
114 PORT_SCR_CTL = 0x2c, /* SATA phy register: SControl */
115 PORT_SCR_ERR = 0x30, /* SATA phy register: SError */
116 PORT_SCR_ACT = 0x34, /* SATA phy register: SActive */
118 /* PORT_IRQ_{STAT,MASK} bits */
119 PORT_IRQ_COLD_PRES = (1 << 31), /* cold presence detect */
120 PORT_IRQ_TF_ERR = (1 << 30), /* task file error */
121 PORT_IRQ_HBUS_ERR = (1 << 29), /* host bus fatal error */
122 PORT_IRQ_HBUS_DATA_ERR = (1 << 28), /* host bus data error */
123 PORT_IRQ_IF_ERR = (1 << 27), /* interface fatal error */
124 PORT_IRQ_IF_NONFATAL = (1 << 26), /* interface non-fatal error */
125 PORT_IRQ_OVERFLOW = (1 << 24), /* xfer exhausted available S/G */
126 PORT_IRQ_BAD_PMP = (1 << 23), /* incorrect port multiplier */
128 PORT_IRQ_PHYRDY = (1 << 22), /* PhyRdy changed */
129 PORT_IRQ_DEV_ILCK = (1 << 7), /* device interlock */
130 PORT_IRQ_CONNECT = (1 << 6), /* port connect change status */
131 PORT_IRQ_SG_DONE = (1 << 5), /* descriptor processed */
132 PORT_IRQ_UNK_FIS = (1 << 4), /* unknown FIS rx'd */
133 PORT_IRQ_SDB_FIS = (1 << 3), /* Set Device Bits FIS rx'd */
134 PORT_IRQ_DMAS_FIS = (1 << 2), /* DMA Setup FIS rx'd */
135 PORT_IRQ_PIOS_FIS = (1 << 1), /* PIO Setup FIS rx'd */
136 PORT_IRQ_D2H_REG_FIS = (1 << 0), /* D2H Register FIS rx'd */
138 PORT_IRQ_FREEZE = PORT_IRQ_HBUS_ERR |
143 PORT_IRQ_ERROR = PORT_IRQ_FREEZE |
145 PORT_IRQ_HBUS_DATA_ERR,
146 DEF_PORT_IRQ = PORT_IRQ_ERROR | PORT_IRQ_SG_DONE |
147 PORT_IRQ_SDB_FIS | PORT_IRQ_DMAS_FIS |
148 PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS,
151 PORT_CMD_ATAPI = (1 << 24), /* Device is ATAPI */
152 PORT_CMD_LIST_ON = (1 << 15), /* cmd list DMA engine running */
153 PORT_CMD_FIS_ON = (1 << 14), /* FIS DMA engine running */
154 PORT_CMD_FIS_RX = (1 << 4), /* Enable FIS receive DMA engine */
155 PORT_CMD_CLO = (1 << 3), /* Command list override */
156 PORT_CMD_POWER_ON = (1 << 2), /* Power up device */
157 PORT_CMD_SPIN_UP = (1 << 1), /* Spin up device */
158 PORT_CMD_START = (1 << 0), /* Enable port DMA engine */
160 PORT_CMD_ICC_MASK = (0xf << 28), /* i/f ICC state mask */
161 PORT_CMD_ICC_ACTIVE = (0x1 << 28), /* Put i/f in active state */
162 PORT_CMD_ICC_PARTIAL = (0x2 << 28), /* Put i/f in partial state */
163 PORT_CMD_ICC_SLUMBER = (0x6 << 28), /* Put i/f in slumber state */
165 /* hpriv->flags bits */
166 AHCI_FLAG_MSI = (1 << 0),
169 AHCI_FLAG_RESET_NEEDS_CLO = (1 << 24),
170 AHCI_FLAG_NO_NCQ = (1 << 25),
/*
 * Hardware command-list slot entry (AHCI 1.1 sec 4.2.2).
 * NOTE(review): the member list is elided from this listing; opts/status/
 * tbl_addr/tbl_addr_hi are referenced later in ahci_fill_cmd_slot().
 */
173 struct ahci_cmd_hdr {
/* per-controller private data, hung off host_set->private_data */
188 struct ahci_host_priv {
190 u32 cap; /* cache of HOST_CAP register */
191 u32 port_map; /* cache of HOST_PORTS_IMPL reg */
/* per-port private data: CPU/DMA addresses of the port's command slots,
 * command tables and received-FIS area (allocated in ahci_port_start) */
194 struct ahci_port_priv {
195 struct ahci_cmd_hdr *cmd_slot;
196 dma_addr_t cmd_slot_dma;
198 dma_addr_t cmd_tbl_dma;
200 dma_addr_t rx_fis_dma;
/* forward declarations for the ops tables below */
203 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg);
204 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
205 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
206 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
207 static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
208 static void ahci_irq_clear(struct ata_port *ap);
209 static int ahci_port_start(struct ata_port *ap);
210 static void ahci_port_stop(struct ata_port *ap);
211 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
212 static void ahci_qc_prep(struct ata_queued_cmd *qc);
213 static u8 ahci_check_status(struct ata_port *ap);
214 static void ahci_freeze(struct ata_port *ap);
215 static void ahci_thaw(struct ata_port *ap);
216 static void ahci_error_handler(struct ata_port *ap);
217 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc);
218 static void ahci_remove_one (struct pci_dev *pdev);
/*
 * SCSI midlayer host template: routes SCSI commands into libata and
 * advertises queue depth / S/G limits derived from the AHCI constants.
 */
220 static struct scsi_host_template ahci_sht = {
221 .module = THIS_MODULE,
223 .ioctl = ata_scsi_ioctl,
224 .queuecommand = ata_scsi_queuecmd,
225 .change_queue_depth = ata_scsi_change_queue_depth,
/* one slot is reserved for libata internal commands */
226 .can_queue = AHCI_MAX_CMDS - 1,
227 .this_id = ATA_SHT_THIS_ID,
228 .sg_tablesize = AHCI_MAX_SG,
229 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
230 .emulated = ATA_SHT_EMULATED,
231 .use_clustering = AHCI_USE_CLUSTERING,
232 .proc_name = DRV_NAME,
233 .dma_boundary = AHCI_DMA_BOUNDARY,
234 .slave_configure = ata_scsi_slave_config,
235 .slave_destroy = ata_scsi_slave_destroy,
236 .bios_param = ata_std_bios_param,
/*
 * libata low-level operations.  AHCI has no taskfile registers; status
 * comes from PORT_TFDATA and the received D2H FIS, so check_status and
 * tf_read are AHCI-specific while device select is a no-op.
 */
239 static const struct ata_port_operations ahci_ops = {
240 .port_disable = ata_port_disable,
242 .check_status = ahci_check_status,
243 .check_altstatus = ahci_check_status,
244 .dev_select = ata_noop_dev_select,
246 .tf_read = ahci_tf_read,
248 .qc_prep = ahci_qc_prep,
249 .qc_issue = ahci_qc_issue,
251 .irq_handler = ahci_interrupt,
252 .irq_clear = ahci_irq_clear,
254 .scr_read = ahci_scr_read,
255 .scr_write = ahci_scr_write,
257 .freeze = ahci_freeze,
260 .error_handler = ahci_error_handler,
261 .post_internal_cmd = ahci_post_internal_cmd,
263 .port_start = ahci_port_start,
264 .port_stop = ahci_port_stop,
/*
 * Per-board port configuration, indexed by the board_* enum IDs from
 * ahci_pci_tbl.  The VT8251 entry adds quirk flags: its reset path
 * requires a Command List Override and NCQ is disabled.
 */
267 static const struct ata_port_info ahci_port_info[] = {
271 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
272 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
273 ATA_FLAG_SKIP_D2H_BSY,
274 .pio_mask = 0x1f, /* pio0-4 */
275 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
276 .port_ops = &ahci_ops,
278 /* board_ahci_vt8251 */
281 .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
282 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA |
283 ATA_FLAG_SKIP_D2H_BSY |
284 AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ,
285 .pio_mask = 0x1f, /* pio0-4 */
286 .udma_mask = 0x7f, /* udma0-6 ; FIXME */
287 .port_ops = &ahci_ops,
/*
 * PCI IDs bound by this driver; driver_data selects an entry in
 * ahci_port_info[] (board_ahci for all but the VIA VT8251).
 */
291 static const struct pci_device_id ahci_pci_tbl[] = {
293 { PCI_VENDOR_ID_INTEL, 0x2652, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
294 board_ahci }, /* ICH6 */
295 { PCI_VENDOR_ID_INTEL, 0x2653, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
296 board_ahci }, /* ICH6M */
297 { PCI_VENDOR_ID_INTEL, 0x27c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
298 board_ahci }, /* ICH7 */
299 { PCI_VENDOR_ID_INTEL, 0x27c5, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
300 board_ahci }, /* ICH7M */
301 { PCI_VENDOR_ID_INTEL, 0x27c3, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
302 board_ahci }, /* ICH7R */
303 { PCI_VENDOR_ID_AL, 0x5288, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
304 board_ahci }, /* ULi M5288 */
305 { PCI_VENDOR_ID_INTEL, 0x2681, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
306 board_ahci }, /* ESB2 */
307 { PCI_VENDOR_ID_INTEL, 0x2682, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
308 board_ahci }, /* ESB2 */
309 { PCI_VENDOR_ID_INTEL, 0x2683, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
310 board_ahci }, /* ESB2 */
311 { PCI_VENDOR_ID_INTEL, 0x27c6, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
312 board_ahci }, /* ICH7-M DH */
313 { PCI_VENDOR_ID_INTEL, 0x2821, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
314 board_ahci }, /* ICH8 */
315 { PCI_VENDOR_ID_INTEL, 0x2822, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
316 board_ahci }, /* ICH8 */
317 { PCI_VENDOR_ID_INTEL, 0x2824, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
318 board_ahci }, /* ICH8 */
319 { PCI_VENDOR_ID_INTEL, 0x2829, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
320 board_ahci }, /* ICH8M */
321 { PCI_VENDOR_ID_INTEL, 0x282a, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
322 board_ahci }, /* ICH8M */
/* JMicron uses a bare vendor ID (0x197b) — no PCI_VENDOR_ID_* macro */
325 { 0x197b, 0x2360, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
326 board_ahci }, /* JMicron JMB360 */
327 { 0x197b, 0x2361, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
328 board_ahci }, /* JMicron JMB361 */
329 { 0x197b, 0x2363, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
330 board_ahci }, /* JMicron JMB363 */
331 { 0x197b, 0x2365, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
332 board_ahci }, /* JMicron JMB365 */
333 { 0x197b, 0x2366, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
334 board_ahci }, /* JMicron JMB366 */
337 { PCI_VENDOR_ID_ATI, 0x4380, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
338 board_ahci }, /* ATI SB600 non-raid */
339 { PCI_VENDOR_ID_ATI, 0x4381, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
340 board_ahci }, /* ATI SB600 raid */
343 { PCI_VENDOR_ID_VIA, 0x3349, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
344 board_ahci_vt8251 }, /* VIA VT8251 */
347 { PCI_VENDOR_ID_NVIDIA, 0x044c, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
348 board_ahci }, /* MCP65 */
349 { PCI_VENDOR_ID_NVIDIA, 0x044d, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
350 board_ahci }, /* MCP65 */
351 { PCI_VENDOR_ID_NVIDIA, 0x044e, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
352 board_ahci }, /* MCP65 */
353 { PCI_VENDOR_ID_NVIDIA, 0x044f, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
354 board_ahci }, /* MCP65 */
356 { } /* terminate list */
/* PCI driver glue: probe/remove entry points and the ID table above */
360 static struct pci_driver ahci_pci_driver = {
362 .id_table = ahci_pci_tbl,
363 .probe = ahci_init_one,
364 .remove = ahci_remove_one,
/*
 * Compute the MMIO base of a port's register block: ports live at
 * 0x100 + port * 0x80 from the ABAR base (standard AHCI layout).
 */
368 static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
370 return base + 0x100 + (port * 0x80);
/* __iomem-typed wrapper around ahci_port_base_ul() */
373 static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
375 return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
/*
 * Read a SATA SCR register.  The port's SStatus/SControl/SError/SActive
 * are consecutive 32-bit registers starting at ioaddr.scr_addr.
 * NOTE(review): the switch statement and its default case are elided
 * from this listing.
 */
378 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
383 case SCR_STATUS: sc_reg = 0; break;
384 case SCR_CONTROL: sc_reg = 1; break;
385 case SCR_ERROR: sc_reg = 2; break;
386 case SCR_ACTIVE: sc_reg = 3; break;
391 return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
/* Write a SATA SCR register; same index mapping as ahci_scr_read(). */
395 static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
401 case SCR_STATUS: sc_reg = 0; break;
402 case SCR_CONTROL: sc_reg = 1; break;
403 case SCR_ERROR: sc_reg = 2; break;
404 case SCR_ACTIVE: sc_reg = 3; break;
409 writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
/* Start the port's command-list DMA engine (set PORT_CMD_START). */
412 static void ahci_start_engine(void __iomem *port_mmio)
417 tmp = readl(port_mmio + PORT_CMD);
418 tmp |= PORT_CMD_START;
419 writel(tmp, port_mmio + PORT_CMD);
420 readl(port_mmio + PORT_CMD); /* flush */
/*
 * Stop the port's command-list DMA engine: clear PORT_CMD_START, then
 * poll until the hardware drops PORT_CMD_LIST_ON.  Returns an error if
 * the engine does not stop (error-return lines elided in this listing).
 */
423 static int ahci_stop_engine(void __iomem *port_mmio)
427 tmp = readl(port_mmio + PORT_CMD);
429 /* check if the HBA is idle */
430 if ((tmp & (PORT_CMD_START | PORT_CMD_LIST_ON)) == 0)
433 /* setting HBA to idle */
434 tmp &= ~PORT_CMD_START;
435 writel(tmp, port_mmio + PORT_CMD);
437 /* wait for engine to stop. This could be as long as 500 msec */
438 tmp = ata_wait_register(port_mmio + PORT_CMD,
439 PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500);
440 if (tmp & PORT_CMD_LIST_ON)
/*
 * Program the command-list and received-FIS DMA addresses into the port
 * and enable FIS reception.  High dwords are written only when the HBA
 * advertises 64-bit addressing (HOST_CAP_64); the (x >> 16) >> 16 form
 * avoids an undefined 32-bit shift when dma_addr_t is 32 bits wide.
 */
446 static void ahci_start_fis_rx(void __iomem *port_mmio, u32 cap,
447 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
451 /* set FIS registers */
452 if (cap & HOST_CAP_64)
453 writel((cmd_slot_dma >> 16) >> 16, port_mmio + PORT_LST_ADDR_HI);
454 writel(cmd_slot_dma & 0xffffffff, port_mmio + PORT_LST_ADDR);
456 if (cap & HOST_CAP_64)
457 writel((rx_fis_dma >> 16) >> 16, port_mmio + PORT_FIS_ADDR_HI);
458 writel(rx_fis_dma & 0xffffffff, port_mmio + PORT_FIS_ADDR);
460 /* enable FIS reception */
461 tmp = readl(port_mmio + PORT_CMD);
462 tmp |= PORT_CMD_FIS_RX;
463 writel(tmp, port_mmio + PORT_CMD);
/* posted-write flush */
466 readl(port_mmio + PORT_CMD);
/*
 * Disable FIS reception and poll until the receive DMA engine reports
 * stopped (PORT_CMD_FIS_ON clear).  Returns nonzero on timeout
 * (error-return lines elided in this listing).
 */
469 static int ahci_stop_fis_rx(void __iomem *port_mmio)
473 /* disable FIS reception */
474 tmp = readl(port_mmio + PORT_CMD);
475 tmp &= ~PORT_CMD_FIS_RX;
476 writel(tmp, port_mmio + PORT_CMD);
478 /* wait for completion, spec says 500ms, give it 1000 */
479 tmp = ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_FIS_ON,
480 PORT_CMD_FIS_ON, 10, 1000);
481 if (tmp & PORT_CMD_FIS_ON)
/*
 * Power up the attached device: spin it up if the HBA supports staggered
 * spin-up (HOST_CAP_SSS), then force the interface into the ACTIVE ICC
 * state via PORT_CMD bits 31:28.
 */
487 static void ahci_power_up(void __iomem *port_mmio, u32 cap)
491 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
494 if (cap & HOST_CAP_SSS) {
495 cmd |= PORT_CMD_SPIN_UP;
496 writel(cmd, port_mmio + PORT_CMD);
/* go active */
500 writel(cmd | PORT_CMD_ICC_ACTIVE, port_mmio + PORT_CMD);
/*
 * Power down the link: if the HBA is slumber-capable, allow slumber
 * transitions in SControl, request the SLUMBER ICC state and wait up to
 * 50ms for it; if staggered spin-up is supported, also place the device
 * in listen mode by clearing SControl.DET and PxCMD.SUD.
 */
503 static void ahci_power_down(void __iomem *port_mmio, u32 cap)
507 cmd = readl(port_mmio + PORT_CMD) & ~PORT_CMD_ICC_MASK;
509 if (cap & HOST_CAP_SSC) {
510 /* enable transitions to slumber mode */
511 scontrol = readl(port_mmio + PORT_SCR_CTL);
/* only rewrite SControl if its IPM field currently forbids slumber */
512 if ((scontrol & 0x0f00) > 0x100) {
514 writel(scontrol, port_mmio + PORT_SCR_CTL);
517 /* put device into slumber mode */
518 writel(cmd | PORT_CMD_ICC_SLUMBER, port_mmio + PORT_CMD);
520 /* wait for the transition to complete */
521 ata_wait_register(port_mmio + PORT_CMD, PORT_CMD_ICC_SLUMBER,
522 PORT_CMD_ICC_SLUMBER, 1, 50);
525 /* put device into listen mode */
526 if (cap & HOST_CAP_SSS) {
527 /* first set PxSCTL.DET to 0 */
528 scontrol = readl(port_mmio + PORT_SCR_CTL);
530 writel(scontrol, port_mmio + PORT_SCR_CTL);
532 /* then set PxCMD.SUD to 0 */
533 cmd &= ~PORT_CMD_SPIN_UP;
534 writel(cmd, port_mmio + PORT_CMD);
/*
 * Bring a port fully online: power up the device, enable FIS reception
 * with the given DMA areas, then start the command-list engine.
 */
538 static void ahci_init_port(void __iomem *port_mmio, u32 cap,
539 dma_addr_t cmd_slot_dma, dma_addr_t rx_fis_dma)
542 ahci_power_up(port_mmio, cap);
544 /* enable FIS reception */
545 ahci_start_fis_rx(port_mmio, cap, cmd_slot_dma, rx_fis_dma);
548 ahci_start_engine(port_mmio);
/*
 * Quiesce a port (reverse order of ahci_init_port): stop the engine,
 * stop FIS reception, then power the link down.  On failure *emsg is
 * set to a human-readable reason and the error code is returned.
 */
551 static int ahci_deinit_port(void __iomem *port_mmio, u32 cap, const char **emsg)
556 rc = ahci_stop_engine(port_mmio);
558 *emsg = "failed to stop engine";
562 /* disable FIS reception */
563 rc = ahci_stop_fis_rx(port_mmio);
565 *emsg = "failed stop FIS RX";
569 /* put device into slumber mode */
570 ahci_power_down(port_mmio, cap);
/*
 * Classify the attached device (ATA vs ATAPI etc.) from the signature
 * the device wrote into PORT_SIG, by unpacking it into taskfile LBA/
 * nsect fields and handing it to ata_dev_classify().
 */
575 static unsigned int ahci_dev_classify(struct ata_port *ap)
577 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
578 struct ata_taskfile tf;
581 tmp = readl(port_mmio + PORT_SIG);
582 tf.lbah = (tmp >> 24) & 0xff;
583 tf.lbam = (tmp >> 16) & 0xff;
584 tf.lbal = (tmp >> 8) & 0xff;
585 tf.nsect = (tmp) & 0xff;
587 return ata_dev_classify(&tf);
/*
 * Fill command-list slot @tag: option bits plus the 64-bit DMA address
 * of the slot's command table.  Each table is AHCI_CMD_TBL_SZ bytes
 * into the shared table array.
 */
590 static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
593 dma_addr_t cmd_tbl_dma;
595 cmd_tbl_dma = pp->cmd_tbl_dma + tag * AHCI_CMD_TBL_SZ;
597 pp->cmd_slot[tag].opts = cpu_to_le32(opts);
598 pp->cmd_slot[tag].status = 0;
599 pp->cmd_slot[tag].tbl_addr = cpu_to_le32(cmd_tbl_dma & 0xffffffff);
600 pp->cmd_slot[tag].tbl_addr_hi = cpu_to_le32((cmd_tbl_dma >> 16) >> 16);
/*
 * Issue a Command List Override (AHCI 1.1 sec 10.4) to clear a stuck
 * BSY/DRQ condition.  Fails with -EOPNOTSUPP when the HBA lacks
 * HOST_CAP_CLO, or on timeout waiting for PORT_CMD_CLO to self-clear
 * (the CLO set and error-return lines are elided in this listing).
 */
603 static int ahci_clo(struct ata_port *ap)
605 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
606 struct ahci_host_priv *hpriv = ap->host_set->private_data;
609 if (!(hpriv->cap & HOST_CAP_CLO))
612 tmp = readl(port_mmio + PORT_CMD);
614 writel(tmp, port_mmio + PORT_CMD);
616 tmp = ata_wait_register(port_mmio + PORT_CMD,
617 PORT_CMD_CLO, PORT_CMD_CLO, 1, 500);
618 if (tmp & PORT_CMD_CLO)
/*
 * EH prereset hook: boards flagged AHCI_FLAG_RESET_NEEDS_CLO (VT8251)
 * get a CLO first if BSY never clears, then fall through to the
 * standard libata prereset.
 */
624 static int ahci_prereset(struct ata_port *ap)
626 if ((ap->flags & AHCI_FLAG_RESET_NEEDS_CLO) &&
627 (ata_busy_wait(ap, ATA_BUSY, 1000) & ATA_BUSY)) {
628 /* ATA_BUSY hasn't cleared, so send a CLO */
632 return ata_std_prereset(ap);
/*
 * AHCI software reset (AHCI 1.1 sec 10.4.1): stop the engine, clear any
 * BSY/DRQ via CLO, then issue two H2D Register FISes through command
 * slot 0 — the first with SRST asserted (AHCI_CMD_RESET|CLR_BUSY), the
 * second deasserting it — and finally classify the device.  On failure
 * the engine is restarted and an error is logged with @reason.
 * NOTE(review): several lines (fis pointer setup, control-bit writes,
 * gotos, return statements) are elided from this listing.
 */
635 static int ahci_softreset(struct ata_port *ap, unsigned int *class)
637 struct ahci_port_priv *pp = ap->private_data;
638 void __iomem *mmio = ap->host_set->mmio_base;
639 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
640 const u32 cmd_fis_len = 5; /* five dwords */
641 const char *reason = NULL;
642 struct ata_taskfile tf;
/* nothing attached — report ATA_DEV_NONE without touching the device */
649 if (ata_port_offline(ap)) {
650 DPRINTK("PHY reports no device\n");
651 *class = ATA_DEV_NONE;
655 /* prepare for SRST (AHCI-1.1 10.4.1) */
656 rc = ahci_stop_engine(port_mmio);
658 reason = "failed to stop engine";
662 /* check BUSY/DRQ, perform Command List Override if necessary */
663 ahci_tf_read(ap, &tf);
664 if (tf.command & (ATA_BUSY | ATA_DRQ)) {
667 if (rc == -EOPNOTSUPP) {
668 reason = "port busy but CLO unavailable";
671 reason = "port busy but CLO failed";
/* restart engine so the reset FISes can be issued */
677 ahci_start_engine(port_mmio);
679 ata_tf_init(ap->device, &tf);
682 /* issue the first D2H Register FIS */
683 ahci_fill_cmd_slot(pp, 0,
684 cmd_fis_len | AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY);
687 ata_tf_to_fis(&tf, fis, 0);
688 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
690 writel(1, port_mmio + PORT_CMD_ISSUE);
/* wait up to 500ms for slot 0's issue bit to clear */
692 tmp = ata_wait_register(port_mmio + PORT_CMD_ISSUE, 0x1, 0x1, 1, 500);
695 reason = "1st FIS failed";
699 /* spec says at least 5us, but be generous and sleep for 1ms */
702 /* issue the second D2H Register FIS */
703 ahci_fill_cmd_slot(pp, 0, cmd_fis_len);
706 ata_tf_to_fis(&tf, fis, 0);
707 fis[1] &= ~(1 << 7); /* turn off Command FIS bit */
709 writel(1, port_mmio + PORT_CMD_ISSUE);
710 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
712 /* spec mandates ">= 2ms" before checking status.
713 * We wait 150ms, because that was the magic delay used for
714 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
715 * between when the ATA command register is written, and then
716 * status is checked. Because waiting for "a while" before
717 * checking status is fine, post SRST, we perform this magic
718 * delay here as well.
/* reset complete — classify whatever answered */
722 *class = ATA_DEV_NONE;
723 if (ata_port_online(ap)) {
724 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
726 reason = "device not ready";
729 *class = ahci_dev_classify(ap);
732 DPRINTK("EXIT, class=%u\n", *class);
/* error path: restart engine and report why the softreset failed */
736 ahci_start_engine(port_mmio);
738 ata_port_printk(ap, KERN_ERR, "softreset failed (%s)\n", reason);
/*
 * AHCI hard reset: stop the engine, pre-fill the received-FIS D2H area
 * with a known taskfile (so a fresh D2H FIS can be detected), run the
 * standard SATA phy hardreset, restart the engine and classify the
 * device if the link came back online.
 */
742 static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
744 struct ahci_port_priv *pp = ap->private_data;
745 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
746 struct ata_taskfile tf;
747 void __iomem *mmio = ap->host_set->mmio_base;
748 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
753 ahci_stop_engine(port_mmio);
755 /* clear D2H reception area to properly wait for D2H FIS */
756 ata_tf_init(ap->device, &tf);
758 ata_tf_to_fis(&tf, d2h_fis, 0);
760 rc = sata_std_hardreset(ap, class);
762 ahci_start_engine(port_mmio);
764 if (rc == 0 && ata_port_online(ap))
765 *class = ahci_dev_classify(ap);
766 if (*class == ATA_DEV_UNKNOWN)
767 *class = ATA_DEV_NONE;
769 DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
/*
 * EH postreset hook: run the standard postreset, then sync the port's
 * PORT_CMD_ATAPI bit with the classified device type, rewriting
 * PORT_CMD only if the bit actually changed.
 */
773 static void ahci_postreset(struct ata_port *ap, unsigned int *class)
775 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
778 ata_std_postreset(ap, class);
780 /* Make sure port's ATAPI bit is set appropriately */
781 new_tmp = tmp = readl(port_mmio + PORT_CMD);
782 if (*class == ATA_DEV_ATAPI)
783 new_tmp |= PORT_CMD_ATAPI;
785 new_tmp &= ~PORT_CMD_ATAPI;
786 if (new_tmp != tmp) {
787 writel(new_tmp, port_mmio + PORT_CMD);
788 readl(port_mmio + PORT_CMD); /* flush */
/* ATA status byte lives in the low 8 bits of PORT_TFDATA */
792 static u8 ahci_check_status(struct ata_port *ap)
794 void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
796 return readl(mmio + PORT_TFDATA) & 0xFF;
/* Reconstruct the taskfile from the last received D2H Register FIS */
799 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
801 struct ahci_port_priv *pp = ap->private_data;
802 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
804 ata_tf_from_fis(d2h_fis, tf);
/*
 * Build the PRD (physical region descriptor) table that follows the
 * command-table header: one {addr, addr_hi, flags_size} entry per S/G
 * element.  Hardware encodes length as (byte count - 1).  Returns the
 * number of entries written (increment/return lines elided here).
 */
807 static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl)
809 struct scatterlist *sg;
810 struct ahci_sg *ahci_sg;
811 unsigned int n_sg = 0;
816 * Next, the S/G list.
818 ahci_sg = cmd_tbl + AHCI_CMD_TBL_HDR_SZ;
819 ata_for_each_sg(sg, qc) {
820 dma_addr_t addr = sg_dma_address(sg);
821 u32 sg_len = sg_dma_len(sg);
823 ahci_sg->addr = cpu_to_le32(addr & 0xffffffff);
824 ahci_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
825 ahci_sg->flags_size = cpu_to_le32(sg_len - 1);
/*
 * Prepare a queued command: serialize the taskfile as an H2D FIS into
 * the tag's command table, append the ATAPI CDB (zero-padded to 32
 * bytes) when applicable, build the S/G list, then publish the slot
 * with FIS length, direction and ATAPI/prefetch option bits.
 */
834 static void ahci_qc_prep(struct ata_queued_cmd *qc)
836 struct ata_port *ap = qc->ap;
837 struct ahci_port_priv *pp = ap->private_data;
838 int is_atapi = is_atapi_taskfile(&qc->tf);
841 const u32 cmd_fis_len = 5; /* five dwords */
845 * Fill in command table information. First, the header,
846 * a SATA Register - Host to Device command FIS.
848 cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
850 ata_tf_to_fis(&qc->tf, cmd_tbl, 0);
852 memset(cmd_tbl + AHCI_CMD_TBL_CDB, 0, 32);
853 memcpy(cmd_tbl + AHCI_CMD_TBL_CDB, qc->cdb, qc->dev->cdb_len);
857 if (qc->flags & ATA_QCFLAG_DMAMAP)
858 n_elem = ahci_fill_sg(qc, cmd_tbl);
861 * Fill in command slot information.
/* opts: dword 0 of the command header — PRDTL in bits 31:16, FIS len in 4:0 */
863 opts = cmd_fis_len | n_elem << 16;
864 if (qc->tf.flags & ATA_TFLAG_WRITE)
865 opts |= AHCI_CMD_WRITE;
867 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;
869 ahci_fill_cmd_slot(pp, qc->tag, opts);
/*
 * Translate an error interrupt (@irq_stat) into libata EH information:
 * clear SError (the HBA can lock up otherwise), map IRQ bits onto
 * err_mask/EH actions, attach the error to the active qc if any, and
 * freeze or abort the port (final freeze/abort calls elided here).
 */
872 static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
874 struct ahci_port_priv *pp = ap->private_data;
875 struct ata_eh_info *ehi = &ap->eh_info;
876 unsigned int err_mask = 0, action = 0;
877 struct ata_queued_cmd *qc;
880 ata_ehi_clear_desc(ehi);
882 /* AHCI needs SError cleared; otherwise, it might lock up */
883 serror = ahci_scr_read(ap, SCR_ERROR);
884 ahci_scr_write(ap, SCR_ERROR, serror);
886 /* analyze @irq_stat */
887 ata_ehi_push_desc(ehi, "irq_stat 0x%08x", irq_stat);
889 if (irq_stat & PORT_IRQ_TF_ERR)
890 err_mask |= AC_ERR_DEV;
892 if (irq_stat & (PORT_IRQ_HBUS_ERR | PORT_IRQ_HBUS_DATA_ERR)) {
893 err_mask |= AC_ERR_HOST_BUS;
894 action |= ATA_EH_SOFTRESET;
897 if (irq_stat & PORT_IRQ_IF_ERR) {
898 err_mask |= AC_ERR_ATA_BUS;
899 action |= ATA_EH_SOFTRESET;
900 ata_ehi_push_desc(ehi, ", interface fatal error");
/* connect/phy-ready change: treat as hotplug event */
903 if (irq_stat & (PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)) {
904 ata_ehi_hotplugged(ehi);
905 ata_ehi_push_desc(ehi, ", %s", irq_stat & PORT_IRQ_CONNECT ?
906 "connection status changed" : "PHY RDY changed");
/* dump the unrecognized FIS from the rx area for diagnostics */
909 if (irq_stat & PORT_IRQ_UNK_FIS) {
910 u32 *unk = (u32 *)(pp->rx_fis + RX_FIS_UNK);
912 err_mask |= AC_ERR_HSM;
913 action |= ATA_EH_SOFTRESET;
914 ata_ehi_push_desc(ehi, ", unknown FIS %08x %08x %08x %08x",
915 unk[0], unk[1], unk[2], unk[3]);
918 /* okay, let's hand over to EH */
919 ehi->serror |= serror;
920 ehi->action |= action;
922 qc = ata_qc_from_tag(ap, ap->active_tag);
924 qc->err_mask |= err_mask;
926 ehi->err_mask |= err_mask;
928 if (irq_stat & PORT_IRQ_FREEZE)
/*
 * Per-port interrupt service: ack PORT_IRQ_STAT, dispatch errors to
 * ahci_error_intr(), complete finished commands (NCQ completion state
 * comes from PxSACT, non-NCQ from PxCI), and diagnose spurious
 * interrupts.  NOTE(review): the sactive test selecting between the
 * two qc_active reads, and several return statements, are elided.
 */
934 static void ahci_host_intr(struct ata_port *ap)
936 void __iomem *mmio = ap->host_set->mmio_base;
937 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
938 struct ata_eh_info *ehi = &ap->eh_info;
939 u32 status, qc_active;
942 status = readl(port_mmio + PORT_IRQ_STAT);
943 writel(status, port_mmio + PORT_IRQ_STAT);
945 if (unlikely(status & PORT_IRQ_ERROR)) {
946 ahci_error_intr(ap, status);
951 qc_active = readl(port_mmio + PORT_SCR_ACT);
953 qc_active = readl(port_mmio + PORT_CMD_ISSUE);
955 rc = ata_qc_complete_multiple(ap, qc_active, NULL);
/* completion bookkeeping went wrong — schedule a reset via EH */
959 ehi->err_mask |= AC_ERR_HSM;
960 ehi->action |= ATA_EH_SOFTRESET;
965 /* hmmm... a spurious interupt */
967 /* some devices send D2H reg with I bit set during NCQ command phase */
968 if (ap->sactive && status & PORT_IRQ_D2H_REG_FIS)
971 /* ignore interim PIO setup fis interrupts */
972 if (ata_tag_valid(ap->active_tag)) {
973 struct ata_queued_cmd *qc =
974 ata_qc_from_tag(ap, ap->active_tag);
976 if (qc && qc->tf.protocol == ATA_PROT_PIO &&
977 (status & PORT_IRQ_PIOS_FIS))
/* genuinely unexplained — log it so the pattern can be reported */
982 ata_port_printk(ap, KERN_INFO, "spurious interrupt "
983 "(irq_stat 0x%x active_tag %d sactive 0x%x)\n",
984 status, ap->active_tag, ap->sactive);
/* no-op: AHCI port IRQ status is acked inside ahci_host_intr() */
987 static void ahci_irq_clear(struct ata_port *ap)
/*
 * Top-level (shared) interrupt handler: read the global HOST_IRQ_STAT,
 * mask it to implemented ports, service each flagged port under the
 * host_set lock, then ack the handled bits back to HOST_IRQ_STAT.
 */
992 static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
994 struct ata_host_set *host_set = dev_instance;
995 struct ahci_host_priv *hpriv;
996 unsigned int i, handled = 0;
998 u32 irq_stat, irq_ack = 0;
1002 hpriv = host_set->private_data;
1003 mmio = host_set->mmio_base;
1005 /* sigh. 0xffffffff is a valid return from h/w */
1006 irq_stat = readl(mmio + HOST_IRQ_STAT);
1007 irq_stat &= hpriv->port_map;
1011 spin_lock(&host_set->lock);
1013 for (i = 0; i < host_set->n_ports; i++) {
1014 struct ata_port *ap;
1016 if (!(irq_stat & (1 << i)))
1019 ap = host_set->ports[i];
1022 VPRINTK("port %u\n", i);
/* IRQ flagged for a port we never enabled — warn (rate-limited) */
1024 VPRINTK("port %u (no irq)\n", i);
1025 if (ata_ratelimit())
1026 dev_printk(KERN_WARNING, host_set->dev,
1027 "interrupt on disabled port %u\n", i);
1030 irq_ack |= (1 << i);
/* write-1-to-clear the serviced port bits */
1034 writel(irq_ack, mmio + HOST_IRQ_STAT);
1038 spin_unlock(&host_set->lock);
1042 return IRQ_RETVAL(handled);
/*
 * Issue a prepared command: for NCQ, first mark the tag in PxSACT,
 * then set the tag's bit in PxCI to start execution; read back to
 * flush the posted write.
 */
1045 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1047 struct ata_port *ap = qc->ap;
1048 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
1050 if (qc->tf.protocol == ATA_PROT_NCQ)
1051 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
1052 writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
1053 readl(port_mmio + PORT_CMD_ISSUE); /* flush */
/* EH freeze: mask all of the port's interrupt sources */
1058 static void ahci_freeze(struct ata_port *ap)
1060 void __iomem *mmio = ap->host_set->mmio_base;
1061 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1064 writel(0, port_mmio + PORT_IRQ_MASK);
/*
 * EH thaw: clear any pending port IRQ status (port-level and the
 * port's bit in the global HOST_IRQ_STAT), then re-enable the default
 * interrupt set.
 */
1067 static void ahci_thaw(struct ata_port *ap)
1069 void __iomem *mmio = ap->host_set->mmio_base;
1070 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1074 tmp = readl(port_mmio + PORT_IRQ_STAT);
1075 writel(tmp, port_mmio + PORT_IRQ_STAT);
1076 writel(1 << ap->id, mmio + HOST_IRQ_STAT);
1078 /* turn IRQ back on */
1079 writel(DEF_PORT_IRQ, port_mmio + PORT_IRQ_MASK);
/*
 * EH entry point: if the port isn't frozen, bounce the DMA engine to
 * flush hardware command state, then run standard libata EH with the
 * AHCI reset/postreset callbacks.
 */
1082 static void ahci_error_handler(struct ata_port *ap)
1084 void __iomem *mmio = ap->host_set->mmio_base;
1085 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1087 if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
1088 /* restart engine */
1089 ahci_stop_engine(port_mmio);
1090 ahci_start_engine(port_mmio);
1093 /* perform recovery */
1094 ata_do_eh(ap, ahci_prereset, ahci_softreset, ahci_hardreset,
/*
 * Cleanup after a libata internal command: tag a failed qc with
 * AC_ERR_OTHER and bounce the DMA engine so the controller forgets the
 * failed command state.
 */
1098 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
1100 struct ata_port *ap = qc->ap;
1101 void __iomem *mmio = ap->host_set->mmio_base;
1102 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1104 if (qc->flags & ATA_QCFLAG_FAILED)
1105 qc->err_mask |= AC_ERR_OTHER;
1108 /* make DMA engine forget about the failed command */
1109 ahci_stop_engine(port_mmio);
1110 ahci_start_engine(port_mmio);
/*
 * Allocate per-port state: the ahci_port_priv struct, libata pad
 * buffer, and one coherent DMA chunk carved into command slots,
 * received-FIS area and command tables; then bring the port online.
 * NOTE(review): error-return lines and the CPU-pointer assignments
 * (pp->cmd_slot / pp->rx_fis / pp->cmd_tbl) are elided in this listing.
 */
1114 static int ahci_port_start(struct ata_port *ap)
1116 struct device *dev = ap->host_set->dev;
1117 struct ahci_host_priv *hpriv = ap->host_set->private_data;
1118 struct ahci_port_priv *pp;
1119 void __iomem *mmio = ap->host_set->mmio_base;
1120 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1125 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
1128 memset(pp, 0, sizeof(*pp));
1130 rc = ata_pad_alloc(ap, dev);
1136 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
1138 ata_pad_free(ap, dev);
1142 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
1145 * First item in chunk of DMA memory: 32-slot command table,
1146 * 32 bytes each in size
1149 pp->cmd_slot_dma = mem_dma;
1151 mem += AHCI_CMD_SLOT_SZ;
1152 mem_dma += AHCI_CMD_SLOT_SZ;
1155 * Second item: Received-FIS area
1158 pp->rx_fis_dma = mem_dma;
1160 mem += AHCI_RX_FIS_SZ;
1161 mem_dma += AHCI_RX_FIS_SZ;
1164 * Third item: data area for storing a single command
1165 * and its scatter-gather table
1168 pp->cmd_tbl_dma = mem_dma;
1170 ap->private_data = pp;
1172 /* initialize port */
1173 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma);
/*
 * Tear down a port: deinit the hardware (warning on failure), then free
 * the coherent DMA chunk and pad buffer allocated in ahci_port_start().
 * NOTE(review): the kfree(pp) for the priv struct is not visible in
 * this listing — confirm against the full source.
 */
1180 static void ahci_port_stop(struct ata_port *ap)
1182 struct device *dev = ap->host_set->dev;
1183 struct ahci_host_priv *hpriv = ap->host_set->private_data;
1184 struct ahci_port_priv *pp = ap->private_data;
1185 void __iomem *mmio = ap->host_set->mmio_base;
1186 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
1187 const char *emsg = NULL;
1190 /* de-initialize port */
1191 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1193 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
1195 ap->private_data = NULL;
1196 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
1197 pp->cmd_slot, pp->cmd_slot_dma);
1198 ata_pad_free(ap, dev);
/*
 * Fill libata's ata_ioports for one port: cmd_addr is the port's MMIO
 * base and scr_addr points at the SATA phy register block (PORT_SCR).
 */
1200 static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
1201 unsigned int port_idx)
1203 VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
1204 base = ahci_port_base_ul(base, port_idx);
1205 VPRINTK("base now==0x%lx\n", base);
1207 port->cmd_addr = base;
1208 port->scr_addr = base + PORT_SCR;
/*
 * One-time controller initialization: global reset, AHCI-mode enable,
 * capability/port-map caching, DMA mask selection, then per-port
 * quiesce and IRQ clearing before enabling the global interrupt.
 * NOTE(review): various error-return lines, the reset-wait delay and
 * the VT8251 probe_ent flag handling are elided in this listing.
 */
1213 static int ahci_host_init(struct ata_probe_ent *probe_ent)
1215 struct ahci_host_priv *hpriv = probe_ent->private_data;
1216 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1217 void __iomem *mmio = probe_ent->mmio_base;
1219 unsigned int i, using_dac;
1221 void __iomem *port_mmio;
/* preserve SMPS (bit 28) + SSS-reported bits across the reset; force SSS on */
1223 cap_save = readl(mmio + HOST_CAP);
1224 cap_save &= ( (1<<28) | (1<<17) );
1225 cap_save |= (1 << 27);
1227 /* global controller reset */
1228 tmp = readl(mmio + HOST_CTL);
1229 if ((tmp & HOST_RESET) == 0) {
1230 writel(tmp | HOST_RESET, mmio + HOST_CTL);
1231 readl(mmio + HOST_CTL); /* flush */
1234 /* reset must complete within 1 second, or
1235 * the hardware should be considered fried.
1239 tmp = readl(mmio + HOST_CTL);
1240 if (tmp & HOST_RESET) {
1241 dev_printk(KERN_ERR, &pdev->dev,
1242 "controller reset failed (0x%x)\n", tmp);
/* enable AHCI mode and restore the saved capability bits */
1246 writel(HOST_AHCI_EN, mmio + HOST_CTL);
1247 (void) readl(mmio + HOST_CTL); /* flush */
1248 writel(cap_save, mmio + HOST_CAP);
1249 writel(0xf, mmio + HOST_PORTS_IMPL);
1250 (void) readl(mmio + HOST_PORTS_IMPL); /* flush */
/* Intel quirk: tweak PCI config word 0x92 (port enable bits) */
1252 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
1255 pci_read_config_word(pdev, 0x92, &tmp16);
1257 pci_write_config_word(pdev, 0x92, tmp16);
/* cache capabilities; CAP bits 4:0 encode (number of ports - 1) */
1260 hpriv->cap = readl(mmio + HOST_CAP);
1261 hpriv->port_map = readl(mmio + HOST_PORTS_IMPL);
1262 probe_ent->n_ports = (hpriv->cap & 0x1f) + 1;
1264 VPRINTK("cap 0x%x port_map 0x%x n_ports %d\n",
1265 hpriv->cap, hpriv->port_map, probe_ent->n_ports);
/* prefer 64-bit DMA when HOST_CAP_64 is set; fall back to 32-bit */
1267 using_dac = hpriv->cap & HOST_CAP_64;
1269 !pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
1270 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1272 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1274 dev_printk(KERN_ERR, &pdev->dev,
1275 "64-bit DMA enable failed\n");
1280 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1282 dev_printk(KERN_ERR, &pdev->dev,
1283 "32-bit DMA enable failed\n");
1286 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1288 dev_printk(KERN_ERR, &pdev->dev,
1289 "32-bit consistent DMA enable failed\n");
/* quiesce every port and clear stale error/IRQ state */
1294 for (i = 0; i < probe_ent->n_ports; i++) {
1295 const char *emsg = NULL;
1297 #if 0 /* BIOSen initialize this incorrectly */
1298 if (!(hpriv->port_map & (1 << i)))
1302 port_mmio = ahci_port_base(mmio, i);
1303 VPRINTK("mmio %p port_mmio %p\n", mmio, port_mmio);
1305 ahci_setup_port(&probe_ent->port[i],
1306 (unsigned long) mmio, i);
1308 /* make sure port is not active */
1309 rc = ahci_deinit_port(port_mmio, hpriv->cap, &emsg);
1311 dev_printk(KERN_WARNING, &pdev->dev,
1312 "%s (%d)\n", emsg, rc);
/* clear SError (write-1-to-clear) */
1315 tmp = readl(port_mmio + PORT_SCR_ERR);
1316 VPRINTK("PORT_SCR_ERR 0x%x\n", tmp);
1317 writel(tmp, port_mmio + PORT_SCR_ERR);
1319 /* clear & turn off port IRQ */
1320 tmp = readl(port_mmio + PORT_IRQ_STAT);
1321 VPRINTK("PORT_IRQ_STAT 0x%x\n", tmp);
1323 writel(tmp, port_mmio + PORT_IRQ_STAT);
1325 writel(1 << i, mmio + HOST_IRQ_STAT);
1326 writel(0, port_mmio + PORT_IRQ_MASK);
/* everything quiesced — enable the global interrupt line */
1329 tmp = readl(mmio + HOST_CTL);
1330 VPRINTK("HOST_CTL 0x%x\n", tmp);
1331 writel(tmp | HOST_IRQ_EN, mmio + HOST_CTL);
1332 tmp = readl(mmio + HOST_CTL);
1333 VPRINTK("HOST_CTL 0x%x\n", tmp);
1335 pci_set_master(pdev);
/*
 * ahci_print_info - log a human-readable summary of the controller.
 * @probe_ent: probe entry with the MMIO base and cached capabilities
 *
 * Reads the HBA version register, decodes the interface-speed field
 * (CAP bits 20-23), inspects the PCI class code at config offset 0x0a
 * to describe the operating mode (0x0106 and 0x0104 are tested; per
 * the PCI class-code assignments these are SATA and RAID -- confirm),
 * then prints slot/port counts and a flag word per capability bit.
 * Diagnostics only; it performs no writes to the hardware.
 * NOTE(review): the assignment of 'cap' (presumably from hpriv->cap)
 * and several format arguments are elided from this excerpt.
 */
1340 static void ahci_print_info(struct ata_probe_ent *probe_ent)
1342 struct ahci_host_priv *hpriv = probe_ent->private_data;
1343 struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
1344 void __iomem *mmio = probe_ent->mmio_base;
1345 u32 vers, cap, impl, speed;
1346 const char *speed_s;
1350 vers = readl(mmio + HOST_VERSION);
1352 impl = hpriv->port_map;
/* Interface speed field: visible branch handles speed == 2. */
1354 speed = (cap >> 20) & 0xf;
1357 else if (speed == 2)
/* PCI class code selects the mode string. */
1362 pci_read_config_word(pdev, 0x0a, &cc);
1365 else if (cc == 0x0106)
1367 else if (cc == 0x0104)
/* Summary line: BCD version, command slots, ports, speed, impl map. */
1372 dev_printk(KERN_INFO, &pdev->dev,
1373 "AHCI %02x%02x.%02x%02x "
1374 "%u slots %u ports %s Gbps 0x%x impl %s mode\n"
1377 (vers >> 24) & 0xff,
1378 (vers >> 16) & 0xff,
1382 ((cap >> 8) & 0x1f) + 1,
/* Capability flags line: one short token per set CAP bit. */
1388 dev_printk(KERN_INFO, &pdev->dev,
1394 cap & (1 << 31) ? "64bit " : "",
1395 cap & (1 << 30) ? "ncq " : "",
1396 cap & (1 << 28) ? "ilck " : "",
1397 cap & (1 << 27) ? "stag " : "",
1398 cap & (1 << 26) ? "pm " : "",
1399 cap & (1 << 25) ? "led " : "",
1401 cap & (1 << 24) ? "clo " : "",
1402 cap & (1 << 19) ? "nz " : "",
1403 cap & (1 << 18) ? "only " : "",
1404 cap & (1 << 17) ? "pmp " : "",
1405 cap & (1 << 15) ? "pio " : "",
1406 cap & (1 << 14) ? "slum " : "",
1407 cap & (1 << 13) ? "part " : ""
/*
 * ahci_init_one - PCI probe routine for AHCI controllers.
 * @pdev: PCI device being probed
 * @ent:  matching ahci_pci_tbl entry; driver_data indexes
 *        ahci_port_info[] to select per-board parameters
 *
 * Applies the JMicron force-AHCI fixup, enables the device and claims
 * its regions, opportunistically enables MSI, maps the AHCI MMIO BAR,
 * allocates and fills the probe entry plus host-private data, runs
 * ahci_host_init(), advertises NCQ when both the board flags and
 * HOST_CAP_NCQ permit it, and registers the host with libata.
 * NOTE(review): the error-unwind labels and several branches are
 * elided from this excerpt; the visible calls at the end are the
 * unwind path (iounmap / disable MSI / release regions / disable dev).
 */
1411 static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1413 static int printed_version;
1414 struct ata_probe_ent *probe_ent = NULL;
1415 struct ahci_host_priv *hpriv;
1417 void __iomem *mmio_base;
1418 unsigned int board_idx = (unsigned int) ent->driver_data;
1419 int have_msi, pci_dev_busy = 0;
/* Sanity check: the queue depth libata may use must fit our slots. */
1424 WARN_ON(ATA_MAX_QUEUE > AHCI_MAX_CMDS);
/* Print the driver version once, on the first probed device. */
1426 if (!printed_version++)
1427 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
1429 /* JMicron-specific fixup: make sure we're in AHCI mode */
1430 /* This is protected from races with ata_jmicron by the pci probe
1432 if (pdev->vendor == PCI_VENDOR_ID_JMICRON) {
1433 /* AHCI enable, AHCI on function 0 */
1434 pci_write_config_byte(pdev, 0x41, 0xa1);
1435 /* Function 1 is the PATA controller */
1436 if (PCI_FUNC(pdev->devfn))
/* Claim the device and its PCI resources. */
1440 rc = pci_enable_device(pdev);
1444 rc = pci_request_regions(pdev, DRV_NAME);
/* MSI is best-effort; failure falls back to legacy interrupts. */
1450 if (pci_enable_msi(pdev) == 0)
1457 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
1458 if (probe_ent == NULL) {
1463 memset(probe_ent, 0, sizeof(*probe_ent));
1464 probe_ent->dev = pci_dev_to_dev(pdev);
1465 INIT_LIST_HEAD(&probe_ent->node);
/* Map the AHCI ABAR (MMIO register space). */
1467 mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
1468 if (mmio_base == NULL) {
1470 goto err_out_free_ent;
1472 base = (unsigned long) mmio_base;
1474 hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL);
1477 goto err_out_iounmap;
1479 memset(hpriv, 0, sizeof(*hpriv));
/* Copy the board-specific parameters for this chip variant. */
1481 probe_ent->sht = ahci_port_info[board_idx].sht;
1482 probe_ent->host_flags = ahci_port_info[board_idx].host_flags;
1483 probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask;
1484 probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask;
1485 probe_ent->port_ops = ahci_port_info[board_idx].port_ops;
1487 probe_ent->irq = pdev->irq;
1488 probe_ent->irq_flags = IRQF_SHARED;
1489 probe_ent->mmio_base = mmio_base;
1490 probe_ent->private_data = hpriv;
/* Remember MSI state so ahci_remove_one() can tear it down. */
1493 hpriv->flags |= AHCI_FLAG_MSI;
1495 /* initialize adapter */
1496 rc = ahci_host_init(probe_ent);
/* Advertise NCQ only if the board allows it and the HBA supports it. */
1500 if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) &&
1501 (hpriv->cap & HOST_CAP_NCQ))
1502 probe_ent->host_flags |= ATA_FLAG_NCQ;
1504 ahci_print_info(probe_ent);
1506 /* FIXME: check ata_device_add return value */
1507 ata_device_add(probe_ent);
/* Error-unwind path (labels elided): release in reverse order. */
1515 pci_iounmap(pdev, mmio_base);
1520 pci_disable_msi(pdev);
1523 pci_release_regions(pdev);
1526 pci_disable_device(pdev);
/*
 * ahci_remove_one - PCI remove routine.
 * @pdev: PCI device being removed
 *
 * Tears down everything ahci_init_one() set up, in reverse order:
 * detach each libata port, free the shared IRQ, release and drop every
 * SCSI host, unmap the MMIO region, disable MSI if it had been enabled
 * (the AHCI_FLAG_MSI test branch is elided from this excerpt), release
 * the PCI regions, disable the device, and clear the drvdata pointer.
 */
1530 static void ahci_remove_one (struct pci_dev *pdev)
1532 struct device *dev = pci_dev_to_dev(pdev);
1533 struct ata_host_set *host_set = dev_get_drvdata(dev);
1534 struct ahci_host_priv *hpriv = host_set->private_data;
/* Stop all ports before freeing the interrupt they share. */
1538 for (i = 0; i < host_set->n_ports; i++)
1539 ata_port_detach(host_set->ports[i]);
/* Capture MSI state before hpriv's backing memory goes away. */
1541 have_msi = hpriv->flags & AHCI_FLAG_MSI;
1542 free_irq(host_set->irq, host_set);
/* Drop the SCSI host for each port. */
1544 for (i = 0; i < host_set->n_ports; i++) {
1545 struct ata_port *ap = host_set->ports[i];
1547 ata_scsi_release(ap->host);
1548 scsi_host_put(ap->host);
1552 pci_iounmap(pdev, host_set->mmio_base);
1556 pci_disable_msi(pdev);
1559 pci_release_regions(pdev);
1560 pci_disable_device(pdev);
1561 dev_set_drvdata(dev, NULL);
1564 static int __init ahci_init(void)
1566 return pci_module_init(&ahci_pci_driver);
/*
 * ahci_exit - module teardown: unregister the PCI driver, which makes
 * the PCI core invoke ahci_remove_one() for every bound device.
 */
1569 static void __exit ahci_exit(void)
1571 pci_unregister_driver(&ahci_pci_driver);
/* Module metadata, PCI hot-plug/modalias table export, and the
 * module init/exit entry points. */
1575 MODULE_AUTHOR("Jeff Garzik");
1576 MODULE_DESCRIPTION("AHCI SATA low-level driver");
1577 MODULE_LICENSE("GPL");
1578 MODULE_DEVICE_TABLE(pci, ahci_pci_tbl);
1579 MODULE_VERSION(DRV_VERSION);
1581 module_init(ahci_init);
1582 module_exit(ahci_exit);