/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <linux/libata.h>

#define DRV_NAME                        "sata_nv"
#define DRV_VERSION                     "3.5"

#define NV_ADMA_DMA_BOUNDARY            0xffffffffUL

enum {
        NV_MMIO_BAR                     = 5,

        NV_PORTS                        = 2,
        NV_PIO_MASK                     = 0x1f,
        NV_MWDMA_MASK                   = 0x07,
        NV_UDMA_MASK                    = 0x7f,
        NV_PORT0_SCR_REG_OFFSET         = 0x00,
        NV_PORT1_SCR_REG_OFFSET         = 0x40,

        /* INT_STATUS/ENABLE */
        NV_INT_STATUS                   = 0x10,
        NV_INT_ENABLE                   = 0x11,
        NV_INT_STATUS_CK804             = 0x440,
        NV_INT_ENABLE_CK804             = 0x441,

        /* INT_STATUS/ENABLE bits */
        NV_INT_DEV                      = 0x01,
        NV_INT_PM                       = 0x02,
        NV_INT_ADDED                    = 0x04,
        NV_INT_REMOVED                  = 0x08,

        NV_INT_PORT_SHIFT               = 4,    /* each port occupies 4 bits */

        NV_INT_ALL                      = 0x0f,
        NV_INT_MASK                     = NV_INT_DEV |
                                          NV_INT_ADDED | NV_INT_REMOVED,

        /* INT_CONFIG */
        NV_INT_CONFIG                   = 0x12,
        NV_INT_CONFIG_METHD             = 0x01, /* 0 = INT, 1 = SMI */

        /* for PCI config register 20 */
        NV_MCP_SATA_CFG_20              = 0x50,
        NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
        NV_MCP_SATA_CFG_20_PORT0_EN     = (1 << 17),
        NV_MCP_SATA_CFG_20_PORT1_EN     = (1 << 16),
        NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
        NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),

        NV_ADMA_MAX_CPBS                = 32,
        NV_ADMA_CPB_SZ                  = 128,
        NV_ADMA_APRD_SZ                 = 16,
        NV_ADMA_SGTBL_LEN               = (1024 - NV_ADMA_CPB_SZ) /
                                           NV_ADMA_APRD_SZ,
        NV_ADMA_SGTBL_TOTAL_LEN         = NV_ADMA_SGTBL_LEN + 5,
        NV_ADMA_SGTBL_SZ                = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
        NV_ADMA_PORT_PRIV_DMA_SZ        = NV_ADMA_MAX_CPBS *
                                           (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

        /* BAR5 offset to ADMA general registers */
        NV_ADMA_GEN                     = 0x400,
        NV_ADMA_GEN_CTL                 = 0x00,
        NV_ADMA_NOTIFIER_CLEAR          = 0x30,

        /* BAR5 offset to ADMA ports */
        NV_ADMA_PORT                    = 0x480,

        /* size of ADMA port register space */
        NV_ADMA_PORT_SIZE               = 0x100,

        /* ADMA port registers */
        NV_ADMA_CTL                     = 0x40,
        NV_ADMA_CPB_COUNT               = 0x42,
        NV_ADMA_NEXT_CPB_IDX            = 0x43,
        NV_ADMA_STAT                    = 0x44,
        NV_ADMA_CPB_BASE_LOW            = 0x48,
        NV_ADMA_CPB_BASE_HIGH           = 0x4C,
        NV_ADMA_APPEND                  = 0x50,
        NV_ADMA_NOTIFIER                = 0x68,
        NV_ADMA_NOTIFIER_ERROR          = 0x6C,

        /* NV_ADMA_CTL register bits */
        NV_ADMA_CTL_HOTPLUG_IEN         = (1 << 0),
        NV_ADMA_CTL_CHANNEL_RESET       = (1 << 5),
        NV_ADMA_CTL_GO                  = (1 << 7),
        NV_ADMA_CTL_AIEN                = (1 << 8),
        NV_ADMA_CTL_READ_NON_COHERENT   = (1 << 11),
        NV_ADMA_CTL_WRITE_NON_COHERENT  = (1 << 12),

        /* CPB response flag bits */
        NV_CPB_RESP_DONE                = (1 << 0),
        NV_CPB_RESP_ATA_ERR             = (1 << 3),
        NV_CPB_RESP_CMD_ERR             = (1 << 4),
        NV_CPB_RESP_CPB_ERR             = (1 << 7),

        /* CPB control flag bits */
        NV_CPB_CTL_CPB_VALID            = (1 << 0),
        NV_CPB_CTL_QUEUE                = (1 << 1),
        NV_CPB_CTL_APRD_VALID           = (1 << 2),
        NV_CPB_CTL_IEN                  = (1 << 3),
        NV_CPB_CTL_FPDMA                = (1 << 4),

        /* APRD flags */
        NV_APRD_WRITE                   = (1 << 1),
        NV_APRD_END                     = (1 << 2),
        NV_APRD_CONT                    = (1 << 3),

        /* NV_ADMA_STAT flags */
        NV_ADMA_STAT_TIMEOUT            = (1 << 0),
        NV_ADMA_STAT_HOTUNPLUG          = (1 << 1),
        NV_ADMA_STAT_HOTPLUG            = (1 << 2),
        NV_ADMA_STAT_CPBERR             = (1 << 4),
        NV_ADMA_STAT_SERROR             = (1 << 5),
        NV_ADMA_STAT_CMD_COMPLETE       = (1 << 6),
        NV_ADMA_STAT_IDLE               = (1 << 8),
        NV_ADMA_STAT_LEGACY             = (1 << 9),
        NV_ADMA_STAT_STOPPED            = (1 << 10),
        NV_ADMA_STAT_DONE               = (1 << 12),
        NV_ADMA_STAT_ERR                = NV_ADMA_STAT_CPBERR |
                                          NV_ADMA_STAT_TIMEOUT,

        /* port flags */
        NV_ADMA_PORT_REGISTER_MODE      = (1 << 0),
        NV_ADMA_ATAPI_SETUP_COMPLETE    = (1 << 1),

        /* MCP55 reg offset */
        NV_CTL_MCP55                    = 0x400,
        NV_INT_STATUS_MCP55             = 0x440,
        NV_INT_ENABLE_MCP55             = 0x444,
        NV_NCQ_REG_MCP55                = 0x448,

        /* MCP55 */
        NV_INT_ALL_MCP55                = 0xffff,
        NV_INT_PORT_SHIFT_MCP55         = 16,   /* each port occupies 16 bits */
        NV_INT_MASK_MCP55               = NV_INT_ALL_MCP55 & 0xfffd,

        /* SWNCQ ENABLE BITS */
        NV_CTL_PRI_SWNCQ                = 0x02,
        NV_CTL_SEC_SWNCQ                = 0x04,

        /* SW NCQ status bits */
        NV_SWNCQ_IRQ_DEV                = (1 << 0),
        NV_SWNCQ_IRQ_PM                 = (1 << 1),
        NV_SWNCQ_IRQ_ADDED              = (1 << 2),
        NV_SWNCQ_IRQ_REMOVED            = (1 << 3),

        NV_SWNCQ_IRQ_BACKOUT            = (1 << 4),
        NV_SWNCQ_IRQ_SDBFIS             = (1 << 5),
        NV_SWNCQ_IRQ_DHREGFIS           = (1 << 6),
        NV_SWNCQ_IRQ_DMASETUP           = (1 << 7),

        NV_SWNCQ_IRQ_HOTPLUG            = NV_SWNCQ_IRQ_ADDED |
                                          NV_SWNCQ_IRQ_REMOVED,

};

/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
        __le64                  addr;
        __le32                  len;
        u8                      flags;
        u8                      packet_len;
        __le16                  reserved;
};

enum nv_adma_regbits {
        CMDEND  = (1 << 15),            /* end of command list */
        WNB     = (1 << 14),            /* wait-not-BSY */
        IGN     = (1 << 13),            /* ignore this entry */
        CS1n    = (1 << (4 + 8)),       /* std. PATA signals follow... */
        DA2     = (1 << (2 + 8)),
        DA1     = (1 << (1 + 8)),
        DA0     = (1 << (0 + 8)),
};

/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd. */
struct nv_adma_cpb {
        u8                      resp_flags;    /* 0 */
        u8                      reserved1;     /* 1 */
        u8                      ctl_flags;     /* 2 */
        /* len is length of taskfile in 64 bit words */
        u8                      len;           /* 3 */
        u8                      tag;           /* 4 */
        u8                      next_cpb_idx;  /* 5 */
        __le16                  reserved2;     /* 6-7 */
        __le16                  tf[12];        /* 8-31 */
        struct nv_adma_prd      aprd[5];       /* 32-111 */
        __le64                  next_aprd;     /* 112-119 */
        __le64                  reserved3;     /* 120-127 */
};


struct nv_adma_port_priv {
        struct nv_adma_cpb      *cpb;
        dma_addr_t              cpb_dma;
        struct nv_adma_prd      *aprd;
        dma_addr_t              aprd_dma;
        void __iomem            *ctl_block;
        void __iomem            *gen_block;
        void __iomem            *notifier_clear_block;
        u64                     adma_dma_mask;
        u8                      flags;
        int                     last_issue_ncq;
};

struct nv_host_priv {
        unsigned long           type;
};

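/* FIFO of deferred NCQ command tags used by the SWNCQ path; defer_bits
   appears to mirror the queued tags as a bitmask. */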
struct defer_queue {
        u32             defer_bits;
        unsigned int    head;
        unsigned int    tail;
        unsigned int    tag[ATA_MAX_QUEUE];
};

enum ncq_saw_flag_list {
        ncq_saw_d2h     = (1U << 0),
        ncq_saw_dmas    = (1U << 1),
        ncq_saw_sdb     = (1U << 2),
        ncq_saw_backout = (1U << 3),
};

struct nv_swncq_port_priv {
        struct ata_prd  *prd;    /* our SG list */
        dma_addr_t      prd_dma; /* and its DMA mapping */
        void __iomem    *sactive_block;
        void __iomem    *irq_block;
        void __iomem    *tag_block;
        u32             qc_active;

        unsigned int    last_issue_tag;

        /* FIFO circular queue to store deferred commands */
        struct defer_queue defer_queue;

        /* for NCQ interrupt analysis */
        u32             dhfis_bits;
        u32             dmafis_bits;
        u32             sdbfis_bits;

        unsigned int    ncq_flags;
};


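/* Test whether the ADMA interrupt bit for a port is set in the general
   control register: bit 19 for port 0, bit 31 (19 + 12) for port 1. */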
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))

static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val);

static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static void nv_error_handler(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif

enum nv_host_type
{
        GENERIC,
        NFORCE2,
        NFORCE3 = NFORCE2,      /* NF2 == NF3 as far as sata_nv is concerned */
        CK804,
        ADMA,
        SWNCQ,
};

static const struct pci_device_id nv_pci_tbl[] = {
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), SWNCQ },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
        { PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

        { } /* terminate list */
};

static struct pci_driver nv_pci_driver = {
        .name                   = DRV_NAME,
        .id_table               = nv_pci_tbl,
        .probe                  = nv_init_one,
#ifdef CONFIG_PM
        .suspend                = ata_pci_device_suspend,
        .resume                 = nv_pci_device_resume,
#endif
        .remove                 = ata_pci_remove_one,
};

static struct scsi_host_template nv_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .can_queue              = ATA_DEF_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = ata_scsi_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_adma_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = NV_ADMA_MAX_CPBS,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = NV_ADMA_SGTBL_TOTAL_LEN,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = NV_ADMA_DMA_BOUNDARY,
        .slave_configure        = nv_adma_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static struct scsi_host_template nv_swncq_sht = {
        .module                 = THIS_MODULE,
        .name                   = DRV_NAME,
        .ioctl                  = ata_scsi_ioctl,
        .queuecommand           = ata_scsi_queuecmd,
        .change_queue_depth     = ata_scsi_change_queue_depth,
        .can_queue              = ATA_MAX_QUEUE,
        .this_id                = ATA_SHT_THIS_ID,
        .sg_tablesize           = LIBATA_MAX_PRD,
        .cmd_per_lun            = ATA_SHT_CMD_PER_LUN,
        .emulated               = ATA_SHT_EMULATED,
        .use_clustering         = ATA_SHT_USE_CLUSTERING,
        .proc_name              = DRV_NAME,
        .dma_boundary           = ATA_DMA_BOUNDARY,
        .slave_configure        = nv_swncq_slave_config,
        .slave_destroy          = ata_scsi_slave_destroy,
        .bios_param             = ata_std_bios_param,
};

static const struct ata_port_operations nv_generic_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = ata_bmdma_freeze,
        .thaw                   = ata_bmdma_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_nf2_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_nf2_freeze,
        .thaw                   = nv_nf2_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
};

static const struct ata_port_operations nv_ck804_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_prep                = ata_qc_prep,
        .qc_issue               = ata_qc_issue_prot,
        .freeze                 = nv_ck804_freeze,
        .thaw                   = nv_ck804_thaw,
        .error_handler          = nv_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = ata_port_start,
        .host_stop              = nv_ck804_host_stop,
};

static const struct ata_port_operations nv_adma_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = nv_adma_tf_read,
        .check_atapi_dma        = nv_adma_check_atapi_dma,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_adma_qc_prep,
        .qc_issue               = nv_adma_qc_issue,
        .freeze                 = nv_adma_freeze,
        .thaw                   = nv_adma_thaw,
        .error_handler          = nv_adma_error_handler,
        .post_internal_cmd      = nv_adma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = nv_adma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
        .port_start             = nv_adma_port_start,
        .port_stop              = nv_adma_port_stop,
#ifdef CONFIG_PM
        .port_suspend           = nv_adma_port_suspend,
        .port_resume            = nv_adma_port_resume,
#endif
        .host_stop              = nv_adma_host_stop,
};

static const struct ata_port_operations nv_swncq_ops = {
        .tf_load                = ata_tf_load,
        .tf_read                = ata_tf_read,
        .exec_command           = ata_exec_command,
        .check_status           = ata_check_status,
        .dev_select             = ata_std_dev_select,
        .bmdma_setup            = ata_bmdma_setup,
        .bmdma_start            = ata_bmdma_start,
        .bmdma_stop             = ata_bmdma_stop,
        .bmdma_status           = ata_bmdma_status,
        .qc_defer               = ata_std_qc_defer,
        .qc_prep                = nv_swncq_qc_prep,
        .qc_issue               = nv_swncq_qc_issue,
        .freeze                 = nv_mcp55_freeze,
        .thaw                   = nv_mcp55_thaw,
        .error_handler          = nv_swncq_error_handler,
        .post_internal_cmd      = ata_bmdma_post_internal_cmd,
        .data_xfer              = ata_data_xfer,
        .irq_clear              = ata_bmdma_irq_clear,
        .irq_on                 = ata_irq_on,
        .scr_read               = nv_scr_read,
        .scr_write              = nv_scr_write,
#ifdef CONFIG_PM
        .port_suspend           = nv_swncq_port_suspend,
        .port_resume            = nv_swncq_port_resume,
#endif
        .port_start             = nv_swncq_port_start,
};

static const struct ata_port_info nv_port_info[] = {
        /* generic */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_generic_ops,
                .irq_handler    = nv_generic_interrupt,
        },
        /* nforce2/3 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_nf2_ops,
                .irq_handler    = nv_nf2_interrupt,
        },
        /* ck804 */
        {
                .sht            = &nv_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_ck804_ops,
                .irq_handler    = nv_ck804_interrupt,
        },
        /* ADMA */
        {
                .sht            = &nv_adma_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_MMIO | ATA_FLAG_NCQ,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_adma_ops,
                .irq_handler    = nv_adma_interrupt,
        },
        /* SWNCQ */
        {
                .sht            = &nv_swncq_sht,
                .flags          = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                  ATA_FLAG_NCQ,
                .pio_mask       = NV_PIO_MASK,
                .mwdma_mask     = NV_MWDMA_MASK,
                .udma_mask      = NV_UDMA_MASK,
                .port_ops       = &nv_swncq_ops,
                .irq_handler    = nv_swncq_interrupt,
        },
};

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

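/* driver-wide enables: ADMA defaults on, software NCQ defaults off */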
static int adma_enabled = 1;
static int swncq_enabled;

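/* Take the port out of ADMA mode and back to legacy register mode: wait
   for the controller to go idle, clear the GO bit, then wait for the
   LEGACY status bit before flagging the port as being in register mode. */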
static void nv_adma_register_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                return;

        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA IDLE, stat=0x%hx\n",
                        status);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        count = 0;
        status = readw(mmio + NV_ADMA_STAT);
        while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                         "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
                         status);

        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
}

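/* Switch the port from register mode into ADMA mode: set the GO bit and
   wait for LEGACY to clear and IDLE to assert. */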
static void nv_adma_mode(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp, status;
        int count = 0;

        if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
                return;

        WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);

        status = readw(mmio + NV_ADMA_STAT);
        while (((status & NV_ADMA_STAT_LEGACY) ||
              !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
                ndelay(50);
                status = readw(mmio + NV_ADMA_STAT);
                count++;
        }
        if (count == 20)
                ata_port_printk(ap, KERN_WARNING,
                        "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
                        status);

        pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
}

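/* Per-device setup. ATAPI devices must use the legacy interface, which
   only supports 32-bit DMA, so DMA parameters are restricted and ADMA is
   disabled on the port when such a device is attached. */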
static int nv_adma_slave_config(struct scsi_device *sdev)
{
        struct ata_port *ap = ata_shost_to_port(sdev->host);
        struct nv_adma_port_priv *pp = ap->private_data;
        struct nv_adma_port_priv *port0, *port1;
        struct scsi_device *sdev0, *sdev1;
        struct pci_dev *pdev = to_pci_dev(ap->host->dev);
        unsigned long segment_boundary, flags;
        unsigned short sg_tablesize;
        int rc;
        int adma_enable;
        u32 current_reg, new_reg, config_mask;

        rc = ata_scsi_slave_config(sdev);

        if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
                /* Not a proper libata device, ignore */
                return rc;

        spin_lock_irqsave(ap->lock, flags);

        if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
                /*
                 * NVIDIA reports that ADMA mode does not support ATAPI commands.
                 * Therefore ATAPI commands are sent through the legacy interface.
                 * However, the legacy interface only supports 32-bit DMA.
                 * Restrict DMA parameters as required by the legacy interface
                 * when an ATAPI device is connected.
                 */
                segment_boundary = ATA_DMA_BOUNDARY;
                /* Subtract 1 since an extra entry may be needed for padding, see
                   libata-scsi.c */
                sg_tablesize = LIBATA_MAX_PRD - 1;

                /* Since the legacy DMA engine is in use, we need to disable ADMA
                   on the port. */
                adma_enable = 0;
                nv_adma_register_mode(ap);
        } else {
                segment_boundary = NV_ADMA_DMA_BOUNDARY;
                sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
                adma_enable = 1;
        }

        pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

        if (ap->port_no == 1)
                config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
                              NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
        else
                config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
                              NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

        if (adma_enable) {
                new_reg = current_reg | config_mask;
                pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
        } else {
                new_reg = current_reg & ~config_mask;
                pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
        }

        if (current_reg != new_reg)
                pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

        port0 = ap->host->ports[0]->private_data;
        port1 = ap->host->ports[1]->private_data;
        sdev0 = ap->host->ports[0]->link.device[0].sdev;
        sdev1 = ap->host->ports[1]->link.device[0].sdev;
        if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
            (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
                /* We have to set the DMA mask to 32-bit if either port is in
                   ATAPI mode, since they are on the same PCI device which is
                   used for DMA mapping. If we set the mask we also need to set
                   the bounce limit on both ports to ensure that the block
                   layer doesn't feed addresses that cause DMA mapping to
                   choke. If either SCSI device is not allocated yet, it's OK
                   since that port will discover its correct setting when it
                   does get allocated.
                   Note: Setting 32-bit mask should not fail. */
                if (sdev0)
                        blk_queue_bounce_limit(sdev0->request_queue,
                                               ATA_DMA_MASK);
                if (sdev1)
                        blk_queue_bounce_limit(sdev1->request_queue,
                                               ATA_DMA_MASK);

                pci_set_dma_mask(pdev, ATA_DMA_MASK);
        } else {
                /* This shouldn't fail as it was set to this value before */
                pci_set_dma_mask(pdev, pp->adma_dma_mask);
                if (sdev0)
                        blk_queue_bounce_limit(sdev0->request_queue,
                                               pp->adma_dma_mask);
                if (sdev1)
                        blk_queue_bounce_limit(sdev1->request_queue,
                                               pp->adma_dma_mask);
        }

        blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
        blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
        ata_port_printk(ap, KERN_INFO,
                "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
                (unsigned long long)*ap->host->dev->dma_mask,
                segment_boundary, sg_tablesize);

        spin_unlock_irqrestore(ap->lock, flags);

        return rc;
}

static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;
        return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
}

static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        /* Other than when internal or pass-through commands are executed,
           the only time this function will be called in ADMA mode will be
           if a command fails. In the failure case we don't care about going
           into register mode with ADMA commands pending, as the commands will
           all shortly be aborted anyway. We assume that NCQ commands are not
           issued via passthrough, which is the only way that switching into
           ADMA mode could abort outstanding commands. */
        nv_adma_register_mode(ap);

        ata_tf_read(ap, tf);
}

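/* Encode a taskfile into CPB form: each 16-bit entry carries a shadow
   register index in the high byte and the value in the low byte. WNB is
   set on the first register write when addresses are loaded, CMDEND marks
   the command entry, and unused slots are padded with IGN. */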
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
        unsigned int idx = 0;

        if (tf->flags & ATA_TFLAG_ISADDR) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->hob_feature | WNB);
                        cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->hob_lbal);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->hob_lbam);
                        cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->hob_lbah);
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature);
                } else
                        cpb[idx++] = cpu_to_le16((ATA_REG_ERR   << 8) | tf->feature | WNB);

                cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAL  << 8) | tf->lbal);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAM  << 8) | tf->lbam);
                cpb[idx++] = cpu_to_le16((ATA_REG_LBAH  << 8) | tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE)
                cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

        cpb[idx++] = cpu_to_le16((ATA_REG_CMD    << 8) | tf->command | CMDEND);

        while (idx < 12)
                cpb[idx++] = cpu_to_le16(IGN);

        return idx;
}

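/* Examine the response flags of one CPB: complete the qc on success, or
   push an EH description and freeze/abort the port on error. Returns
   nonzero if error handling was invoked. */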
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        u8 flags = pp->cpb[cpb_num].resp_flags;

        VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);

        if (unlikely((force_err ||
                     flags & (NV_CPB_RESP_ATA_ERR |
                              NV_CPB_RESP_CMD_ERR |
                              NV_CPB_RESP_CPB_ERR)))) {
                struct ata_eh_info *ehi = &ap->link.eh_info;
                int freeze = 0;

                ata_ehi_clear_desc(ehi);
                __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
                if (flags & NV_CPB_RESP_ATA_ERR) {
                        ata_ehi_push_desc(ehi, "ATA error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CMD_ERR) {
                        ata_ehi_push_desc(ehi, "CMD error");
                        ehi->err_mask |= AC_ERR_DEV;
                } else if (flags & NV_CPB_RESP_CPB_ERR) {
                        ata_ehi_push_desc(ehi, "CPB error");
                        ehi->err_mask |= AC_ERR_SYSTEM;
                        freeze = 1;
                } else {
                        /* notifier error, but no error in CPB flags? */
                        ata_ehi_push_desc(ehi, "unknown");
                        ehi->err_mask |= AC_ERR_OTHER;
                        freeze = 1;
                }
                /* Kill all commands. EH will determine what actually failed. */
                if (freeze)
                        ata_port_freeze(ap);
                else
                        ata_port_abort(ap);
                return 1;
        }

        if (likely(flags & NV_CPB_RESP_DONE)) {
                struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num);
                VPRINTK("CPB flags done, flags=0x%x\n", flags);
                if (likely(qc)) {
                        DPRINTK("Completing qc from tag %d\n", cpb_num);
                        ata_qc_complete(qc);
                } else {
                        struct ata_eh_info *ehi = &ap->link.eh_info;
                        /* Notifier bits set without a command may indicate the drive
                           is misbehaving. Raise host state machine violation on this
                           condition. */
                        ata_port_printk(ap, KERN_ERR,
                                        "notifier for tag %d with no cmd?\n",
                                        cpb_num);
                        ehi->err_mask |= AC_ERR_HSM;
                        ehi->action |= ATA_EH_RESET;
                        ata_port_freeze(ap);
                        return 1;
                }
        }
        return 0;
}

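/* Legacy (non-ADMA) per-port interrupt handling: freeze on hotplug
   events, otherwise hand a device interrupt to ata_host_intr(). */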
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
        struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

        /* freeze if hotplugged */
        if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
                ata_port_freeze(ap);
                return 1;
        }

        /* bail out if not our interrupt */
        if (!(irq_stat & NV_INT_DEV))
                return 0;

        /* DEV interrupt w/ no active qc? */
        if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
                ata_check_status(ap);
                return 1;
        }

        /* handle interrupt */
        return ata_host_intr(ap, qc);
}

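/* Main ADMA interrupt handler. Ports in ATAPI or register mode fall back
   to nv_host_intr(); otherwise the ADMA status and notifier registers are
   read and cleared, and any completed CPBs are processed. */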
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
        struct ata_host *host = dev_instance;
        int i, handled = 0;
        u32 notifier_clears[2];

        spin_lock(&host->lock);

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                notifier_clears[i] = 0;

                if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
                        struct nv_adma_port_priv *pp = ap->private_data;
                        void __iomem *mmio = pp->ctl_block;
                        u16 status;
                        u32 gen_ctl;
                        u32 notifier, notifier_error;

                        /* if ADMA is disabled, use standard ata interrupt handler */
                        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                handled += nv_host_intr(ap, irq_stat);
                                continue;
                        }

                        /* if in ATA register mode, check for standard interrupts */
                        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
                                u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
                                        >> (NV_INT_PORT_SHIFT * i);
                                if (ata_tag_valid(ap->link.active_tag))
                                        /* NV_INT_DEV indication seems unreliable at times
                                           at least in ADMA mode. Force it on always when a
                                           command is active, to prevent losing interrupts. */
                                        irq_stat |= NV_INT_DEV;
                                handled += nv_host_intr(ap, irq_stat);
                        }

                        notifier = readl(mmio + NV_ADMA_NOTIFIER);
                        notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
                        notifier_clears[i] = notifier | notifier_error;

                        gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

                        if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
                            !notifier_error)
                                /* Nothing to do */
                                continue;

                        status = readw(mmio + NV_ADMA_STAT);

                        /* Clear status. Ensure the controller sees the clearing before we start
                           looking at any of the CPB statuses, so that any CPB completions after
                           this point in the handler will raise another interrupt. */
                        writew(status, mmio + NV_ADMA_STAT);
                        readw(mmio + NV_ADMA_STAT); /* flush posted write */
                        rmb();

                        handled++; /* irq handled if we got here */

                        /* freeze if hotplugged or controller error */
                        if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
                                               NV_ADMA_STAT_HOTUNPLUG |
                                               NV_ADMA_STAT_TIMEOUT |
                                               NV_ADMA_STAT_SERROR))) {
                                struct ata_eh_info *ehi = &ap->link.eh_info;

                                ata_ehi_clear_desc(ehi);
                                __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
                                if (status & NV_ADMA_STAT_TIMEOUT) {
                                        ehi->err_mask |= AC_ERR_SYSTEM;
                                        ata_ehi_push_desc(ehi, "timeout");
                                } else if (status & NV_ADMA_STAT_HOTPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hotplug");
                                } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
                                        ata_ehi_hotplugged(ehi);
                                        ata_ehi_push_desc(ehi, "hot unplug");
                                } else if (status & NV_ADMA_STAT_SERROR) {
                                        /* let libata analyze SError and figure out the cause */
                                        ata_ehi_push_desc(ehi, "SError");
                                } else
                                        ata_ehi_push_desc(ehi, "unknown");
                                ata_port_freeze(ap);
                                continue;
                        }

                        if (status & (NV_ADMA_STAT_DONE |
                                      NV_ADMA_STAT_CPBERR |
                                      NV_ADMA_STAT_CMD_COMPLETE)) {
                                u32 check_commands = notifier_clears[i];
                                int pos, error = 0;

                                if (status & NV_ADMA_STAT_CPBERR) {
                                        /* Check all active commands */
                                        if (ata_tag_valid(ap->link.active_tag))
                                                check_commands = 1 << ap->link.active_tag;
                                        else
                                                check_commands = ap->link.sactive;
                                }

                                /* Check CPBs for completed commands */
                                while ((pos = ffs(check_commands)) && !error) {
                                        pos--;
                                        error = nv_adma_check_cpb(ap, pos,
                                                notifier_error & (1 << pos));
                                        check_commands &= ~(1 << pos);
                                }
                        }
                }
        }

        if (notifier_clears[0] || notifier_clears[1]) {
                /* Note: Both notifier clear registers must be written
                   if either is set, even if one is zero, according to NVIDIA. */
                struct nv_adma_port_priv *pp = host->ports[0]->private_data;
                writel(notifier_clears[0], pp->notifier_clear_block);
                pp = host->ports[1]->private_data;
                writel(notifier_clears[1], pp->notifier_clear_block);
        }

        spin_unlock(&host->lock);

        return IRQ_RETVAL(handled);
}

static void nv_adma_freeze(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_freeze(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* Disable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

static void nv_adma_thaw(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        nv_ck804_thaw(ap);

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
                return;

        /* Enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
                mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
}

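/* Clear all pending interrupt state for the port: CK804 notifications,
   ADMA status, and the notifier registers (both ports' notifier clear
   registers must be written together). */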
static void nv_adma_irq_clear(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u32 notifier_clears[2];

        if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
                ata_bmdma_irq_clear(ap);
                return;
        }

        /* clear any outstanding CK804 notifications */
        writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
                ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

        /* clear ADMA status */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* clear notifiers - note both ports need to be written with
           something even though we are only clearing on one */
        if (ap->port_no == 0) {
                notifier_clears[0] = 0xFFFFFFFF;
                notifier_clears[1] = 0;
        } else {
                notifier_clears[0] = 0;
                notifier_clears[1] = 0xFFFFFFFF;
        }
        pp = ap->host->ports[0]->private_data;
        writel(notifier_clears[0], pp->notifier_clear_block);
        pp = ap->host->ports[1]->private_data;
        writel(notifier_clears[1], pp->notifier_clear_block);
}

static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        struct nv_adma_port_priv *pp = qc->ap->private_data;

        if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
                ata_bmdma_post_internal_cmd(qc);
}

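/* Allocate and initialize per-port ADMA state. The CPB array and the
   APRD scatter/gather tables live in a single coherent DMA allocation
   whose base address is programmed into the controller. */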
static int nv_adma_port_start(struct ata_port *ap)
{
        struct device *dev = ap->host->dev;
        struct nv_adma_port_priv *pp;
        int rc;
        void *mem;
        dma_addr_t mem_dma;
        void __iomem *mmio;
        struct pci_dev *pdev = to_pci_dev(dev);
        u16 tmp;

        VPRINTK("ENTER\n");

        /* Ensure DMA mask is set to 32-bit before allocating legacy PRD and
           pad buffers */
        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                return rc;
        rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (rc)
                return rc;

        rc = ata_port_start(ap);
        if (rc)
                return rc;

        pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
        if (!pp)
                return -ENOMEM;

        mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
               ap->port_no * NV_ADMA_PORT_SIZE;
        pp->ctl_block = mmio;
        pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
        pp->notifier_clear_block = pp->gen_block +
               NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

        /* Now that the legacy PRD and padding buffer are allocated we can
           safely raise the DMA mask to allocate the CPB/APRD table.
           These are allowed to fail since we record the mask that ends up
           in effect and apply it as the bounce limit in slave_config later
           if needed. */
        pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        pp->adma_dma_mask = *dev->dma_mask;

        mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
                                  &mem_dma, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
        memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);

        /*
         * First item in chunk of DMA memory:
         * 128-byte command parameter block (CPB)
         * one for each command tag
         */
        pp->cpb     = mem;
        pp->cpb_dma = mem_dma;

        writel(mem_dma & 0xFFFFFFFF,    mmio + NV_ADMA_CPB_BASE_LOW);
        writel((mem_dma >> 16) >> 16,   mmio + NV_ADMA_CPB_BASE_HIGH);

        mem     += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
        mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;

        /*
         * Second item: block of ADMA_SGTBL_LEN s/g entries
         */
        pp->aprd = mem;
        pp->aprd_dma = mem_dma;

        ap->private_data = pp;

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags = NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}

static void nv_adma_port_stop(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        VPRINTK("ENTER\n");
        writew(0, mmio + NV_ADMA_CTL);
}

#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;

        /* Go to register mode - clears GO */
        nv_adma_register_mode(ap);

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* disable interrupt, shut down port */
        writew(0, mmio + NV_ADMA_CTL);

        return 0;
}

static int nv_adma_port_resume(struct ata_port *ap)
{
        struct nv_adma_port_priv *pp = ap->private_data;
        void __iomem *mmio = pp->ctl_block;
        u16 tmp;

        /* set CPB block location */
        writel(pp->cpb_dma & 0xFFFFFFFF,        mmio + NV_ADMA_CPB_BASE_LOW);
        writel((pp->cpb_dma >> 16) >> 16,       mmio + NV_ADMA_CPB_BASE_HIGH);

        /* clear any outstanding interrupt conditions */
        writew(0xffff, mmio + NV_ADMA_STAT);

        /* initialize port variables */
        pp->flags |= NV_ADMA_PORT_REGISTER_MODE;

        /* clear CPB fetch count */
        writew(0, mmio + NV_ADMA_CPB_COUNT);

        /* clear GO for register mode, enable interrupt */
        tmp = readw(mmio + NV_ADMA_CTL);
        writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
                NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);

        tmp = readw(mmio + NV_ADMA_CTL);
        writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */
        udelay(1);
        writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
        readw(mmio + NV_ADMA_CTL);      /* flush posted write */

        return 0;
}
#endif

1329 static void nv_adma_setup_port(struct ata_port *ap)
1330 {
1331         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1332         struct ata_ioports *ioport = &ap->ioaddr;
1333
1334         VPRINTK("ENTER\n");
1335
1336         mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
1337
1338         ioport->cmd_addr        = mmio;
1339         ioport->data_addr       = mmio + (ATA_REG_DATA * 4);
1340         ioport->error_addr      =
1341         ioport->feature_addr    = mmio + (ATA_REG_ERR * 4);
1342         ioport->nsect_addr      = mmio + (ATA_REG_NSECT * 4);
1343         ioport->lbal_addr       = mmio + (ATA_REG_LBAL * 4);
1344         ioport->lbam_addr       = mmio + (ATA_REG_LBAM * 4);
1345         ioport->lbah_addr       = mmio + (ATA_REG_LBAH * 4);
1346         ioport->device_addr     = mmio + (ATA_REG_DEVICE * 4);
1347         ioport->status_addr     =
1348         ioport->command_addr    = mmio + (ATA_REG_STATUS * 4);
1349         ioport->altstatus_addr  =
1350         ioport->ctl_addr        = mmio + 0x20;
1351 }
1352
1353 static int nv_adma_host_init(struct ata_host *host)
1354 {
1355         struct pci_dev *pdev = to_pci_dev(host->dev);
1356         unsigned int i;
1357         u32 tmp32;
1358
1359         VPRINTK("ENTER\n");
1360
1361         /* enable ADMA on the ports */
1362         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1363         tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1364                  NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1365                  NV_MCP_SATA_CFG_20_PORT1_EN |
1366                  NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1367
1368         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1369
1370         for (i = 0; i < host->n_ports; i++)
1371                 nv_adma_setup_port(host->ports[i]);
1372
1373         return 0;
1374 }
1375
1376 static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1377                               struct scatterlist *sg,
1378                               int idx,
1379                               struct nv_adma_prd *aprd)
1380 {
1381         u8 flags = 0;
1382         if (qc->tf.flags & ATA_TFLAG_WRITE)
1383                 flags |= NV_APRD_WRITE;
1384         if (idx == qc->n_elem - 1)
1385                 flags |= NV_APRD_END;
1386         else if (idx != 4)
1387                 flags |= NV_APRD_CONT;
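        /* The idx != 4 special case reflects the CPB layout used by
         * nv_adma_fill_sg() below: entries 0-4 live inline in the CPB, and
         * any continuation past entry 4 goes through the CPB's next_aprd
         * pointer rather than the CONT flag. */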
1388
1389         aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
1390         aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
1391         aprd->flags = flags;
1392         aprd->packet_len = 0;
1393 }
1394
1395 static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1396 {
1397         struct nv_adma_port_priv *pp = qc->ap->private_data;
1398         struct nv_adma_prd *aprd;
1399         struct scatterlist *sg;
1400         unsigned int si;
1401
1402         VPRINTK("ENTER\n");
1403
1404         for_each_sg(qc->sg, sg, qc->n_elem, si) {
1405                 aprd = (si < 5) ? &cpb->aprd[si] :
1406                                &pp->aprd[NV_ADMA_SGTBL_LEN * qc->tag + (si-5)];
1407                 nv_adma_fill_aprd(qc, sg, si, aprd);
1408         }
1409         if (si > 5)
1410                 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->tag)));
1411         else
1412                 cpb->next_aprd = cpu_to_le64(0);
1413 }
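/*
 * Worked example of the split above: a 7-segment request fills
 * cpb->aprd[0..4] with segments 0-4, puts segments 5 and 6 at
 * pp->aprd[NV_ADMA_SGTBL_LEN * tag + 0] and [+ 1], and points
 * cpb->next_aprd at that per-tag table; with five segments or fewer,
 * next_aprd stays zero and only the inline entries are used.
 */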
1414
1415 static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1416 {
1417         struct nv_adma_port_priv *pp = qc->ap->private_data;
1418
1419         /* ADMA engine can only be used for non-ATAPI DMA commands,
1420            or interrupt-driven no-data commands. */
1421         if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
1422            (qc->tf.flags & ATA_TFLAG_POLLING))
1423                 return 1;
1424
1425         if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
1426            (qc->tf.protocol == ATA_PROT_NODATA))
1427                 return 0;
1428
1429         return 1;
1430 }
1431
1432 static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1433 {
1434         struct nv_adma_port_priv *pp = qc->ap->private_data;
1435         struct nv_adma_cpb *cpb = &pp->cpb[qc->tag];
1436         u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
1437                        NV_CPB_CTL_IEN;
1438
1439         if (nv_adma_use_reg_mode(qc)) {
1440                 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1441                         (qc->flags & ATA_QCFLAG_DMAMAP));
1442                 nv_adma_register_mode(qc->ap);
1443                 ata_qc_prep(qc);
1444                 return;
1445         }
1446
1447         cpb->resp_flags = NV_CPB_RESP_DONE;
1448         wmb();
1449         cpb->ctl_flags = 0;
1450         wmb();
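        /* Teardown ordering: the response field is marked done and the
         * valid/control flags cleared, each followed by a barrier, before
         * any CPB field below is rewritten; this mirrors the "paranoid"
         * valid-bit ordering at the end of this function. */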
1451
1452         cpb->len                = 3;
1453         cpb->tag                = qc->tag;
1454         cpb->next_cpb_idx       = 0;
1455
1456         /* turn on NCQ flags for NCQ commands */
1457         if (qc->tf.protocol == ATA_PROT_NCQ)
1458                 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1459
1460         VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1461
1462         nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1463
1464         if (qc->flags & ATA_QCFLAG_DMAMAP) {
1465                 nv_adma_fill_sg(qc, cpb);
1466                 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1467         } else
1468                 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
1469
1470         /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1471            until we are finished filling in all of the contents */
1472         wmb();
1473         cpb->ctl_flags = ctl_flags;
1474         wmb();
1475         cpb->resp_flags = 0;
1476 }
1477
1478 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1479 {
1480         struct nv_adma_port_priv *pp = qc->ap->private_data;
1481         void __iomem *mmio = pp->ctl_block;
1482         int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
1483
1484         VPRINTK("ENTER\n");
1485
1486         /* We can't handle result taskfile with NCQ commands, since
1487            retrieving the taskfile switches us out of ADMA mode and would abort
1488            existing commands. */
1489         if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1490                      (qc->flags & ATA_QCFLAG_RESULT_TF))) {
1491                 ata_dev_printk(qc->dev, KERN_ERR,
1492                         "NCQ w/ RESULT_TF not allowed\n");
1493                 return AC_ERR_SYSTEM;
1494         }
1495
1496         if (nv_adma_use_reg_mode(qc)) {
1497                 /* use ATA register mode */
1498                 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
1499                 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1500                         (qc->flags & ATA_QCFLAG_DMAMAP));
1501                 nv_adma_register_mode(qc->ap);
1502                 return ata_qc_issue_prot(qc);
1503         } else
1504                 nv_adma_mode(qc->ap);
1505
1506         /* write append register, command tag in lower 8 bits
1507            and (number of cpbs to append -1) in top 8 bits */
1508         wmb();
1509
1510         if (curr_ncq != pp->last_issue_ncq) {
1511                 /* Seems to need some delay before switching between NCQ and
1512                    non-NCQ commands, else we get command timeouts and such. */
1513                 udelay(20);
1514                 pp->last_issue_ncq = curr_ncq;
1515         }
1516
1517         writew(qc->tag, mmio + NV_ADMA_APPEND);
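        /* Writing just the tag leaves the top byte zero, i.e. a count of
         * (1 - 1): exactly one CPB is appended per issue. */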
1518
1519         DPRINTK("Issued tag %u\n", qc->tag);
1520
1521         return 0;
1522 }
1523
1524 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
1525 {
1526         struct ata_host *host = dev_instance;
1527         unsigned int i;
1528         unsigned int handled = 0;
1529         unsigned long flags;
1530
1531         spin_lock_irqsave(&host->lock, flags);
1532
1533         for (i = 0; i < host->n_ports; i++) {
1534                 struct ata_port *ap;
1535
1536                 ap = host->ports[i];
1537                 if (ap &&
1538                     !(ap->flags & ATA_FLAG_DISABLED)) {
1539                         struct ata_queued_cmd *qc;
1540
1541                         qc = ata_qc_from_tag(ap, ap->link.active_tag);
1542                         if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)))
1543                                 handled += ata_host_intr(ap, qc);
1544                         else
1545                                 // No request in flight?  Read the status
1546                                 // register anyway to ack any pending interrupt.
1547                                 ap->ops->check_status(ap);
1548                 }
1549
1550         }
1551
1552         spin_unlock_irqrestore(&host->lock, flags);
1553
1554         return IRQ_RETVAL(handled);
1555 }
1556
1557 static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
1558 {
1559         int i, handled = 0;
1560
1561         for (i = 0; i < host->n_ports; i++) {
1562                 struct ata_port *ap = host->ports[i];
1563
1564                 if (ap && !(ap->flags & ATA_FLAG_DISABLED))
1565                         handled += nv_host_intr(ap, irq_stat);
1566
1567                 irq_stat >>= NV_INT_PORT_SHIFT;
1568         }
1569
1570         return IRQ_RETVAL(handled);
1571 }
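/*
 * Each port owns a 4-bit nibble of the shared status byte
 * (NV_INT_PORT_SHIFT == 4), which the loop above shifts out one port at
 * a time; for example, a raw irq_stat of 0x14 decodes as NV_INT_ADDED
 * for port 0 and NV_INT_DEV for port 1.
 */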
1572
1573 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
1574 {
1575         struct ata_host *host = dev_instance;
1576         u8 irq_stat;
1577         irqreturn_t ret;
1578
1579         spin_lock(&host->lock);
1580         irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
1581         ret = nv_do_interrupt(host, irq_stat);
1582         spin_unlock(&host->lock);
1583
1584         return ret;
1585 }
1586
1587 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
1588 {
1589         struct ata_host *host = dev_instance;
1590         u8 irq_stat;
1591         irqreturn_t ret;
1592
1593         spin_lock(&host->lock);
1594         irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1595         ret = nv_do_interrupt(host, irq_stat);
1596         spin_unlock(&host->lock);
1597
1598         return ret;
1599 }
1600
1601 static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val)
1602 {
1603         if (sc_reg > SCR_CONTROL)
1604                 return -EINVAL;
1605
1606         *val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
1607         return 0;
1608 }
1609
1610 static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
1611 {
1612         if (sc_reg > SCR_CONTROL)
1613                 return -EINVAL;
1614
1615         iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
1616         return 0;
1617 }
1618
1619 static void nv_nf2_freeze(struct ata_port *ap)
1620 {
1621         void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1622         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1623         u8 mask;
1624
1625         mask = ioread8(scr_addr + NV_INT_ENABLE);
1626         mask &= ~(NV_INT_ALL << shift);
1627         iowrite8(mask, scr_addr + NV_INT_ENABLE);
1628 }
1629
1630 static void nv_nf2_thaw(struct ata_port *ap)
1631 {
1632         void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
1633         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1634         u8 mask;
1635
1636         iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
1637
1638         mask = ioread8(scr_addr + NV_INT_ENABLE);
1639         mask |= (NV_INT_MASK << shift);
1640         iowrite8(mask, scr_addr + NV_INT_ENABLE);
1641 }
1642
1643 static void nv_ck804_freeze(struct ata_port *ap)
1644 {
1645         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1646         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1647         u8 mask;
1648
1649         mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1650         mask &= ~(NV_INT_ALL << shift);
1651         writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1652 }
1653
1654 static void nv_ck804_thaw(struct ata_port *ap)
1655 {
1656         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1657         int shift = ap->port_no * NV_INT_PORT_SHIFT;
1658         u8 mask;
1659
1660         writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1661
1662         mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1663         mask |= (NV_INT_MASK << shift);
1664         writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1665 }
1666
1667 static void nv_mcp55_freeze(struct ata_port *ap)
1668 {
1669         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1670         int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1671         u32 mask;
1672
1673         writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1674
1675         mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1676         mask &= ~(NV_INT_ALL_MCP55 << shift);
1677         writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1678         ata_bmdma_freeze(ap);
1679 }
1680
1681 static void nv_mcp55_thaw(struct ata_port *ap)
1682 {
1683         void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1684         int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1685         u32 mask;
1686
1687         writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1688
1689         mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1690         mask |= (NV_INT_MASK_MCP55 << shift);
1691         writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
1692         ata_bmdma_thaw(ap);
1693 }
1694
1695 static int nv_hardreset(struct ata_link *link, unsigned int *class,
1696                         unsigned long deadline)
1697 {
1698         unsigned int dummy;
1699
1700         /* SATA hardreset fails to retrieve proper device signature on
1701          * some controllers.  Don't classify on hardreset.  For more
1702          * info, see http://bugzilla.kernel.org/show_bug.cgi?id=3352
1703          */
1704         return sata_std_hardreset(link, &dummy, deadline);
1705 }
1706
1707 static void nv_error_handler(struct ata_port *ap)
1708 {
1709         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1710                            nv_hardreset, ata_std_postreset);
1711 }
1712
1713 static void nv_adma_error_handler(struct ata_port *ap)
1714 {
1715         struct nv_adma_port_priv *pp = ap->private_data;
1716         if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
1717                 void __iomem *mmio = pp->ctl_block;
1718                 int i;
1719                 u16 tmp;
1720
1721                 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
1722                         u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1723                         u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1724                         u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1725                         u32 status = readw(mmio + NV_ADMA_STAT);
1726                         u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1727                         u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
1728
1729                         ata_port_printk(ap, KERN_ERR,
1730                                 "EH in ADMA mode, notifier 0x%X "
1731                                 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1732                                 "next cpb count 0x%X next cpb idx 0x%x\n",
1733                                 notifier, notifier_error, gen_ctl, status,
1734                                 cpb_count, next_cpb_idx);
1735
1736                         for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
1737                                 struct nv_adma_cpb *cpb = &pp->cpb[i];
1738                                 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
1739                                     ap->link.sactive & (1 << i))
1740                                         ata_port_printk(ap, KERN_ERR,
1741                                                 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1742                                                 i, cpb->ctl_flags, cpb->resp_flags);
1743                         }
1744                 }
1745
1746                 /* Push us back into port register mode for error handling. */
1747                 nv_adma_register_mode(ap);
1748
1749                 /* Mark all of the CPBs as invalid to prevent them from
1750                    being executed */
1751                 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
1752                         pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1753
1754                 /* clear CPB fetch count */
1755                 writew(0, mmio + NV_ADMA_CPB_COUNT);
1756
1757                 /* Reset channel */
1758                 tmp = readw(mmio + NV_ADMA_CTL);
1759                 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1760                 readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1761                 udelay(1);
1762                 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
1763                 readw(mmio + NV_ADMA_CTL);      /* flush posted write */
1764         }
1765
1766         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1767                            nv_hardreset, ata_std_postreset);
1768 }
1769
1770 static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1771 {
1772         struct nv_swncq_port_priv *pp = ap->private_data;
1773         struct defer_queue *dq = &pp->defer_queue;
1774
1775         /* queue is full */
1776         WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
1777         dq->defer_bits |= (1 << qc->tag);
1778         dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->tag;
1779 }
1780
1781 static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1782 {
1783         struct nv_swncq_port_priv *pp = ap->private_data;
1784         struct defer_queue *dq = &pp->defer_queue;
1785         unsigned int tag;
1786
1787         if (dq->head == dq->tail)       /* null queue */
1788                 return NULL;
1789
1790         tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1791         dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1792         WARN_ON(!(dq->defer_bits & (1 << tag)));
1793         dq->defer_bits &= ~(1 << tag);
1794
1795         return ata_qc_from_tag(ap, tag);
1796 }
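/*
 * The defer queue is a power-of-two ring: head and tail are free-running
 * counters masked with (ATA_MAX_QUEUE - 1) on access, so head == tail
 * means empty and tail - head == ATA_MAX_QUEUE means full, while
 * defer_bits mirrors the queued tags for the bitmask tests in the issue
 * and completion paths below.
 */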
1797
1798 static void nv_swncq_fis_reinit(struct ata_port *ap)
1799 {
1800         struct nv_swncq_port_priv *pp = ap->private_data;
1801
1802         pp->dhfis_bits = 0;
1803         pp->dmafis_bits = 0;
1804         pp->sdbfis_bits = 0;
1805         pp->ncq_flags = 0;
1806 }
1807
1808 static void nv_swncq_pp_reinit(struct ata_port *ap)
1809 {
1810         struct nv_swncq_port_priv *pp = ap->private_data;
1811         struct defer_queue *dq = &pp->defer_queue;
1812
1813         dq->head = 0;
1814         dq->tail = 0;
1815         dq->defer_bits = 0;
1816         pp->qc_active = 0;
1817         pp->last_issue_tag = ATA_TAG_POISON;
1818         nv_swncq_fis_reinit(ap);
1819 }
1820
1821 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1822 {
1823         struct nv_swncq_port_priv *pp = ap->private_data;
1824
1825         writew(fis, pp->irq_block);
1826 }
1827
1828 static void __ata_bmdma_stop(struct ata_port *ap)
1829 {
1830         struct ata_queued_cmd qc;
1831
1832         qc.ap = ap;
1833         ata_bmdma_stop(&qc);
1834 }
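/*
 * ata_bmdma_stop() only needs qc->ap, so a stack-local dummy command is
 * enough to stop the engine when no real qc is at hand.
 */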
1835
1836 static void nv_swncq_ncq_stop(struct ata_port *ap)
1837 {
1838         struct nv_swncq_port_priv *pp = ap->private_data;
1839         unsigned int i;
1840         u32 sactive;
1841         u32 done_mask;
1842
1843         ata_port_printk(ap, KERN_ERR,
1844                         "EH in SWNCQ mode, QC:qc_active 0x%X sactive 0x%X\n",
1845                         ap->qc_active, ap->link.sactive);
1846         ata_port_printk(ap, KERN_ERR,
1847                 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n  "
1848                 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1849                 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1850                 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1851
1852         ata_port_printk(ap, KERN_ERR, "ATA_REG 0x%X ERR_REG 0x%X\n",
1853                         ap->ops->check_status(ap),
1854                         ioread8(ap->ioaddr.error_addr));
1855
1856         sactive = readl(pp->sactive_block);
1857         done_mask = pp->qc_active ^ sactive;
1858
1859         ata_port_printk(ap, KERN_ERR, "tag : dhfis dmafis sdbfis sactive\n");
1860         for (i = 0; i < ATA_MAX_QUEUE; i++) {
1861                 u8 err = 0;
1862                 if (pp->qc_active & (1 << i))
1863                         err = 0;
1864                 else if (done_mask & (1 << i))
1865                         err = 1;
1866                 else
1867                         continue;
1868
1869                 ata_port_printk(ap, KERN_ERR,
1870                                 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1871                                 (pp->dhfis_bits >> i) & 0x1,
1872                                 (pp->dmafis_bits >> i) & 0x1,
1873                                 (pp->sdbfis_bits >> i) & 0x1,
1874                                 (sactive >> i) & 0x1,
1875                                 (err ? "error! tag doesn't exist" : " "));
1876         }
1877
1878         nv_swncq_pp_reinit(ap);
1879         ap->ops->irq_clear(ap);
1880         __ata_bmdma_stop(ap);
1881         nv_swncq_irq_clear(ap, 0xffff);
1882 }
1883
1884 static void nv_swncq_error_handler(struct ata_port *ap)
1885 {
1886         struct ata_eh_context *ehc = &ap->link.eh_context;
1887
1888         if (ap->link.sactive) {
1889                 nv_swncq_ncq_stop(ap);
1890                 ehc->i.action |= ATA_EH_RESET;
1891         }
1892
1893         ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
1894                            nv_hardreset, ata_std_postreset);
1895 }
1896
1897 #ifdef CONFIG_PM
1898 static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1899 {
1900         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1901         u32 tmp;
1902
1903         /* clear irq */
1904         writel(~0, mmio + NV_INT_STATUS_MCP55);
1905
1906         /* disable irq */
1907         writel(0, mmio + NV_INT_ENABLE_MCP55);
1908
1909         /* disable swncq */
1910         tmp = readl(mmio + NV_CTL_MCP55);
1911         tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1912         writel(tmp, mmio + NV_CTL_MCP55);
1913
1914         return 0;
1915 }
1916
1917 static int nv_swncq_port_resume(struct ata_port *ap)
1918 {
1919         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1920         u32 tmp;
1921
1922         /* clear irq */
1923         writel(~0, mmio + NV_INT_STATUS_MCP55);
1924
1925         /* enable irq */
1926         writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1927
1928         /* enable swncq */
1929         tmp = readl(mmio + NV_CTL_MCP55);
1930         writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1931
1932         return 0;
1933 }
1934 #endif
1935
1936 static void nv_swncq_host_init(struct ata_host *host)
1937 {
1938         u32 tmp;
1939         void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1940         struct pci_dev *pdev = to_pci_dev(host->dev);
1941         u8 regval;
1942
1943         /* disable ECO 398 */
1944         pci_read_config_byte(pdev, 0x7f, &regval);
1945         regval &= ~(1 << 7);
1946         pci_write_config_byte(pdev, 0x7f, regval);
1947
1948         /* enable swncq */
1949         tmp = readl(mmio + NV_CTL_MCP55);
1950         VPRINTK("HOST_CTL:0x%X\n", tmp);
1951         writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1952
1953         /* enable irq intr */
1954         tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1955         VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1956         writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1957
1958         /* clear port irq */
1959         writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1960 }
1961
1962 static int nv_swncq_slave_config(struct scsi_device *sdev)
1963 {
1964         struct ata_port *ap = ata_shost_to_port(sdev->host);
1965         struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1966         struct ata_device *dev;
1967         int rc;
1968         u8 rev;
1969         u8 check_maxtor = 0;
1970         unsigned char model_num[ATA_ID_PROD_LEN + 1];
1971
1972         rc = ata_scsi_slave_config(sdev);
1973         if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1974                 /* Not a proper libata device, ignore */
1975                 return rc;
1976
1977         dev = &ap->link.device[sdev->id];
1978         if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1979                 return rc;
1980
1981         /* if MCP51 and Maxtor, then disable ncq */
1982         if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1983                 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1984                 check_maxtor = 1;
1985
1986         /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1987         if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1988                 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1989                 pci_read_config_byte(pdev, 0x8, &rev);
1990                 if (rev <= 0xa2)
1991                         check_maxtor = 1;
1992         }
1993
1994         if (!check_maxtor)
1995                 return rc;
1996
1997         ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1998
1999         if (strncmp(model_num, "Maxtor", 6) == 0) {
2000                 ata_scsi_change_queue_depth(sdev, 1);
2001                 ata_dev_printk(dev, KERN_NOTICE,
2002                         "Disabling SWNCQ mode (depth %x)\n", sdev->queue_depth);
2003         }
2004
2005         return rc;
2006 }
2007
2008 static int nv_swncq_port_start(struct ata_port *ap)
2009 {
2010         struct device *dev = ap->host->dev;
2011         void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
2012         struct nv_swncq_port_priv *pp;
2013         int rc;
2014
2015         rc = ata_port_start(ap);
2016         if (rc)
2017                 return rc;
2018
2019         pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
2020         if (!pp)
2021                 return -ENOMEM;
2022
2023         pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
2024                                       &pp->prd_dma, GFP_KERNEL);
2025         if (!pp->prd)
2026                 return -ENOMEM;
2027         memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
2028
2029         ap->private_data = pp;
2030         pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
2031         pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
2032         pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
2033
2034         return 0;
2035 }
2036
2037 static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
2038 {
2039         if (qc->tf.protocol != ATA_PROT_NCQ) {
2040                 ata_qc_prep(qc);
2041                 return;
2042         }
2043
2044         if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2045                 return;
2046
2047         nv_swncq_fill_sg(qc);
2048 }
2049
2050 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
2051 {
2052         struct ata_port *ap = qc->ap;
2053         struct scatterlist *sg;
2054         struct nv_swncq_port_priv *pp = ap->private_data;
2055         struct ata_prd *prd;
2056         unsigned int si, idx;
2057
2058         prd = pp->prd + ATA_MAX_PRD * qc->tag;
2059
2060         idx = 0;
2061         for_each_sg(qc->sg, sg, qc->n_elem, si) {
2062                 u32 addr, offset;
2063                 u32 sg_len, len;
2064
2065                 addr = (u32)sg_dma_address(sg);
2066                 sg_len = sg_dma_len(sg);
2067
2068                 while (sg_len) {
2069                         offset = addr & 0xffff;
2070                         len = sg_len;
2071                         if ((offset + sg_len) > 0x10000)
2072                                 len = 0x10000 - offset;
2073
2074                         prd[idx].addr = cpu_to_le32(addr);
2075                         prd[idx].flags_len = cpu_to_le32(len & 0xffff);
2076
2077                         idx++;
2078                         sg_len -= len;
2079                         addr += len;
2080                 }
2081         }
2082
2083         prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
2084 }
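/*
 * Worked example of the boundary split above: a 12KB segment at bus
 * address 0x0ffff000 becomes two PRD entries, 4KB at 0x0ffff000 (up to
 * the 64KB boundary) and 8KB at 0x10000000.  A full 0x10000 length is
 * stored as 0 in flags_len, the standard BMDMA encoding for 64KB.
 */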
2085
2086 static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2087                                           struct ata_queued_cmd *qc)
2088 {
2089         struct nv_swncq_port_priv *pp = ap->private_data;
2090
2091         if (qc == NULL)
2092                 return 0;
2093
2094         DPRINTK("Enter\n");
2095
2096         writel((1 << qc->tag), pp->sactive_block);
2097         pp->last_issue_tag = qc->tag;
2098         pp->dhfis_bits &= ~(1 << qc->tag);
2099         pp->dmafis_bits &= ~(1 << qc->tag);
2100         pp->qc_active |= (0x1 << qc->tag);
2101
2102         ap->ops->tf_load(ap, &qc->tf);   /* load tf registers */
2103         ap->ops->exec_command(ap, &qc->tf);
2104
2105         DPRINTK("Issued tag %u\n", qc->tag);
2106
2107         return 0;
2108 }
2109
2110 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2111 {
2112         struct ata_port *ap = qc->ap;
2113         struct nv_swncq_port_priv *pp = ap->private_data;
2114
2115         if (qc->tf.protocol != ATA_PROT_NCQ)
2116                 return ata_qc_issue_prot(qc);
2117
2118         DPRINTK("Enter\n");
2119
2120         if (!pp->qc_active)
2121                 nv_swncq_issue_atacmd(ap, qc);
2122         else
2123                 nv_swncq_qc_to_dq(ap, qc);      /* add qc to defer queue */
2124
2125         return 0;
2126 }
2127
2128 static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2129 {
2130         u32 serror;
2131         struct ata_eh_info *ehi = &ap->link.eh_info;
2132
2133         ata_ehi_clear_desc(ehi);
2134
2135         /* SError needs to be cleared; otherwise the port might lock up */
2136         sata_scr_read(&ap->link, SCR_ERROR, &serror);
2137         sata_scr_write(&ap->link, SCR_ERROR, serror);
2138
2139         /* analyze @fis */
2140         if (fis & NV_SWNCQ_IRQ_ADDED)
2141                 ata_ehi_push_desc(ehi, "hot plug");
2142         else if (fis & NV_SWNCQ_IRQ_REMOVED)
2143                 ata_ehi_push_desc(ehi, "hot unplug");
2144
2145         ata_ehi_hotplugged(ehi);
2146
2147         /* okay, let's hand over to EH */
2148         ehi->serror |= serror;
2149
2150         ata_port_freeze(ap);
2151 }
2152
2153 static int nv_swncq_sdbfis(struct ata_port *ap)
2154 {
2155         struct ata_queued_cmd *qc;
2156         struct nv_swncq_port_priv *pp = ap->private_data;
2157         struct ata_eh_info *ehi = &ap->link.eh_info;
2158         u32 sactive;
2159         int nr_done = 0;
2160         u32 done_mask;
2161         int i;
2162         u8 host_stat;
2163         u8 lack_dhfis = 0;
2164
2165         host_stat = ap->ops->bmdma_status(ap);
2166         if (unlikely(host_stat & ATA_DMA_ERR)) {
2167                 /* error when transferring data to/from memory */
2168                 ata_ehi_clear_desc(ehi);
2169                 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2170                 ehi->err_mask |= AC_ERR_HOST_BUS;
2171                 ehi->action |= ATA_EH_RESET;
2172                 return -EINVAL;
2173         }
2174
2175         ap->ops->irq_clear(ap);
2176         __ata_bmdma_stop(ap);
2177
2178         sactive = readl(pp->sactive_block);
2179         done_mask = pp->qc_active ^ sactive;
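        /* A tag the device has completed is still set in qc_active but now
         * clear in SActive, so the XOR yields the finished tags; a bit set
         * in SActive without a matching qc_active bit is an illegal
         * transition, caught just below. */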
2180
2181         if (unlikely(done_mask & sactive)) {
2182                 ata_ehi_clear_desc(ehi);
2183                 ata_ehi_push_desc(ehi, "illegal SWNCQ:qc_active transition "
2184                                   "(%08x->%08x)", pp->qc_active, sactive);
2185                 ehi->err_mask |= AC_ERR_HSM;
2186                 ehi->action |= ATA_EH_RESET;
2187                 return -EINVAL;
2188         }
2189         for (i = 0; i < ATA_MAX_QUEUE; i++) {
2190                 if (!(done_mask & (1 << i)))
2191                         continue;
2192
2193                 qc = ata_qc_from_tag(ap, i);
2194                 if (qc) {
2195                         ata_qc_complete(qc);
2196                         pp->qc_active &= ~(1 << i);
2197                         pp->dhfis_bits &= ~(1 << i);
2198                         pp->dmafis_bits &= ~(1 << i);
2199                         pp->sdbfis_bits |= (1 << i);
2200                         nr_done++;
2201                 }
2202         }
2203
2204         if (!ap->qc_active) {
2205                 DPRINTK("over\n");
2206                 nv_swncq_pp_reinit(ap);
2207                 return nr_done;
2208         }
2209
2210         if (pp->qc_active & pp->dhfis_bits)
2211                 return nr_done;
2212
2213         if ((pp->ncq_flags & ncq_saw_backout) ||
2214             (pp->qc_active ^ pp->dhfis_bits))
2215                 /* if the controller can't get a Device-to-Host Register FIS,
2216                  * the driver needs to reissue the command.
2217                  */
2218                 lack_dhfis = 1;
2219
2220         DPRINTK("id 0x%x QC: qc_active 0x%x, "
2221                 "SWNCQ:qc_active 0x%X defer_bits %X "
2222                 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2223                 ap->print_id, ap->qc_active, pp->qc_active,
2224                 pp->defer_queue.defer_bits, pp->dhfis_bits,
2225                 pp->dmafis_bits, pp->last_issue_tag);
2226
2227         nv_swncq_fis_reinit(ap);
2228
2229         if (lack_dhfis) {
2230                 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2231                 nv_swncq_issue_atacmd(ap, qc);
2232                 return nr_done;
2233         }
2234
2235         if (pp->defer_queue.defer_bits) {
2236                 /* send deferral queue command */
2237                 qc = nv_swncq_qc_from_dq(ap);
2238                 WARN_ON(qc == NULL);
2239                 nv_swncq_issue_atacmd(ap, qc);
2240         }
2241
2242         return nr_done;
2243 }
2244
2245 static inline u32 nv_swncq_tag(struct ata_port *ap)
2246 {
2247         struct nv_swncq_port_priv *pp = ap->private_data;
2248         u32 tag;
2249
2250         tag = readb(pp->tag_block) >> 2;
2251         return (tag & 0x1f);
2252 }
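/*
 * The decoding above assumes the MCP55 tag register holds the active tag
 * in bits 6:2: shifting right by two and masking with 0x1f recovers a
 * 0-31 tag value.
 */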
2253
2254 static int nv_swncq_dmafis(struct ata_port *ap)
2255 {
2256         struct ata_queued_cmd *qc;
2257         unsigned int rw;
2258         u8 dmactl;
2259         u32 tag;
2260         struct nv_swncq_port_priv *pp = ap->private_data;
2261
2262         __ata_bmdma_stop(ap);
2263         tag = nv_swncq_tag(ap);
2264
2265         DPRINTK("dma setup tag 0x%x\n", tag);
2266         qc = ata_qc_from_tag(ap, tag);
2267
2268         if (unlikely(!qc))
2269                 return 0;
2270
2271         rw = qc->tf.flags & ATA_TFLAG_WRITE;
2272
2273         /* load PRD table addr. */
2274         iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->tag,
2275                   ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2276
2277         /* specify data direction, triple-check start bit is clear */
2278         dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2279         dmactl &= ~ATA_DMA_WR;
2280         if (!rw)
2281                 dmactl |= ATA_DMA_WR;
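        /* ATA_DMA_WR is from the bus master's point of view: it is set for
         * device-to-host transfers (the engine writes to memory), hence
         * the inverted test on the taskfile WRITE flag. */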
2282
2283         iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2284
2285         return 1;
2286 }
2287
2288 static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
2289 {
2290         struct nv_swncq_port_priv *pp = ap->private_data;
2291         struct ata_queued_cmd *qc;
2292         struct ata_eh_info *ehi = &ap->link.eh_info;
2293         u32 serror;
2294         u8 ata_stat;
2295         int rc = 0;
2296
2297         ata_stat = ap->ops->check_status(ap);
2298         nv_swncq_irq_clear(ap, fis);
2299         if (!fis)
2300                 return;
2301
2302         if (ap->pflags & ATA_PFLAG_FROZEN)
2303                 return;
2304
2305         if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
2306                 nv_swncq_hotplug(ap, fis);
2307                 return;
2308         }
2309
2310         if (!pp->qc_active)
2311                 return;
2312
2313         if (ap->ops->scr_read(ap, SCR_ERROR, &serror))
2314                 return;
2315         ap->ops->scr_write(ap, SCR_ERROR, serror);
2316
2317         if (ata_stat & ATA_ERR) {
2318                 ata_ehi_clear_desc(ehi);
2319                 ata_ehi_push_desc(ehi, "ATA error. fis:0x%X", fis);
2320                 ehi->err_mask |= AC_ERR_DEV;
2321                 ehi->serror |= serror;
2322                 ehi->action |= ATA_EH_RESET;
2323                 ata_port_freeze(ap);
2324                 return;
2325         }
2326
2327         if (fis & NV_SWNCQ_IRQ_BACKOUT) {
2328                 /* On a backout interrupt, the driver must
2329                  * reissue the command some time later.
2330                  */
2331                 pp->ncq_flags |= ncq_saw_backout;
2332         }
2333
2334         if (fis & NV_SWNCQ_IRQ_SDBFIS) {
2335                 pp->ncq_flags |= ncq_saw_sdb;
2336                 DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
2337                         "dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
2338                         ap->print_id, pp->qc_active, pp->dhfis_bits,
2339                         pp->dmafis_bits, readl(pp->sactive_block));
2340                 rc = nv_swncq_sdbfis(ap);
2341                 if (rc < 0)
2342                         goto irq_error;
2343         }
2344
2345         if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
2346                 /* The interrupt indicates the new command
2347                  * was transmitted correctly to the drive.
2348                  */
2349                 pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
2350                 pp->ncq_flags |= ncq_saw_d2h;
2351                 if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
2352                         ata_ehi_push_desc(ehi, "illegal fis transaction");
2353                         ehi->err_mask |= AC_ERR_HSM;
2354                         ehi->action |= ATA_EH_RESET;
2355                         goto irq_error;
2356                 }
2357
2358                 if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
2359                     !(pp->ncq_flags & ncq_saw_dmas)) {
2360                         ata_stat = ap->ops->check_status(ap);
2361                         if (ata_stat & ATA_BUSY)
2362                                 goto irq_exit;
2363
2364                         if (pp->defer_queue.defer_bits) {
2365                                 DPRINTK("send next command\n");
2366                                 qc = nv_swncq_qc_from_dq(ap);
2367                                 nv_swncq_issue_atacmd(ap, qc);
2368                         }
2369                 }
2370         }
2371
2372         if (fis & NV_SWNCQ_IRQ_DMASETUP) {
2373                 /* program the dma controller with appropriate PRD buffers
2374                  * and start the DMA transfer for requested command.
2375                  */
2376                 pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
2377                 pp->ncq_flags |= ncq_saw_dmas;
2378                 rc = nv_swncq_dmafis(ap);
2379         }
2380
2381 irq_exit:
2382         return;
2383 irq_error:
2384         ata_ehi_push_desc(ehi, "fis:0x%x", fis);
2385         ata_port_freeze(ap);
2386         return;
2387 }
2388
2389 static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2390 {
2391         struct ata_host *host = dev_instance;
2392         unsigned int i;
2393         unsigned int handled = 0;
2394         unsigned long flags;
2395         u32 irq_stat;
2396
2397         spin_lock_irqsave(&host->lock, flags);
2398
2399         irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2400
2401         for (i = 0; i < host->n_ports; i++) {
2402                 struct ata_port *ap = host->ports[i];
2403
2404                 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
2405                         if (ap->link.sactive) {
2406                                 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2407                                 handled = 1;
2408                         } else {
2409                                 if (irq_stat)   /* preserve the hotplug bits */
2410                                         nv_swncq_irq_clear(ap, 0xfff0);
2411
2412                                 handled += nv_host_intr(ap, (u8)irq_stat);
2413                         }
2414                 }
2415                 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2416         }
2417
2418         spin_unlock_irqrestore(&host->lock, flags);
2419
2420         return IRQ_RETVAL(handled);
2421 }
2422
2423 static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2424 {
2425         static int printed_version;
2426         const struct ata_port_info *ppi[] = { NULL, NULL };
2427         struct ata_host *host;
2428         struct nv_host_priv *hpriv;
2429         int rc;
2430         u32 bar;
2431         void __iomem *base;
2432         unsigned long type = ent->driver_data;
2433
2434         // Make sure this is a SATA controller by counting the number of bars
2435         // (NVIDIA SATA controllers will always have six bars).  Otherwise,
2436         // it's an IDE controller and we ignore it.
2437         for (bar = 0; bar < 6; bar++)
2438                 if (pci_resource_start(pdev, bar) == 0)
2439                         return -ENODEV;
2440
2441         if (!printed_version++)
2442                 dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n");
2443
2444         rc = pcim_enable_device(pdev);
2445         if (rc)
2446                 return rc;
2447
2448         /* determine type and allocate host */
2449         if (type == CK804 && adma_enabled) {
2450                 dev_printk(KERN_NOTICE, &pdev->dev, "Using ADMA mode\n");
2451                 type = ADMA;
2452         }
2453
2454         if (type == SWNCQ) {
2455                 if (swncq_enabled)
2456                         dev_printk(KERN_NOTICE, &pdev->dev,
2457                                    "Using SWNCQ mode\n");
2458                 else
2459                         type = GENERIC;
2460         }
2461
2462         ppi[0] = &nv_port_info[type];
2463         rc = ata_pci_prepare_sff_host(pdev, ppi, &host);
2464         if (rc)
2465                 return rc;
2466
2467         hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2468         if (!hpriv)
2469                 return -ENOMEM;
2470         hpriv->type = type;
2471         host->private_data = hpriv;
2472
2473         /* request and iomap NV_MMIO_BAR */
2474         rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2475         if (rc)
2476                 return rc;
2477
2478         /* configure SCR access */
2479         base = host->iomap[NV_MMIO_BAR];
2480         host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2481         host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
2482
2483         /* enable SATA space for CK804 */
2484         if (type >= CK804) {
2485                 u8 regval;
2486
2487                 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2488                 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2489                 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2490         }
2491
2492         /* init ADMA */
2493         if (type == ADMA) {
2494                 rc = nv_adma_host_init(host);
2495                 if (rc)
2496                         return rc;
2497         } else if (type == SWNCQ)
2498                 nv_swncq_host_init(host);
2499
2500         pci_set_master(pdev);
2501         return ata_host_activate(host, pdev->irq, ppi[0]->irq_handler,
2502                                  IRQF_SHARED, ppi[0]->sht);
2503 }
2504
2505 #ifdef CONFIG_PM
2506 static int nv_pci_device_resume(struct pci_dev *pdev)
2507 {
2508         struct ata_host *host = dev_get_drvdata(&pdev->dev);
2509         struct nv_host_priv *hpriv = host->private_data;
2510         int rc;
2511
2512         rc = ata_pci_device_do_resume(pdev);
2513         if (rc)
2514                 return rc;
2515
2516         if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
2517                 if (hpriv->type >= CK804) {
2518                         u8 regval;
2519
2520                         pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2521                         regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2522                         pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2523                 }
2524                 if (hpriv->type == ADMA) {
2525                         u32 tmp32;
2526                         struct nv_adma_port_priv *pp;
2527                         /* enable/disable ADMA on the ports appropriately */
2528                         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2529
2530                         pp = host->ports[0]->private_data;
2531                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2532                                 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2533                                            NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2534                         else
2535                                 tmp32 |=  (NV_MCP_SATA_CFG_20_PORT0_EN |
2536                                            NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
2537                         pp = host->ports[1]->private_data;
2538                         if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
2539                                 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
2540                                            NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2541                         else
2542                                 tmp32 |=  (NV_MCP_SATA_CFG_20_PORT1_EN |
2543                                            NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2544
2545                         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2546                 }
2547         }
2548
2549         ata_host_resume(host);
2550
2551         return 0;
2552 }
2553 #endif
2554
2555 static void nv_ck804_host_stop(struct ata_host *host)
2556 {
2557         struct pci_dev *pdev = to_pci_dev(host->dev);
2558         u8 regval;
2559
2560         /* disable SATA space for CK804 */
2561         pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2562         regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2563         pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2564 }
2565
2566 static void nv_adma_host_stop(struct ata_host *host)
2567 {
2568         struct pci_dev *pdev = to_pci_dev(host->dev);
2569         u32 tmp32;
2570
2571         /* disable ADMA on the ports */
2572         pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2573         tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2574                    NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2575                    NV_MCP_SATA_CFG_20_PORT1_EN |
2576                    NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2577
2578         pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2579
2580         nv_ck804_host_stop(host);
2581 }
2582
2583 static int __init nv_init(void)
2584 {
2585         return pci_register_driver(&nv_pci_driver);
2586 }
2587
2588 static void __exit nv_exit(void)
2589 {
2590         pci_unregister_driver(&nv_pci_driver);
2591 }
2592
2593 module_init(nv_init);
2594 module_exit(nv_exit);
2595 module_param_named(adma, adma_enabled, bool, 0444);
2596 MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: true)");
2597 module_param_named(swncq, swncq_enabled, bool, 0444);
2598 MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: false)");
2599