+ if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) {
+ --limit_warnings;
+ ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
+ ": attempting PIO w/multiple DRQ: "
+ "this may fail due to h/w errata\n");
+ }
+ /* fall through */
+ case ATA_PROT_NODATA:
+ case ATAPI_PROT_PIO:
+ case ATAPI_PROT_NODATA:
+ if (ap->flags & ATA_FLAG_PIO_POLLING)
+ qc->tf.flags |= ATA_TFLAG_POLLING;
+ break;
+ }
+
+ if (qc->tf.flags & ATA_TFLAG_POLLING)
+ port_irqs = ERR_IRQ; /* mask device interrupt when polling */
+ else
+ port_irqs = ERR_IRQ | DONE_IRQ; /* unmask all interrupts */
+
+ /*
+ * We're about to send a non-EDMA capable command to the
+ * port. Turn off EDMA so there won't be problems accessing
+ * the shadow block and other registers.
+ */
+ mv_stop_edma(ap);
+ mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), port_irqs);
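+ /* Route the non-EDMA command to the correct device behind any port multiplier */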
+ mv_pmp_select(ap, qc->dev->link->pmp);
+
+ if (qc->tf.command == ATA_CMD_READ_LOG_EXT) {
+ struct mv_host_priv *hpriv = ap->host->private_data;
+ /*
+ * Workaround for 88SX60x1 FEr SATA#25 (part 2).
+ *
+ * After any NCQ error, the READ_LOG_EXT command
+ * from libata-eh *must* use mv_qc_issue_fis().
+ * Otherwise it might fail, due to chip errata.
+ *
+ * Rather than special-case it, we'll just *always*
+ * use this method here for READ_LOG_EXT, making for
+ * easier testing.
+ */
+ if (IS_GEN_II(hpriv))
+ return mv_qc_issue_fis(qc);
+ }
+ return ata_sff_qc_issue(qc);
+}
+
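+/*
+ * Return the non-polled command currently active on this port, or NULL
+ * when NCQ/EDMA is enabled or the active command uses polling.
+ */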
+static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
+{
+ struct mv_port_priv *pp = ap->private_data;
+ struct ata_queued_cmd *qc;
+
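+ /*
+ * In NCQ/EDMA mode, completions are reported through the CRPB
+ * response queue rather than a single in-flight command.
+ */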
+ if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
+ return NULL;
+ qc = ata_qc_from_tag(ap, ap->link.active_tag);
+ if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
+ return qc;
+ return NULL;
+}
+
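+/*
+ * Error handler for ports attached through a port multiplier.
+ * If mv_handle_fbs_ncq_dev_err() flagged delayed EH, analyze the NCQ
+ * error on each failed PMP link and freeze the port before handing
+ * control to the generic sata_pmp_error_handler().
+ */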
+static void mv_pmp_error_handler(struct ata_port *ap)
+{
+ unsigned int pmp, pmp_map;
+ struct mv_port_priv *pp = ap->private_data;
+
+ if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH) {
+ /*
+ * Perform NCQ error analysis on failed PMPs
+ * before we freeze the port entirely.
+ *
+ * The failed PMPs are marked earlier by mv_pmp_eh_prep().
+ */
+ pmp_map = pp->delayed_eh_pmp_map;
+ pp->pp_flags &= ~MV_PP_FLAG_DELAYED_EH;
+ for (pmp = 0; pmp_map != 0; pmp++) {
+ unsigned int this_pmp = (1 << pmp);
+ if (pmp_map & this_pmp) {
+ struct ata_link *link = &ap->pmp_link[pmp];
+ pmp_map &= ~this_pmp;
+ ata_eh_analyze_ncq_error(link);
+ }
+ }
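+ /* Freeze the port; this also aborts any commands still in flight */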
+ ata_port_freeze(ap);
+ }
+ sata_pmp_error_handler(ap);
+}
+
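+/*
+ * Return a bitmap (one bit per PMP number) of the links that reported
+ * a device error, taken from the upper 16 bits of SATA_TESTCTL.
+ */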
+static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
+{
+ void __iomem *port_mmio = mv_ap_base(ap);
+
+ return readl(port_mmio + SATA_TESTCTL) >> 16;
+}
+
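+/*
+ * For each PMP link flagged in pmp_map, record a device error, request
+ * a reset, and abort the link so that libata EH will recover it.
+ */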
+static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
+{
+ struct ata_eh_info *ehi;
+ unsigned int pmp;
+
+ /*
+ * Initialize EH info for PMPs which saw device errors
+ */
+ ehi = &ap->link.eh_info;
+ for (pmp = 0; pmp_map != 0; pmp++) {
+ unsigned int this_pmp = (1 << pmp);
+ if (pmp_map & this_pmp) {
+ struct ata_link *link = &ap->pmp_link[pmp];
+
+ pmp_map &= ~this_pmp;
+ ehi = &link->eh_info;
+ ata_ehi_clear_desc(ehi);
+ ata_ehi_push_desc(ehi, "dev err");
+ ehi->err_mask |= AC_ERR_DEV;
+ ehi->action |= ATA_EH_RESET;
+ ata_link_abort(link);
+ }
+ }
+}
+
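+/* The EDMA request queue is empty when its hardware in/out pointers match */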
+static int mv_req_q_empty(struct ata_port *ap)
+{
+ void __iomem *port_mmio = mv_ap_base(ap);
+ u32 in_ptr, out_ptr;
+
+ in_ptr = (readl(port_mmio + EDMA_REQ_Q_IN_PTR)
+ >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+ out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR)
+ >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+ return (in_ptr == out_ptr); /* nonzero means the queue is empty */
+}
+
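+/*
+ * Handle a device error that occurs while FBS+NCQ is active on a port:
+ * note the failed PMP links and defer full error handling until the
+ * hardware has drained all outstanding responses.
+ */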
+static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
+{
+ struct mv_port_priv *pp = ap->private_data;
+ int failed_links;
+ unsigned int old_map, new_map;
+
+ /*
+ * Device error during FBS+NCQ operation:
+ *
+ * Set a port flag to prevent further I/O being enqueued.
+ * Leave the EDMA running to drain outstanding commands from this port.
+ * Perform the post-mortem/EH only when all responses are complete.
+ * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
+ */
+ if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
+ pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
+ pp->delayed_eh_pmp_map = 0;
+ }
+ old_map = pp->delayed_eh_pmp_map;
+ new_map = old_map | mv_get_err_pmp_map(ap);
+
+ if (old_map != new_map) {
+ pp->delayed_eh_pmp_map = new_map;
+ mv_pmp_eh_prep(ap, new_map & ~old_map);
+ }
+ failed_links = hweight16(new_map);
+
+ ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
+ "failed_links=%d nr_active_links=%d\n",
+ __func__, pp->delayed_eh_pmp_map,
+ ap->qc_active, failed_links,
+ ap->nr_active_links);
+
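+ /*
+ * Only kick off EH once the remaining active links are all failed
+ * links and the hardware request queue has drained.
+ */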
+ if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
+ mv_process_crpb_entries(ap, pp);