/* fill a qtd, returning how much of the buffer we were able to queue up */
static int
-qtd_fill (struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
- int token, int maxpacket)
+qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
+ size_t len, int token, int maxpacket)
{
int i, count;
u64 addr = buf;
/* one buffer entry per 4K ... first might be short or unaligned */
- qtd->hw_buf [0] = cpu_to_le32 ((u32)addr);
- qtd->hw_buf_hi [0] = cpu_to_le32 ((u32)(addr >> 32));
+ qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr);
+ qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32));
count = 0x1000 - (buf & 0x0fff); /* rest of that page */
if (likely (len < count)) /* ... iff needed */
count = len;
/* per-qtd limit: from 16K to 20K (best alignment) */
for (i = 1; count < len && i < 5; i++) {
addr = buf;
- qtd->hw_buf [i] = cpu_to_le32 ((u32)addr);
- qtd->hw_buf_hi [i] = cpu_to_le32 ((u32)(addr >> 32));
+ qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr);
+ qtd->hw_buf_hi[i] = cpu_to_hc32(ehci,
+ (u32)(addr >> 32));
buf += 0x1000;
if ((count + 0x1000) < len)
count += 0x1000;
if (count != len)
count -= (count % maxpacket);
}
- qtd->hw_token = cpu_to_le32 ((count << 16) | token);
+ qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token);
qtd->length = count;
return count;
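/* A rough worked example of that limit: a qtd carries five buffer
 * pointers, one per 4K page.  With a page-aligned buf the first pointer
 * covers a full 0x1000 bytes, so the qtd can map 5 * 4K = 20K; with buf
 * at page offset 0xffe it covers only 2 bytes, and the ceiling drops to
 * 4 * 4K + 2, just over 16K.  When this qtd doesn't finish the transfer,
 * count is then rounded down to a maxpacket multiple so the next qtd
 * starts on a packet boundary.
 */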
/* writes to an active overlay are unsafe */
BUG_ON(qh->qh_state != QH_STATE_IDLE);
- qh->hw_qtd_next = QTD_NEXT (qtd->qtd_dma);
- qh->hw_alt_next = EHCI_LIST_END;
+ qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
+ qh->hw_alt_next = EHCI_LIST_END(ehci);
/* Except for control endpoints, we make hardware maintain data
* toggle (like OHCI) ... here (re)initialize the toggle in the QH,
* and set the pseudo-toggle in udev. Only usb_clear_halt() will
* ever clear it.
*/
- if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
+ if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
unsigned is_out, epnum;
- is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
- epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
+ is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
+ epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
- qh->hw_token &= ~__constant_cpu_to_le32 (QTD_TOGGLE);
+ qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
usb_settoggle (qh->dev, epnum, is_out, 1);
}
}
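/* Decoding the magic numbers above (per the EHCI 1.0 spec, as best I
 * recall): bit 14 of hw_info1 is DTC, "data toggle control"; this driver
 * sets it only for control endpoints, so a clear bit means the QH itself
 * carries the toggle.  Bits 9:8 of hw_token are the PID code (0 OUT,
 * 1 IN, 2 SETUP), so testing bit 8 separates IN from OUT here, and
 * bits 11:8 of hw_info1 hold the endpoint number.
 */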
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb ();
- qh->hw_token &= __constant_cpu_to_le32 (QTD_TOGGLE | QTD_STS_PING);
+ qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
qtd = list_entry (qh->qtd_list.next,
struct ehci_qtd, qtd_list);
/* first qtd may already be partially processed */
- if (cpu_to_le32 (qtd->qtd_dma) == qh->hw_current)
+ if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current)
qtd = NULL;
}
/*-------------------------------------------------------------------------*/
-static void qtd_copy_status (
+static int qtd_copy_status (
struct ehci_hcd *ehci,
struct urb *urb,
size_t length,
u32 token
)
{
+ int status = -EINPROGRESS;
+
/* count IN/OUT bytes, not SETUP (even short packets) */
if (likely (QTD_PID (token) != 2))
urb->actual_length += length - QTD_LENGTH (token);
/* don't modify error codes */
- if (unlikely (urb->status != -EINPROGRESS))
- return;
+ if (unlikely(urb->unlinked))
+ return status;
/* force cleanup after short read; not always an error */
if (unlikely (IS_SHORT_READ (token)))
- urb->status = -EREMOTEIO;
+ status = -EREMOTEIO;
/* serious "can't proceed" faults reported by the hardware */
if (token & QTD_STS_HALT) {
if (token & QTD_STS_BABBLE) {
/* FIXME "must" disable babbling device's port too */
- urb->status = -EOVERFLOW;
+ status = -EOVERFLOW;
} else if (token & QTD_STS_MMF) {
/* fs/ls interrupt xfer missed the complete-split */
- urb->status = -EPROTO;
+ status = -EPROTO;
} else if (token & QTD_STS_DBE) {
- urb->status = (QTD_PID (token) == 1) /* IN ? */
+ status = (QTD_PID (token) == 1) /* IN ? */
? -ENOSR /* hc couldn't read data */
: -ECOMM; /* hc couldn't write data */
} else if (token & QTD_STS_XACT) {
/* timeout, bad crc, wrong PID, etc; retried */
if (QTD_CERR (token))
- urb->status = -EPIPE;
+ status = -EPIPE;
else {
ehci_dbg (ehci, "devpath %s ep%d%s 3strikes\n",
urb->dev->devpath,
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out");
- urb->status = -EPROTO;
+ status = -EPROTO;
}
/* CERR nonzero + no errors + halt --> stall */
} else if (QTD_CERR (token))
- urb->status = -EPIPE;
+ status = -EPIPE;
else /* unknown */
- urb->status = -EPROTO;
+ status = -EPROTO;
ehci_vdbg (ehci,
"dev%d ep%d%s qtd token %08x --> status %d\n",
usb_pipedevice (urb->pipe),
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
- token, urb->status);
+ token, status);
/* if async CSPLIT failed, try cleaning out the TT buffer */
- if (urb->status != -EPIPE
- && urb->dev->tt && !usb_pipeint (urb->pipe)
+ if (status != -EPIPE
+ && urb->dev->tt
+ && !usb_pipeint(urb->pipe)
&& ((token & QTD_STS_MMF) != 0
|| QTD_CERR(token) == 0)
&& (!ehci_is_TDI(ehci)
urb->dev->ttport, urb->dev->devnum,
usb_pipeendpoint (urb->pipe), token);
#endif /* DEBUG */
- usb_hub_tt_clear_buffer (urb->dev, urb->pipe);
+ /* REVISIT ARC-derived cores don't clear the root
+ * hub TT buffer in this way...
+ */
+ usb_hub_clear_tt_buffer(urb);
}
}
+
+ return status;
}
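/* For reference when reading the decode above, the qtd token layout
 * (matching the QTD_* macros in ehci.h, as best I recall): ACTIVE is
 * bit 7, HALT bit 6, DBE bit 5, BABBLE bit 4, XACT bit 3, MMF bit 2,
 * and PING bit 0; CERR is the two-bit error counter in bits 11:10, the
 * PID code sits in bits 9:8, and the remaining transfer length in
 * bits 30:16.
 */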
static void
-ehci_urb_done (struct ehci_hcd *ehci, struct urb *urb)
+ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
__releases(ehci->lock)
__acquires(ehci->lock)
{
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
/* S-mask in a QH means it's an interrupt urb */
- if ((qh->hw_info2 & __constant_cpu_to_le32 (QH_SMASK)) != 0) {
+ if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
/* ... update hc-wide periodic stats (for usbfs) */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
qh_put (qh);
}
- spin_lock (&urb->lock);
- urb->hcpriv = NULL;
- switch (urb->status) {
- case -EINPROGRESS: /* success */
- urb->status = 0;
- default: /* fault */
- COUNT (ehci->stats.complete);
- break;
- case -EREMOTEIO: /* fault or normal */
- if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
- urb->status = 0;
- COUNT (ehci->stats.complete);
- break;
- case -ECONNRESET: /* canceled */
- case -ENOENT:
- COUNT (ehci->stats.unlink);
- break;
+ if (unlikely(urb->unlinked)) {
+ COUNT(ehci->stats.unlink);
+ } else {
+ /* report non-error and short read status as zero */
+ if (status == -EINPROGRESS || status == -EREMOTEIO)
+ status = 0;
+ COUNT(ehci->stats.complete);
}
- spin_unlock (&urb->lock);
#ifdef EHCI_URB_TRACE
ehci_dbg (ehci,
"%s %s urb %p ep%d%s status %d len %d/%d\n",
- __FUNCTION__, urb->dev->devpath, urb,
+ __func__, urb->dev->devpath, urb,
usb_pipeendpoint (urb->pipe),
usb_pipein (urb->pipe) ? "in" : "out",
- urb->status,
+ status,
urb->actual_length, urb->transfer_buffer_length);
#endif
/* complete() can reenter this HCD */
+ usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
spin_unlock (&ehci->lock);
- usb_hcd_giveback_urb (ehci_to_hcd(ehci), urb);
+ usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
spin_lock (&ehci->lock);
}
* Chases up to qh->hw_current. Returns number of completions called,
* indicating how much "real" work we did.
*/
-#define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT)
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qtd *last = NULL, *end = qh->dummy;
struct list_head *entry, *tmp;
+ int last_status = -EINPROGRESS;
int stopped;
unsigned count = 0;
- int do_status = 0;
u8 state;
+	__hc32			halt = HALT_BIT(ehci);
if (unlikely (list_empty (&qh->qtd_list)))
return count;
/* clean up any state from previous QTD ...*/
if (last) {
if (likely (last->urb != urb)) {
- ehci_urb_done (ehci, last->urb);
+ ehci_urb_done(ehci, last->urb, last_status);
count++;
+ last_status = -EINPROGRESS;
}
ehci_qtd_free (ehci, last);
last = NULL;
/* hardware copies qtd out of qh overlay */
rmb ();
- token = le32_to_cpu (qtd->hw_token);
+ token = hc32_to_cpu(ehci, qtd->hw_token);
/* always clean up qtds the hc de-activated */
+ retry_xacterr:
if ((token & QTD_STS_ACTIVE) == 0) {
+ /* on STALL, error, and short reads this urb must
+ * complete and all its qtds must be recycled.
+ */
if ((token & QTD_STS_HALT) != 0) {
+
+ /* retry transaction errors until we
+ * reach the software xacterr limit
+ */
+ if ((token & QTD_STS_XACT) &&
+ QTD_CERR(token) == 0 &&
+ --qh->xacterrs > 0 &&
+ !urb->unlinked) {
+ ehci_dbg(ehci,
+ "detected XactErr len %zu/%zu retry %d\n",
+ qtd->length - QTD_LENGTH(token), qtd->length,
+ QH_XACTERR_MAX - qh->xacterrs);
+
+ /* reset the token in the qtd and the
+ * qh overlay (which still contains
+ * the qtd) so that we pick up from
+ * where we left off
+ */
+ token &= ~QTD_STS_HALT;
+ token |= QTD_STS_ACTIVE |
+ (EHCI_TUNE_CERR << 10);
+ qtd->hw_token = cpu_to_hc32(ehci,
+ token);
+ wmb();
+ qh->hw_token = cpu_to_hc32(ehci, token);
+ goto retry_xacterr;
+ }
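/* the retry budget is QH_XACTERR_MAX (32 in ehci.h, if I remember
 * right); it is refilled when the qh is linked and again after each
 * retired qtd, near the bottom of this loop.
 */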
stopped = 1;
/* magic dummy for some short reads; qh won't advance.
* that silicon quirk can kick in with this dummy too.
+ *
+ * other short reads won't stop the queue, including
+ * control transfers (status stage handles that) or
+ * most other single-qtd reads ... the queue stops if
+ * URB_SHORT_NOT_OK was set so the driver submitting
+ * the urbs could clean it up.
*/
} else if (IS_SHORT_READ (token)
- && !(qtd->hw_alt_next & EHCI_LIST_END)) {
+ && !(qtd->hw_alt_next
+ & EHCI_LIST_END(ehci))) {
stopped = 1;
goto halt;
}
&& HC_IS_RUNNING (ehci_to_hcd(ehci)->state))) {
break;
+ /* scan the whole queue for unlinks whenever it stops */
} else {
stopped = 1;
- if (unlikely (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)))
- urb->status = -ESHUTDOWN;
+ /* cancel everything if we halt, suspend, etc */
+ if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))
+ last_status = -ESHUTDOWN;
- /* ignore active urbs unless some previous qtd
- * for the urb faulted (including short read) or
- * its urb was canceled. we may patch qh or qtds.
+ /* this qtd is active; skip it unless a previous qtd
+ * for its urb faulted, or its urb was canceled.
*/
- if (likely (urb->status == -EINPROGRESS))
+ else if (last_status == -EINPROGRESS && !urb->unlinked)
continue;
- /* issue status after short control reads */
- if (unlikely (do_status != 0)
- && QTD_PID (token) == 0 /* OUT */) {
- do_status = 0;
- continue;
- }
-
- /* token in overlay may be most current */
+ /* qh unlinked; token in overlay may be most current */
if (state == QH_STATE_IDLE
- && cpu_to_le32 (qtd->qtd_dma)
+ && cpu_to_hc32(ehci, qtd->qtd_dma)
== qh->hw_current)
- token = le32_to_cpu (qh->hw_token);
+ token = hc32_to_cpu(ehci, qh->hw_token);
/* force halt for unlinked or blocked qh, so we'll
* patch the qh later and so that completions can't
* activate it while we "know" it's stopped.
*/
- if ((HALT_BIT & qh->hw_token) == 0) {
+ if ((halt & qh->hw_token) == 0) {
halt:
- qh->hw_token |= HALT_BIT;
+ qh->hw_token |= halt;
wmb ();
}
}
- /* remove it from the queue */
- spin_lock (&urb->lock);
- qtd_copy_status (ehci, urb, qtd->length, token);
- do_status = (urb->status == -EREMOTEIO)
- && usb_pipecontrol (urb->pipe);
- spin_unlock (&urb->lock);
+ /* unless we already know the urb's status, collect qtd status
+ * and update count of bytes transferred. in common short read
+ * cases with only one data qtd (including control transfers),
+ * queue processing won't halt. but with two or more qtds (for
+ * example, with a 32 KB transfer), when the first qtd gets a
+ * short read the second must be removed by hand.
+ */
+ if (last_status == -EINPROGRESS) {
+ last_status = qtd_copy_status(ehci, urb,
+ qtd->length, token);
+ if (last_status == -EREMOTEIO
+ && (qtd->hw_alt_next
+ & EHCI_LIST_END(ehci)))
+ last_status = -EINPROGRESS;
+ }
+ /* if we're removing something not at the queue head,
+ * patch the hardware queue pointer.
+ */
if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
last = list_entry (qtd->qtd_list.prev,
struct ehci_qtd, qtd_list);
last->hw_next = qtd->hw_next;
}
+
+ /* remove qtd; it's recycled after possible urb completion */
list_del (&qtd->qtd_list);
last = qtd;
+
+ /* reinit the xacterr counter for the next qtd */
+ qh->xacterrs = QH_XACTERR_MAX;
}
/* last urb's completion might still need calling */
if (likely (last != NULL)) {
- ehci_urb_done (ehci, last->urb);
+ ehci_urb_done(ehci, last->urb, last_status);
count++;
ehci_qtd_free (ehci, last);
}
* it after fault cleanup, or recovering from silicon wrongly
* overlaying the dummy qtd (which reduces DMA chatter).
*/
- if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
+ if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) {
switch (state) {
case QH_STATE_IDLE:
qh_refresh(ehci, qh);
break;
case QH_STATE_LINKED:
- /* should be rare for periodic transfers,
+ /* We won't refresh a QH that's linked (after the HC
+ * stopped the queue). That avoids a race:
+ * - HC reads first part of QH;
+ * - CPU updates that first part and the token;
+ * - HC reads rest of that QH, including token
+ * Result: HC gets an inconsistent image, and then
+ * DMAs to/from the wrong memory (corrupting it).
+ *
+ * That should be rare for interrupt transfers,
* except maybe high bandwidth ...
*/
- if ((__constant_cpu_to_le32 (QH_SMASK)
+ if ((cpu_to_hc32(ehci, QH_SMASK)
& qh->hw_info2) != 0) {
intr_deschedule (ehci, qh);
(void) qh_schedule (ehci, qh);
is_input = usb_pipein (urb->pipe);
if (usb_pipecontrol (urb->pipe)) {
/* SETUP pid */
- qtd_fill (qtd, urb->setup_dma, sizeof (struct usb_ctrlrequest),
- token | (2 /* "setup" */ << 8), 8);
+ qtd_fill(ehci, qtd, urb->setup_dma,
+ sizeof (struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
/* ... and always at least one more pid */
token ^= QTD_TOGGLE;
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
- qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
+ qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
/* for zero length DATA stages, STATUS is always IN */
for (;;) {
int this_qtd_len;
- this_qtd_len = qtd_fill (qtd, buf, len, token, maxpacket);
+ this_qtd_len = qtd_fill(ehci, qtd, buf, len, token, maxpacket);
len -= this_qtd_len;
buf += this_qtd_len;
+
+ /*
+ * short reads advance to a "magic" dummy instead of the next
+ * qtd ... that forces the queue to stop, for manual cleanup.
+ * (this will usually be overridden later.)
+ */
if (is_input)
qtd->hw_alt_next = ehci->async->hw_alt_next;
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
- qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
+ qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
}
- /* unless the bulk/interrupt caller wants a chance to clean
- * up after short reads, hc should advance qh past this urb
+ /*
+ * unless the caller requires manual cleanup after short reads,
+ * have the alt_next mechanism keep the queue running after the
+ * last data qtd (the only one, for control and most other cases).
*/
if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
|| usb_pipecontrol (urb->pipe)))
- qtd->hw_alt_next = EHCI_LIST_END;
+ qtd->hw_alt_next = EHCI_LIST_END(ehci);
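/* Putting the two alt_next assignments together: each IN data qtd first
 * points its alternate at the "magic" dummy so a short read parks the
 * queue for cleanup, and this final override restores EHCI_LIST_END on
 * the last qtd for the common case, so the controller simply runs past
 * a short read unless the submitter asked for URB_SHORT_NOT_OK
 * semantics.
 */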
/*
* control requests may need a terminating data "status" ack;
if (unlikely (!qtd))
goto cleanup;
qtd->urb = urb;
- qtd_prev->hw_next = QTD_NEXT (qtd->qtd_dma);
+ qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail (&qtd->qtd_list, head);
/* never any data in such packets */
- qtd_fill (qtd, 0, 0, token, 0);
+ qtd_fill(ehci, qtd, 0, 0, token, 0);
}
}
/* by default, enable interrupt on urb completion */
if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT)))
- qtd->hw_token |= __constant_cpu_to_le32 (QTD_IOC);
+ qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
return head;
cleanup:
u32 info1 = 0, info2 = 0;
int is_input, type;
int maxp = 0;
+ struct usb_tt *tt = urb->dev->tt;
if (!qh)
return qh;
type = usb_pipetype (urb->pipe);
maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input);
+ /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
+ * acts like up to 3KB, but is built from smaller packets.
+ */
+ if (max_packet(maxp) > 1024) {
+ ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp));
+ goto done;
+ }
+
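/* A quick worked example of that encoding, assuming the usual
 * hb_mult()/max_packet() helpers from ehci.h: bits 10:0 of the
 * usb_maxpacket() value hold the packet size and bits 12:11 the extra
 * transactions per microframe, so 0x1400 decodes as max_packet = 1024
 * with hb_mult = 3, i.e. up to 3 * 1024 bytes per microframe, while
 * max_packet() itself never legitimately exceeds 1024.
 */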
/* Compute interrupt scheduling parameters just once, and save.
* - allowing for high bandwidth, how many nsec/uframe are used?
* - split transactions need a second CSPLIT uframe; same question
* For control/bulk requests, the HC or TT handles these.
*/
if (type == PIPE_INTERRUPT) {
- qh->usecs = NS_TO_US (usb_calc_bus_time (USB_SPEED_HIGH, is_input, 0,
- hb_mult (maxp) * max_packet (maxp)));
+ qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+ is_input, 0,
+ hb_mult(maxp) * max_packet(maxp)));
qh->start = NO_FRAME;
if (urb->dev->speed == USB_SPEED_HIGH) {
goto done;
}
} else {
- struct usb_tt *tt = urb->dev->tt;
int think_time;
/* gap is f(FS/LS transfer times) */
/* set the address of the TT; for TDI's integrated
* root hub tt, leave it zeroed.
*/
- if (!ehci_is_TDI(ehci)
- || urb->dev->tt->hub !=
- ehci_to_hcd(ehci)->self.root_hub)
- info2 |= urb->dev->tt->hub->devnum << 16;
+ if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub)
+ info2 |= tt->hub->devnum << 16;
/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else if (type == PIPE_BULK) {
info1 |= (EHCI_TUNE_RL_HS << 28);
- info1 |= 512 << 16; /* usb2 fixed maxpacket */
+ /* The USB spec says that high speed bulk endpoints
+ * always use 512 byte maxpacket. But some device
+ * vendors decided to ignore that, and MSFT is happy
+ * to help them do so. So now people expect to use
+ * such nonconformant devices with Linux too; sigh.
+ */
+ info1 |= max_packet(maxp) << 16;
info2 |= (EHCI_TUNE_MULT_HS << 30);
} else { /* PIPE_INTERRUPT */
info1 |= max_packet (maxp) << 16;
/* init as live, toggle clear, advance to dummy */
qh->qh_state = QH_STATE_IDLE;
- qh->hw_info1 = cpu_to_le32 (info1);
- qh->hw_info2 = cpu_to_le32 (info2);
+ qh->hw_info1 = cpu_to_hc32(ehci, info1);
+ qh->hw_info2 = cpu_to_hc32(ehci, info2);
usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
qh_refresh (ehci, qh);
return qh;
static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- __le32 dma = QH_NEXT (qh->qh_dma);
+ __hc32 dma = QH_NEXT(ehci, qh->qh_dma);
struct ehci_qh *head;
/* (re)start the async schedule? */
head = ehci->async;
timer_action_done (ehci, TIMER_ASYNC_OFF);
if (!head->qh_next.qh) {
- u32 cmd = readl (&ehci->regs->command);
+ u32 cmd = ehci_readl(ehci, &ehci->regs->command);
if (!(cmd & CMD_ASE)) {
/* in case a clear of CMD_ASE didn't take yet */
- (void) handshake (&ehci->regs->status, STS_ASS, 0, 150);
+ (void)handshake(ehci, &ehci->regs->status,
+ STS_ASS, 0, 150);
cmd |= CMD_ASE | CMD_RUN;
- writel (cmd, &ehci->regs->command);
+ ehci_writel(ehci, cmd, &ehci->regs->command);
ehci_to_hcd(ehci)->state = HC_STATE_RUNNING;
/* posted write need not be known to HC yet ... */
}
head->qh_next.qh = qh;
head->hw_next = dma;
+ qh->xacterrs = QH_XACTERR_MAX;
qh->qh_state = QH_STATE_LINKED;
/* qtd completions reported later by interrupt */
}
/*-------------------------------------------------------------------------*/
-#define QH_ADDR_MASK __constant_cpu_to_le32(0x7f)
-
/*
* For control/bulk/interrupt, return QH with these TDs appended.
* Allocates and initializes the QH if necessary.
)
{
struct ehci_qh *qh = NULL;
+ __hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
qh = (struct ehci_qh *) *ptr;
if (unlikely (qh == NULL)) {
/* usb_reset_device() briefly reverts to address 0 */
if (usb_pipedevice (urb->pipe) == 0)
- qh->hw_info1 &= ~QH_ADDR_MASK;
+ qh->hw_info1 &= ~qh_addr_mask;
}
/* just one way to queue requests: swap with the dummy qtd.
if (likely (qtd != NULL)) {
struct ehci_qtd *dummy;
dma_addr_t dma;
- __le32 token;
+ __hc32 token;
/* to avoid racing the HC, use the dummy td instead of
* the first td of our list (becomes new dummy). both
* tds stay deactivated until we finish, since the
* HC is allowed to fetch the old dummy (4.10.2).
*/
token = qtd->hw_token;
- qtd->hw_token = HALT_BIT;
+ qtd->hw_token = HALT_BIT(ehci);
wmb ();
dummy = qh->dummy;
list_del (&qtd->qtd_list);
list_add (&dummy->qtd_list, qtd_list);
- __list_splice (qtd_list, qh->qtd_list.prev);
+ list_splice_tail(qtd_list, &qh->qtd_list);
- ehci_qtd_init (qtd, qtd->qtd_dma);
+ ehci_qtd_init(ehci, qtd, qtd->qtd_dma);
qh->dummy = qtd;
/* hc must see the new dummy at list end */
dma = qtd->qtd_dma;
qtd = list_entry (qh->qtd_list.prev,
struct ehci_qtd, qtd_list);
- qtd->hw_next = QTD_NEXT (dma);
+ qtd->hw_next = QTD_NEXT(ehci, dma);
/* let the hc process these next qtds */
wmb ();
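/* The swap, in words (if I read the surrounding code right): the old
 * dummy takes over the contents of the first new qtd, that first qtd is
 * reinitialized to become the new dummy and is linked at the tail, and
 * only the final store of the saved token into the old dummy, just
 * after this wmb, lets the controller see the whole chain at once.
 */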
static int
submit_async (
struct ehci_hcd *ehci,
- struct usb_host_endpoint *ep,
struct urb *urb,
struct list_head *qtd_list,
gfp_t mem_flags
int epnum;
unsigned long flags;
struct ehci_qh *qh = NULL;
- int rc = 0;
+ int rc;
qtd = list_entry (qtd_list->next, struct ehci_qtd, qtd_list);
- epnum = ep->desc.bEndpointAddress;
+ epnum = urb->ep->desc.bEndpointAddress;
#ifdef EHCI_URB_TRACE
ehci_dbg (ehci,
"%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
- __FUNCTION__, urb->dev->devpath, urb,
+ __func__, urb->dev->devpath, urb,
epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
urb->transfer_buffer_length,
- qtd, ep->hcpriv);
+ qtd, urb->ep->hcpriv);
#endif
spin_lock_irqsave (&ehci->lock, flags);
rc = -ESHUTDOWN;
goto done;
}
+ rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb);
+ if (unlikely(rc))
+ goto done;
- qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv);
+ qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv);
if (unlikely(qh == NULL)) {
+ usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
rc = -ENOMEM;
goto done;
}
struct ehci_qh *qh = ehci->reclaim;
struct ehci_qh *next;
- iaa_watchdog_done (ehci);
+ iaa_watchdog_done(ehci);
- // qh->hw_next = cpu_to_le32 (qh->qh_dma);
+	// qh->hw_next = cpu_to_hc32(ehci, qh->qh_dma);
qh->qh_state = QH_STATE_IDLE;
qh->qh_next.qh = NULL;
qh_put (qh); // refcount from reclaim
static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
- int cmd = readl (&ehci->regs->command);
+ int cmd = ehci_readl(ehci, &ehci->regs->command);
struct ehci_qh *prev;
#ifdef DEBUG
if (ehci_to_hcd(ehci)->state != HC_STATE_HALT
&& !ehci->reclaim) {
/* ... and CMD_IAAD clear */
- writel (cmd & ~CMD_ASE, &ehci->regs->command);
+ ehci_writel(ehci, cmd & ~CMD_ASE,
+ &ehci->regs->command);
wmb ();
// handshake later, if we need to
timer_action_done (ehci, TIMER_ASYNC_OFF);
prev->qh_next = qh->qh_next;
wmb ();
- if (unlikely (ehci_to_hcd(ehci)->state == HC_STATE_HALT)) {
+ /* If the controller isn't running, we don't have to wait for it */
+ if (unlikely(!HC_IS_RUNNING(ehci_to_hcd(ehci)->state))) {
/* if (unlikely (qh->reclaim != 0))
* this will recurse, probably not much
*/
}
cmd |= CMD_IAAD;
- writel (cmd, &ehci->regs->command);
- (void) readl (&ehci->regs->command);
- iaa_watchdog_start (ehci);
+ ehci_writel(ehci, cmd, &ehci->regs->command);
+ (void)ehci_readl(ehci, &ehci->regs->command);
+ iaa_watchdog_start(ehci);
}
/*-------------------------------------------------------------------------*/
struct ehci_qh *qh;
enum ehci_timer_action action = TIMER_IO_WATCHDOG;
- if (!++(ehci->stamp))
- ehci->stamp++;
+ ehci->stamp = ehci_readl(ehci, &ehci->regs->frame_index);
timer_action_done (ehci, TIMER_ASYNC_SHRINK);
rescan:
qh = ehci->async->qh_next.qh;
}
}
- /* unlink idle entries, reducing HC PCI usage as well
+ /* unlink idle entries, reducing DMA usage as well
* as HCD schedule-scanning costs. delay for any qh
* we just scanned, there's a not-unusual case that it
* doesn't stay idle for long.
* (plus, avoids some kind of re-activation race.)
*/
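/* On the arithmetic below: ehci->stamp now snapshots the FRINDEX
 * register, which ticks once per microframe (8 per 1 ms frame), so
 * masking the difference and comparing it against
 * EHCI_SHRINK_FRAMES * 8 waits roughly EHCI_SHRINK_FRAMES milliseconds
 * (5 ms with the ehci.h default, if I remember it right) of idleness
 * before unlinking.
 */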
- if (list_empty (&qh->qtd_list)) {
- if (qh->stamp == ehci->stamp)
+ if (list_empty(&qh->qtd_list)
+ && qh->qh_state == QH_STATE_LINKED) {
+ if (!ehci->reclaim
+ && ((ehci->stamp - qh->stamp) & 0x1fff)
+ >= (EHCI_SHRINK_FRAMES * 8))
+ start_unlink_async(ehci, qh);
+ else
action = TIMER_ASYNC_SHRINK;
- else if (!ehci->reclaim
- && qh->qh_state == QH_STATE_LINKED)
- start_unlink_async (ehci, qh);
}
qh = qh->qh_next.qh;