/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>

#include "musb_core.h"
#include "musb_host.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic:  the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
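
/*
 * Illustrative example of that "dumb" claim policy (hypothetical scenario):
 * a hub with a keyboard and a mouse attached needs two interrupt-IN pipes;
 * each one is given its own hardware endpoint by musb_schedule() and keeps
 * it for as long as URBs keep getting queued to it.  Nothing else is ever
 * time-multiplexed onto a claimed endpoint.
 */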

static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem *epio = ep->regs;
	u16 csr;
	u16 lastcsr = 0;
	int retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		if (csr != lastcsr)
			DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		lastcsr = csr;
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem *epio = ep->regs;
	u16 csr;
	int retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16 txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16 txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16 frame;
	u32 len;
	void __iomem *mbase = musb->mregs;
	struct urb *urb = next_urb(qh);
	void *buf = urb->transfer_buffer;
	u32 offset = 0;
	struct musb_hw_ep *hw_ep = qh->hw_ep;
	unsigned pipe = urb->pipe;
	u8 address = usb_pipedevice(pipe);
	int epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		hw_ep->out_qh = qh;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		offset = urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf + offset, len);

	/* Configure endpoint */
	if (is_in || hw_ep->is_shared_fifo)
		hw_ep->in_qh = qh;
	else
		hw_ep->out_qh = qh;
	musb_ep_program(musb, epnum, urb, !is_in, buf, offset, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		DBG(3, "check whether there's still time for periodic Tx\n");
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			DBG(1, "SOF for %d\n", epnum);
#if 1 /* ifndef CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		DBG(4, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}

/* caller owns controller lock, irqs are blocked */
static void
__musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	DBG(({ int level; switch (status) {
			case 0:
				level = 4;
				break;
			/* common/boring faults */
			case -EREMOTEIO:
			case -ESHUTDOWN:
			case -ECONNRESET:
			case -EPIPE:
				level = 3;
				break;
			default:
				level = 2;
				break;
			}; level; }),
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}

/* For bulk/interrupt endpoints only */
static inline void musb_save_toggle(struct musb_qh *qh, int is_in,
				struct urb *urb)
{
	void __iomem *epio = qh->hw_ep->regs;
	u16 csr;

	/*
	 * FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in)
		csr = musb_readw(epio, MUSB_RXCSR) & MUSB_RXCSR_H_DATATOGGLE;
	else
		csr = musb_readw(epio, MUSB_TXCSR) & MUSB_TXCSR_H_DATATOGGLE;

	usb_settoggle(urb->dev, qh->epnum, !is_in, csr ? 1 : 0);
}
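
/* NOTE: the toggle saved by musb_save_toggle() is restored the next time
 * the endpoint is programmed; musb_ep_program() turns it back into an
 * MUSB_TXCSR_H_WR_DATATOGGLE / MUSB_RXCSR_H_WR_DATATOGGLE write.
 */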

/* caller owns controller lock, irqs are blocked */
static struct musb_qh *
musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
{
	struct musb_hw_ep *ep = qh->hw_ep;
	struct musb *musb = ep->musb;
	int is_in = usb_pipein(urb->pipe);
	int ready = qh->is_ready;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(qh, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	__musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head *head;

		if (is_in)
			ep->rx_reinit = 1;
		else
			ep->tx_reinit = 1;

		/* clobber old pointers to this qh */
		if (is_in || ep->is_shared_fifo)
			ep->in_qh = NULL;
		else
			ep->out_qh = NULL;
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}
	return qh;
}

/*
 * Advance this hardware endpoint's queue, completing the specified urb and
 * advancing to either the next urb queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, irqs are blocked
 */
static void
musb_advance_schedule(struct musb *musb, struct urb *urb,
		struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh *qh;

	if (is_in || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

	if (urb->status == -EINPROGRESS)
		qh = musb_giveback(qh, urb, 0);
	else
		qh = musb_giveback(qh, urb, urb->status);

	if (qh != NULL && qh->is_ready) {
		DBG(4, "... next ep%d %cX urb %p\n",
				hw_ep->epnum, is_in ? 'R' : 'T',
				next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16 rx_count;
	u8 *buf;
	u16 csr;
	bool done = false;
	u32 length;
	int do_flush = 0;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	int pipe = urb->pipe;
	void *buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int status = 0;
		struct usb_iso_packet_descriptor *d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16 csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);

	ep->rx_reinit = 0;
}

static bool musb_tx_dma_program(struct dma_controller *dma,
		struct musb_hw_ep *hw_ep, struct musb_qh *qh,
		struct urb *urb, u32 offset, u32 length)
{
	struct dma_channel *channel = hw_ep->tx_channel;
	void __iomem *epio = hw_ep->regs;
	u16 pkt_size = qh->maxpacket;
	u16 csr;
	u8 mode;

#ifdef CONFIG_USB_INVENTRA_DMA
	if (length > channel->max_len)
		length = channel->max_len;

	csr = musb_readw(epio, MUSB_TXCSR);
	if (length > pkt_size) {
		mode = 1;
		csr |= MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAMODE
			| MUSB_TXCSR_DMAENAB;
	} else {
		mode = 0;
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
		csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
	}
	channel->desired_mode = mode;
	musb_writew(epio, MUSB_TXCSR, csr);
#else
	if (!is_cppi_enabled() && !tusb_dma_omap())
		return false;

	channel->actual_len = 0;

	/*
	 * TX uses "RNDIS" mode automatically but needs help
	 * to identify the zero-length-final-packet case.
	 */
	mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
#endif

	qh->segsize = length;

	if (!dma->channel_program(channel, pkt_size, mode,
			urb->transfer_dma + offset, length)) {
		dma->channel_release(channel);
		hw_ep->tx_channel = NULL;

		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB);
		musb_writew(epio, MUSB_TXCSR, csr | MUSB_TXCSR_H_WZC_BITS);
		return false;
	}
	return true;
}
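
/*
 * Worked example (illustrative only): with 512-byte packets, a 3072-byte
 * bulk OUT programs a single mode-1 DMA covering all six packets, while a
 * 100-byte transfer (length <= pkt_size) stays in mode 0 and takes one
 * DMA completion interrupt per packet.
 */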

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, int is_out,
			u8 *buf, u32 offset, u32 len)
{
	struct dma_controller *dma_controller;
	struct dma_channel *dma_channel;
	u8 dma_ok;
	void __iomem *mbase = musb->mregs;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh;
	u16 packet_sz;

	if (!is_out || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

	packet_sz = qh->maxpacket;

	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16 csr;
		u16 int_txe;
		u16 load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz);
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
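			/* Illustrative: with a 512-byte TX FIFO and 64-byte
			 * bulk packets, the bulk-split TXMAXP value above is
			 * 64 | ((512/64 - 1) << 11) -- packet size in the
			 * low 11 bits, packet count minus one above them.
			 */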
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

		if (dma_channel && musb_tx_dma_program(dma_controller,
					hw_ep, qh, urb, offset, len))
			load_count = 0;

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16 csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			if (dma_channel) {
				dma_channel->actual_len = 0L;
				qh->segsize = len;

				/* AUTOREQ is in a DMA register */
				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
				csr = musb_readw(hw_ep->regs,
						MUSB_RXCSR);

				/* unless caller treats short rx transfers as
				 * errors, we dare not queue multiple transfers.
				 */
				dma_ok = dma_controller->channel_program(
						dma_channel, packet_sz,
						!(urb->transfer_flags
							& URB_SHORT_NOT_OK),
						urb->transfer_dma + offset,
						qh->segsize);
				if (!dma_ok) {
					dma_controller->channel_release(
							dma_channel);
					hw_ep->rx_channel = NULL;
					dma_channel = NULL;
				} else
					csr |= MUSB_RXCSR_DMAENAB;
			}
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool more = false;
	u8 *fifo_dest = NULL;
	u16 fifo_count = 0;
	struct musb_hw_ep *hw_ep = musb->control_ep;
	struct musb_qh *qh = hw_ep->in_qh;
	struct usb_ctrlrequest *request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			DBG(4, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			DBG(4, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			DBG(4, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				urb->transfer_buffer_length -
				urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb *urb;
	u16 csr, len;
	int status = 0;
	void __iomem *mbase = musb->mregs;
	struct musb_hw_ep *hw_ep = musb->control_ep;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	bool complete = false;
	irqreturn_t retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		DBG(6, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		DBG(2, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		DBG(2, "control NAK timeout\n");

		/* NOTE: this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		DBG(6, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			DBG(5, "ep0 STATUS, csr %04x\n", csr);
		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif
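
/* Put differently (illustrative summary of the flow above): in mode 0 the
 * driver must set TxPktRdy itself for every packet, after each DMA
 * completion irq; in mode 1 the controller sets TxPktRdy for full packets
 * on its own, and only a short (or zero-length) final packet still needs
 * the driver's help.
 */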

/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int pipe;
	bool done = false;
	u16 tx_csr;
	size_t length = 0;
	size_t offset = 0;
	struct urb *urb;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
						   : hw_ep->out_qh;
	u32 status = 0;
	void __iomem *mbase = musb->mregs;
	struct dma_channel *dma;

	urb = next_urb(qh);

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		DBG(3, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		DBG(3, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		DBG(6, "TX end=%d device not responding\n", epnum);

		/* NOTE: this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 * That's already done for bulk RX transfers.
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		return;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		return;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			DBG(2, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			length = dma->actual_len;
		else
			length = qh->segsize;
		qh->offset += length;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = length;
			d->status = status;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				offset = d->offset;
				length = d->length;
			}
		} else if (dma) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				offset = qh->offset;
				length = urb->transfer_buffer_length - offset;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);
		return;
	} else if (usb_pipeisoc(pipe) && dma) {
		if (musb_tx_dma_program(musb->dma_controller, hw_ep, qh, urb,
				offset, length))
			return;
	} else if (tx_csr & MUSB_TXCSR_DMAENAB) {
		DBG(1, "not complete, but DMA enabled?\n");
		return;
	}

	/*
	 * PIO: start next packet in this URB.
	 *
	 * REVISIT: some docs say that when hw_ep->tx_double_buffered,
	 * (and presumably, FIFO is not half-full) we should write *two*
	 * packets before updating TXCSR; other docs disagree...
	 */
	if (length > qh->maxpacket)
		length = qh->maxpacket;
	musb_write_fifo(hw_ep, length, urb->transfer_buffer + offset);
	qh->segsize = length;

	musb_ep_select(mbase, epnum);
	musb_writew(epio, MUSB_TXCSR,
			MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|			- Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|			- if urb not complete, send next IN token (ReqPkt)
	|			|	else complete urb.
	|			|
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *	(a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *	(b) termination conditions are: short RX, or buffer full;
 *	(c) fault modes include
 *	    - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *	      (and that endpoint's dma queue stops immediately)
 *	    - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif
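
/* Concrete illustration of the REVISIT note above: a 4096-byte usb-storage
 * read with 512-byte packets could, in principle, take the first seven
 * packets in one mode-1 DMA and the last in mode 0; the "Disadvantage of
 * using mode 1" note in musb_host_rx() explains why even that hand-off is
 * racy on this core.
 */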

/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct dma_channel *dma;
	struct urb *urb;
	void __iomem *mbase = musb->mregs;
	void __iomem *epio = ep->regs;
	struct musb_qh *cur_qh, *next_qh;
	u16 rx_csr;

	musb_ep_select(mbase, ep->epnum);
	dma = is_dma_capable() ? ep->rx_channel : NULL;

	/* clear nak timeout bit */
	rx_csr = musb_readw(epio, MUSB_RXCSR);
	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
	rx_csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, rx_csr);

	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(cur_qh, 1, urb);

		/* move cur_qh to end of queue */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);

		/* get the next qh from musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);

		/* set rx_reinit and schedule the next qh */
		ep->rx_reinit = 1;
		musb_start_urb(musb, 1, next_qh);
	}
}

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb *urb;
	struct musb_hw_ep *hw_ep = musb->endpoints + epnum;
	void __iomem *epio = hw_ep->regs;
	struct musb_qh *qh = hw_ep->in_qh;
	size_t xfer_len;
	void __iomem *mbase = musb->mregs;
	int pipe;
	u16 rx_csr, val;
	bool iso_err = false;
	bool done = false;
	u32 status;
	struct dma_channel *dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		DBG(3, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		DBG(3, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			DBG(6, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_rx_nak_timeout(musb, hw_ep);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			DBG(4, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller *c;
			u16 rx_count;
			int ret, length;
			dma_addr_t buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (status == 0) {
						status = -EOVERFLOW;
						urb->error_count++;
					}
					DBG(2, "** OVERFLOW %d into %d\n",
						rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 *	Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				/* REVISIT reset CSR */
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			DBG(6, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb *musb,
	struct musb_qh *qh,
	int is_in)
{
	int idle;
	int best_diff;
	int best_end, epnum;
	struct musb_hw_ep *hw_ep = NULL;
	struct list_head *head = NULL;

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int diff;

		if (is_in || hw_ep->is_shared_fifo) {
			if (hw_ep->in_qh != NULL)
				continue;
		} else if (hw_ep->out_qh != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
		else
			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;

		if (diff >= 0 && best_diff > diff) {
			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX NAK timeout scheme when bulk requests are
		 * multiplexed.  This scheme doesn't work in high speed to full
		 * speed scenario as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
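		/* Illustrative arithmetic, assuming the usual power-of-two
		 * NAKlimit encoding of 2^(n-1) (micro)frames: n = 8 gives
		 * 2^7 = 128 microframes (16 ms at 125 us each); n = 4 gives
		 * 2^3 = 8 full-speed frames (8 ms at 1 ms each), matching
		 * the note above.
		 */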
		if (is_in && qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}

static int musb_urb_enqueue(
	struct usb_hcd *hcd,
	struct urb *urb,
	gfp_t mem_flags)
{
	unsigned long flags;
	struct musb *musb = hcd_to_musb(hcd);
	struct usb_host_endpoint *hep = urb->ep;
	struct musb_qh *qh;
	struct usb_endpoint_descriptor *epd = &hep->desc;
	int ret;
	unsigned type_reg;
	unsigned interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);

	/* no high bandwidth support yet */
	if (qh->maxpacket & ~0x7ff) {
		ret = -EMSGSIZE;
		goto done;
	}
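
	/* Example of what this check rejects (illustrative): a high-speed,
	 * high-bandwidth ISO endpoint advertising wMaxPacketSize 0x1400,
	 * i.e. 1024-byte payloads with two additional transactions per
	 * microframe, has bits set above 0x7ff.
	 */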

	qh->epnum = usb_endpoint_num(epd);
	qh->type = usb_endpoint_type(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;
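
	/* Worked examples (illustrative): a full-speed interrupt endpoint
	 * with bInterval 10 is polled every 10 frames (linear encoding),
	 * while a high-speed endpoint with bInterval 4 is polled every
	 * 2^(4-1) = 8 microframes (logarithmic encoding).
	 */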

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device *parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}
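
	/* Addressing example (illustrative): a full-speed device at
	 * address 5 behind port 2 of a multi-TT high-speed hub at address 3
	 * ends up with h_addr_reg = 0x83 and h_port_reg = 2.
	 */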

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}

/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
{
	struct musb_hw_ep *ep = qh->hw_ep;
	void __iomem *epio = ep->regs;
	unsigned hw_end = ep->epnum;
	void __iomem *regs = ep->musb->mregs;
	u16 csr;
	int status = 0;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel *dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			DBG(status ? 1 : 3,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}

static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb *musb = hcd_to_musb(hcd);
	struct musb_qh *qh;
	struct list_head *sched;
	unsigned long flags;
	int ret;

	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/* Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending dma, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 */
	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
		ret = -EINPROGRESS;
	else {
		switch (qh->type) {
		case USB_ENDPOINT_XFER_CONTROL:
			sched = &musb->control;
			break;
		case USB_ENDPOINT_XFER_BULK:
			if (qh->mux == 1) {
				if (usb_pipein(urb->pipe))
					sched = &musb->in_bulk;
				else
					sched = &musb->out_bulk;
				break;
			}
		default:
			/* REVISIT when we get a schedule tree, periodic
			 * transfers won't always be at the head of a
			 * singleton queue...
			 */
			sched = NULL;
			break;
		}
	}

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
	if (ret < 0 || (sched && qh != first_qh(sched))) {
		int ready = qh->is_ready;

		ret = 0;
		qh->is_ready = 0;
		__musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8 epnum = hep->desc.bEndpointAddress;
	unsigned long flags;
	struct musb *musb = hcd_to_musb(hcd);
	u8 is_in = epnum & USB_DIR_IN;
	struct musb_qh *qh;
	struct urb *urb;
	struct list_head *sched;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		sched = &musb->control;
		break;
	case USB_ENDPOINT_XFER_BULK:
		if (qh->mux == 1) {
			if (is_in)
				sched = &musb->in_bulk;
			else
				sched = &musb->out_bulk;
			break;
		}
	default:
		/* REVISIT when we get a schedule tree, periodic transfers
		 * won't always be at the head of a singleton queue...
		 */
		sched = NULL;
		break;
	}

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* kick first urb off the hardware, if needed */
	qh->is_ready = 0;
	if (!sched || qh == first_qh(sched)) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			__musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}

static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb *musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb *musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}

static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb *musb = hcd_to_musb(hcd);

	if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
		return 0;

	if (is_host_active(musb) && musb->is_active) {
		WARNING("trying to suspend as %s is_active=%i\n",
			otg_state_string(musb), musb->is_active);
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}

const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};