/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>

#include "musb_core.h"
#include "musb_host.h"
/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ... has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or bulk.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */


/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic:  the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
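/* Illustrative sketch of that policy (a hypothetical layout, added for
 * clarity; the exact endpoint count depends on the core's configuration):
 *
 *	hw ep0		shared		all CONTROL transfers
 *	hw ep1		IN + OUT	the dedicated BULK queues
 *	hw ep2...	one qh each	INTERRUPT/ISO, claimed per device
 *					endpoint (e.g. hub + keybd + mouse)
 *
 * A claimed periodic endpoint is released only once its urb queue stops
 * being refilled.
 */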
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int nOut,
			u8 *buf, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
	}
}
static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}
/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}
static inline void musb_h_tx_dma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	if (is_cppi_enabled())
		txcsr |= MUSB_TXCSR_DMAMODE;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}
/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	u8			*buf;
	void __iomem		*mbase = musb->mregs;
	struct urb		*urb = next_urb(qh);
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf, len);

	/* Configure endpoint */
	if (is_in || hw_ep->is_shared_fifo)
		hw_ep->in_qh = qh;
	else
		hw_ep->out_qh = qh;
	musb_ep_program(musb, epnum, urb, !is_in, buf, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		DBG(3, "check whether there's still time for periodic Tx\n");
		qh->iso_idx = 0;
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			DBG(1, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		DBG(4, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			musb_h_tx_dma_start(hw_ep);
	}
}
/* caller owns controller lock, irqs are blocked */
static void
__musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	DBG(({ int level; switch (status) {
				case 0:
					level = 4;
					break;
				/* common/boring faults */
				case -EREMOTEIO:
				case -ESHUTDOWN:
				case -ECONNRESET:
				case -EPIPE:
					level = 3;
					break;
				default:
					level = 2;
					break;
				}; level; }),
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}
/* for bulk/interrupt endpoints only */
static inline void
musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
{
	struct usb_device	*udev = urb->dev;
	u16			csr;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*qh;

	/* FIXME:  the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */

	if (is_in || ep->is_shared_fifo)
		qh = ep->in_qh;
	else
		qh = ep->out_qh;

	if (!is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		usb_settoggle(udev, qh->epnum, 1,
			(csr & MUSB_TXCSR_H_DATATOGGLE)
				? 1 : 0);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		usb_settoggle(udev, qh->epnum, 0,
			(csr & MUSB_RXCSR_H_DATATOGGLE)
				? 1 : 0);
	}
}
/* caller owns controller lock, irqs are blocked */
static struct musb_qh *
musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	int			is_in = usb_pipein(urb->pipe);
	int			ready = qh->is_ready;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(ep, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	__musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;

		if (is_in)
			ep->rx_reinit = 1;
		else
			ep->tx_reinit = 1;

		/* clobber old pointers to this qh */
		if (is_in || ep->is_shared_fifo)
			ep->in_qh = NULL;
		else
			ep->out_qh = NULL;
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}
	return qh;
}
/*
 * Advance this hardware endpoint's queue, completing the specified urb and
 * advancing to either the next urb queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, irqs are blocked
 */
static void
musb_advance_schedule(struct musb *musb, struct urb *urb,
		struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh	*qh;

	if (is_in || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

	if (urb->status == -EINPROGRESS)
		qh = musb_giveback(qh, urb, 0);
	else
		qh = musb_giveback(qh, urb, urb->status);

	if (qh != NULL && qh->is_ready) {
		DBG(4, "... next ep%d %cX urb %p\n",
				hw_ep->epnum, is_in ? 'R' : 'T',
				next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}
static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}
/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		d->actual_length = length;

		d->status = status;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}
/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE:  we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			csr = musb_readw(ep->regs, MUSB_TXCSR);
			musb_writew(ep->regs, MUSB_TXCSR,
				    csr | MUSB_TXCSR_FRCDATATOG);
		}

		/*
		 * Clear the MODE bit (and everything else) to enable Rx.
		 * NOTE: we mustn't clear the DMAMODE bit before DMAENAB.
		 */
		if (csr & MUSB_TXCSR_DMAMODE)
			musb_writew(ep->regs, MUSB_TXCSR, MUSB_TXCSR_DMAMODE);
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);
	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);

	ep->rx_reinit = 0;
}
/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int is_out,
			u8 *buf, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh;
	u16			packet_sz;

	if (!is_out || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

	packet_sz = qh->maxpacket;

	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);

			/*
			 * We must not clear the DMAMODE bit before or in
			 * the same cycle with the DMAENAB bit, so we clear
			 * the latter first...
			 */
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev, qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			csr &= ~MUSB_TXCSR_DMAMODE;
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
			/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
			if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz);
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma_channel) {
			qh->segsize = min(len, dma_channel->max_len);
			if (qh->segsize <= packet_sz)
				dma_channel->desired_mode = 0;
			else
				dma_channel->desired_mode = 1;

			if (dma_channel->desired_mode == 0) {
				/* Against the programming guide */
				csr |= (MUSB_TXCSR_DMAENAB);
			} else
				csr |= (MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_DMAMODE);
			musb_writew(epio, MUSB_TXCSR, csr);

			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					dma_channel->desired_mode,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				if (is_out)
					hw_ep->tx_channel = NULL;
				else
					hw_ep->rx_channel = NULL;
				dma_channel = NULL;

				/*
				 * The programming guide says that we must
				 * clear the DMAENAB bit before DMAMODE...
				 */
				csr = musb_readw(epio, MUSB_TXCSR);
				csr &= ~(MUSB_TXCSR_DMAENAB
						| MUSB_TXCSR_AUTOSET);
				musb_writew(epio, MUSB_TXCSR, csr);
				csr &= ~MUSB_TXCSR_DMAMODE;
				musb_writew(epio, MUSB_TXCSR, csr);
			}
		}
#endif

		/* candidate for DMA */
		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {

			/* Defer enabling DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* TX uses "rndis" mode automatically, but needs help
			 * to identify the zero-length-final-packet case.
			 */
			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					(urb->transfer_flags
							& URB_ZERO_PACKET)
						== URB_ZERO_PACKET,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				hw_ep->tx_channel = NULL;
				dma_channel = NULL;

				/* REVISIT there's an error path here that
				 * needs handling: can't do dma, but
				 * there's no pio buffer address...
				 */
			}
		}

		if (load_count) {
			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* AUTOREQ is in a DMA register */
			musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
			csr = musb_readw(hw_ep->regs,
					MUSB_RXCSR);

			/* unless caller treats short rx transfers as
			 * errors, we dare not queue multiple transfers.
			 */
			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					!(urb->transfer_flags
						& URB_SHORT_NOT_OK),
					urb->transfer_dma,
					qh->segsize);
			if (!dma_ok) {
				dma_controller->channel_release(
						dma_channel);
				hw_ep->rx_channel = NULL;
				dma_channel = NULL;
			} else
				csr |= MUSB_RXCSR_DMAENAB;
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}
/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			DBG(4, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			DBG(4, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			DBG(4, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}
/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		DBG(6, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		DBG(2, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		DBG(2, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		DBG(6, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				? MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			DBG(5, "ep0 STATUS, csr %04x\n", csr);
		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
*/

#endif
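/* Hypothetical worked example of the mode 0/1 split above (added for
 * illustration only): a 1200-byte OUT urb on a 512-byte maxpacket
 * endpoint.  In mode 1, a single channel_program() call moves
 * 512 + 512 + 176 bytes; the two full packets go out without CPU help,
 * and only the short 176-byte tail needs TxPktRdy set by software.  In
 * mode 0, every packet needs its own DMA completion plus a TxPktRdy
 * write, so the same urb costs three interrupts instead of roughly one.
 */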
/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			wLength = 0;
	u8			*buf = NULL;
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
							    : hw_ep->out_qh;
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;

	urb = next_urb(qh);

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		goto finish;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		DBG(3, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		DBG(3, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		DBG(6, "TX end=%d device not responding\n", epnum);

		/* NOTE:  this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 * That's already done for bulk RX transfers.
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		goto finish;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		goto finish;
	}

	if (is_dma_capable() && dma && !status) {
		/*
		 * DMA has completed.  But if we're using DMA mode 1 (multi
		 * packet DMA), we need a terminal TXPKTRDY interrupt before
		 * we can consider this transfer completed, lest we trash
		 * its last packet when writing the next URB's data.  So we
		 * switch back to mode 0 to get that interrupt; we'll come
		 * back here once it happens.
		 */
		if (tx_csr & MUSB_TXCSR_DMAMODE) {
			/*
			 * We shouldn't clear DMAMODE with DMAENAB set; so
			 * clear them in a safe order.  That should be OK
			 * once TXPKTRDY has been set (and I've never seen
			 * it being 0 at this moment -- DMA interrupt latency
			 * is significant) but if it hasn't been then we have
			 * no choice but to stop being polite and ignore the
			 * programmer's guide... :-)
			 *
			 * Note that we must write TXCSR with TXPKTRDY cleared
			 * in order not to re-trigger the packet send (this bit
			 * can't be cleared by CPU), and there's another caveat:
			 * TXPKTRDY may be set shortly and then cleared in the
			 * double-buffered FIFO mode, so we do an extra TXCSR
			 * read for debouncing...
			 */
			tx_csr &= musb_readw(epio, MUSB_TXCSR);
			if (tx_csr & MUSB_TXCSR_TXPKTRDY) {
				tx_csr &= ~(MUSB_TXCSR_DMAENAB |
					    MUSB_TXCSR_TXPKTRDY);
				musb_writew(epio, MUSB_TXCSR,
					    tx_csr | MUSB_TXCSR_H_WZC_BITS);
			}
			tx_csr &= ~(MUSB_TXCSR_DMAMODE |
				    MUSB_TXCSR_TXPKTRDY);
			musb_writew(epio, MUSB_TXCSR,
				    tx_csr | MUSB_TXCSR_H_WZC_BITS);

			/*
			 * There is no guarantee that we'll get an interrupt
			 * after clearing DMAMODE as we might have done this
			 * too late (after TXPKTRDY was cleared by controller).
			 * Re-read TXCSR as we have spoiled its previous value.
			 */
			tx_csr = musb_readw(epio, MUSB_TXCSR);
		}

		/*
		 * We may get here from a DMA completion or TXPKTRDY interrupt.
		 * In any case, we must check the FIFO status here and bail out
		 * only if the FIFO still has data -- that should prevent the
		 * "missed" TXPKTRDY interrupts and deal with double-buffered
		 * FIFO mode too...
		 */
		if (tx_csr & (MUSB_TXCSR_FIFONOTEMPTY | MUSB_TXCSR_TXPKTRDY)) {
			DBG(2, "DMA complete but packet still in FIFO, "
			    "CSR %04x\n", tx_csr);
			return;
		}
	}

	/* REVISIT this looks wrong... */
	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			wLength = dma->actual_len;
		else
			wLength = qh->segsize;
		qh->offset += wLength;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = qh->segsize;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				buf = urb->transfer_buffer + d->offset;
				wLength = d->length;
			}
		} else if (dma) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				buf = urb->transfer_buffer
						+ qh->offset;
				wLength = urb->transfer_buffer_length
						- qh->offset;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);

	} else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
		/* WARN_ON(!buf); */

		/* REVISIT:  some docs say that when hw_ep->tx_double_buffered,
		 * (and presumably, fifo is not half-full) we should write TWO
		 * packets before updating TXCSR ... other docs disagree ...
		 */
		/* PIO:  start next packet in this URB */
		if (wLength > qh->maxpacket)
			wLength = qh->maxpacket;
		musb_write_fifo(hw_ep, wLength, buf);
		qh->segsize = wLength;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
	} else
		DBG(1, "not complete, but dma enabled?\n");

finish:
	return;
}
#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|		    - Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|		    - if urb not complete, send next IN token (ReqPkt)
	|			   |	else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * All we care about at this driver level is that
 *	(a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *	(b) termination conditions are: short RX, or buffer full;
 *	(c) fault modes include
 *	    - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *	      (and that endpoint's dma queue stops immediately)
 *	    - overflow (full, PLUS more bytes in the terminal packet)
 *
 * So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 * thus be a great candidate for using mode 1 ... for all but the
 * last packet of one URB's transfer.
 */

#endif
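/* Added illustration of the mode 1 problem above (a sketch, assuming
 * 512-byte packets and a 1024-byte urb): mode 1 would ack the first full
 * packet and auto-send the next IN token entirely in hardware.  If the
 * following packet is short, no ack is generated and software must catch
 * that boundary via RxPktRdy itself; miss it and the packet is lost or
 * RXCOUNT reads back 0, as described below.  That is why this driver
 * issues one channel_program() per packet in mode 0 instead.
 */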
/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr;

	musb_ep_select(mbase, ep->epnum);
	dma = is_dma_capable() ? ep->rx_channel : NULL;

	/* clear nak timeout bit */
	rx_csr = musb_readw(epio, MUSB_RXCSR);
	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
	rx_csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, rx_csr);

	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(ep, 1, urb);

		/* move cur_qh to end of queue */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);

		/* get the next qh from musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);

		/* set rx_reinit and schedule the next qh */
		ep->rx_reinit = 1;
		musb_start_urb(musb, 1, next_qh);
	}
}
/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		DBG(3, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		DBG(3, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			DBG(6, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_rx_nak_timeout(musb, hw_ep);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			DBG(4, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT) {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (status == 0) {
						status = -EOVERFLOW;
						urb->error_count++;
					}
					DBG(2, "** OVERFLOW %d into %d\n",\
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 * Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				/* REVISIT reset CSR */
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			DBG(6, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}
/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (is_in || hw_ep->is_shared_fifo) {
			if (hw_ep->in_qh != NULL)
				continue;
		} else if (hw_ep->out_qh != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
		else
			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;

		if (diff >= 0 && best_diff > diff) {
			best_diff = diff;
			best_end = epnum;
		}
	}
	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX NAK timeout scheme when bulk requests are
		 * multiplexed.  This scheme doesn't work in high speed to full
		 * speed scenario as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
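		/* Worked numbers for the intervals above (added
		 * illustration): a register value m means a timeout of
		 * 2^(m-1) frame units, so m = 8 at high speed gives
		 * 2^7 = 128 microframes * 125 us = 16 ms, while m = 4 at
		 * full speed gives 2^3 = 8 frames * 1 ms = 8 ms.
		 */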
		if (is_in && qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}
static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);

	/* no high bandwidth support yet */
	if (qh->maxpacket & ~0x7ff) {
		ret = -EMSGSIZE;
		goto done;
	}

	qh->epnum = usb_endpoint_num(epd);
	qh->type = usb_endpoint_type(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;
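	/* Added illustration of the two encodings (assumes standard USB 2.0
	 * bInterval rules): a full-speed interrupt endpoint with
	 * bInterval = 10 keeps the linear value 10, i.e. polling every 10
	 * frames (10 ms); a high-speed interrupt or ISO endpoint with
	 * bInterval = 4 uses the logarithmic form, giving a period of
	 * 2^(4-1) = 8 units (8 microframes = 1 ms at high speed).
	 */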
	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}
/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	u16			csr;
	int			status = 0;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			DBG(status ? 1 : 3,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}
static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct list_head	*sched;
	unsigned long		flags;
	int			ret;

	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/* Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending dma, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 */
	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
		ret = -EINPROGRESS;
	else {
		switch (qh->type) {
		case USB_ENDPOINT_XFER_CONTROL:
			sched = &musb->control;
			break;
		case USB_ENDPOINT_XFER_BULK:
			if (qh->mux == 1) {
				if (usb_pipein(urb->pipe))
					sched = &musb->in_bulk;
				else
					sched = &musb->out_bulk;
				break;
			}
		default:
			/* REVISIT when we get a schedule tree, periodic
			 * transfers won't always be at the head of a
			 * singleton queue...
			 */
			sched = NULL;
			break;
		}
	}

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */
	if (ret < 0 || (sched && qh != first_qh(sched))) {
		int	ready = qh->is_ready;

		ret = 0;
		qh->is_ready = 0;
		__musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}
/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			epnum = hep->desc.bEndpointAddress;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	u8			is_in = epnum & USB_DIR_IN;
	struct musb_qh		*qh;
	struct urb		*urb;
	struct list_head	*sched;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		sched = &musb->control;
		break;
	case USB_ENDPOINT_XFER_BULK:
		if (qh->mux == 1) {
			if (is_in)
				sched = &musb->in_bulk;
			else
				sched = &musb->out_bulk;
			break;
		}
	default:
		/* REVISIT when we get a schedule tree, periodic transfers
		 * won't always be at the head of a singleton queue...
		 */
		sched = NULL;
		break;
	}

	/* NOTE: qh is invalid unless !list_empty(&hep->urb_list) */

	/* kick first urb off the hardware, if needed */
	qh->is_ready = 0;
	if (!sched || qh == first_qh(sched)) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			__musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}
static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}

static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
		return 0;

	if (is_host_active(musb) && musb->is_active) {
		WARNING("trying to suspend as %s is_active=%i\n",
			otg_state_string(musb), musb->is_active);
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}
const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};