/*
 * MUSB OTG driver host support
 *
 * Copyright 2005 Mentor Graphics Corporation
 * Copyright (C) 2005-2006 by Texas Instruments
 * Copyright (C) 2006-2007 Nokia Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/list.h>

#include "musb_core.h"
#include "musb_host.h"

/* MUSB HOST status 22-mar-2006
 *
 * - There's still lots of partial code duplication for fault paths, so
 *   they aren't handled as consistently as they need to be.
 *
 * - PIO mostly behaved when last tested.
 *     + including ep0, with all usbtest cases 9, 10
 *     + usbtest 14 (ep0out) doesn't seem to run at all
 *     + double buffered OUT/TX endpoints saw stalls(!) with certain usbtest
 *       configurations, but otherwise double buffering passes basic tests.
 *     + for 2.6.N, for N > ~10, needs API changes for hcd framework.
 *
 * - DMA (CPPI) ... partially behaves, not currently recommended
 *     + about 1/15 the speed of typical EHCI implementations (PCI)
 *     + RX, all too often reqpkt seems to misbehave after tx
 *     + TX, no known issues (other than evident silicon issue)
 *
 * - DMA (Mentor/OMAP) ...has at least toggle update problems
 *
 * - [23-feb-2009] minimal traffic scheduling to avoid bulk RX packet
 *   starvation ... nothing yet for TX, interrupt, or isochronous.
 *
 * - Not tested with HNP, but some SRP paths seem to behave.
 *
 * NOTE 24-August-2006:
 *
 * - Bulk traffic finally uses both sides of hardware ep1, freeing up an
 *   extra endpoint for periodic use enabling hub + keybd + mouse.  That
 *   mostly works, except that with "usbnet" it's easy to trigger cases
 *   with "ping" where RX loses.  (a) ping to davinci, even "ping -f",
 *   fine; but (b) ping _from_ davinci, even "ping -c 1", ICMP RX loses
 *   although ARP RX wins.  (That test was done with a full speed link.)
 */

/*
 * NOTE on endpoint usage:
 *
 * CONTROL transfers all go through ep0.  BULK ones go through dedicated IN
 * and OUT endpoints ... hardware is dedicated for those "async" queue(s).
 * (Yes, bulk _could_ use more of the endpoints than that, and would even
 * benefit from it.)
 *
 * INTERRUPT and ISOCHRONOUS transfers are scheduled to the other endpoints.
 * So far that scheduling is both dumb and optimistic: the endpoint will be
 * "claimed" until its software queue is no longer refilled.  No multiplexing
 * of transfers between endpoints, or anything clever.
 */
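
/* Illustration only (not from the databook; endpoint counts vary with the
 * silicon config): on a core with five hw endpoints, the policy above
 * yields ep0 = control, ep1 = the dedicated bulk IN+OUT pair, and
 * ep2..ep4 each claimed by one active interrupt/iso pipe until its
 * software queue drains.
 */
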
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int is_out,
			u8 *buf, u32 len);

/*
 * Clear TX fifo. Needed to avoid BABBLE errors.
 */
static void musb_h_tx_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 1000;

	csr = musb_readw(epio, MUSB_TXCSR);
	while (csr & MUSB_TXCSR_FIFONOTEMPTY) {
		DBG(3, "Host TX FIFONOTEMPTY csr: %02x\n", csr);
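		/* FLUSHFIFO discards (at most) one queued packet, so a
		 * double-buffered endpoint may need two trips through
		 * this loop before the FIFO reports empty.
		 */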
		csr |= MUSB_TXCSR_FLUSHFIFO;
		musb_writew(epio, MUSB_TXCSR, csr);
		csr = musb_readw(epio, MUSB_TXCSR);
		if (WARN(retries-- < 1,
				"Could not flush host TX%d fifo: csr: %04x\n",
				ep->epnum, csr))
			return;
		mdelay(1);
	}
}

static void musb_h_ep0_flush_fifo(struct musb_hw_ep *ep)
{
	void __iomem	*epio = ep->regs;
	u16		csr;
	int		retries = 5;

	/* scrub any data left in the fifo */
	do {
		csr = musb_readw(epio, MUSB_TXCSR);
		if (!(csr & (MUSB_CSR0_TXPKTRDY | MUSB_CSR0_RXPKTRDY)))
			break;
		musb_writew(epio, MUSB_TXCSR, MUSB_CSR0_FLUSHFIFO);
		csr = musb_readw(epio, MUSB_TXCSR);
		udelay(10);
	} while (--retries);

	WARN(!retries, "Could not flush host TX%d fifo: csr: %04x\n",
			ep->epnum, csr);

	/* and reset for the next transfer */
	musb_writew(epio, MUSB_TXCSR, 0);
}

/*
 * Start transmit. Caller is responsible for locking shared resources.
 * musb must be locked.
 */
static inline void musb_h_tx_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	if (ep->epnum) {
		txcsr = musb_readw(ep->regs, MUSB_TXCSR);
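		/* H_WZC_BITS are "write zero to clear" status flags; OR-ing
		 * them in as ones keeps this read-modify-write from acking
		 * a pending RXSTALL/ERROR/NAKTIMEOUT by accident.
		 */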
		txcsr |= MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_H_WZC_BITS;
		musb_writew(ep->regs, MUSB_TXCSR, txcsr);
	} else {
		txcsr = MUSB_CSR0_H_SETUPPKT | MUSB_CSR0_TXPKTRDY;
		musb_writew(ep->regs, MUSB_CSR0, txcsr);
	}
}

static inline void cppi_host_txdma_start(struct musb_hw_ep *ep)
{
	u16	txcsr;

	/* NOTE: no locks here; caller should lock and select EP */
	txcsr = musb_readw(ep->regs, MUSB_TXCSR);
	txcsr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_H_WZC_BITS;
	musb_writew(ep->regs, MUSB_TXCSR, txcsr);
}

/*
 * Start the URB at the front of an endpoint's queue
 * end must be claimed from the caller.
 *
 * Context: controller locked, irqs blocked
 */
static void
musb_start_urb(struct musb *musb, int is_in, struct musb_qh *qh)
{
	u16			frame;
	u32			len;
	void			*buf;
	void __iomem		*mbase =  musb->mregs;
	struct urb		*urb = next_urb(qh);
	struct musb_hw_ep	*hw_ep = qh->hw_ep;
	unsigned		pipe = urb->pipe;
	u8			address = usb_pipedevice(pipe);
	int			epnum = hw_ep->epnum;

	/* initialize software qh state */
	qh->offset = 0;
	qh->segsize = 0;

	/* gather right source of data */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		/* control transfers always start with SETUP */
		is_in = 0;
		hw_ep->out_qh = qh;
		musb->ep0_stage = MUSB_EP0_START;
		buf = urb->setup_packet;
		len = 8;
		break;
	case USB_ENDPOINT_XFER_ISOC:
		qh->iso_idx = 0;
		qh->frame = 0;
		buf = urb->transfer_buffer + urb->iso_frame_desc[0].offset;
		len = urb->iso_frame_desc[0].length;
		break;
	default:		/* bulk, interrupt */
		/* actual_length may be nonzero on retry paths */
		buf = urb->transfer_buffer + urb->actual_length;
		len = urb->transfer_buffer_length - urb->actual_length;
	}

	DBG(4, "qh %p urb %p dev%d ep%d%s%s, hw_ep %d, %p/%d\n",
			qh, urb, address, qh->epnum,
			is_in ? "in" : "out",
			({char *s; switch (qh->type) {
			case USB_ENDPOINT_XFER_CONTROL:	s = ""; break;
			case USB_ENDPOINT_XFER_BULK:	s = "-bulk"; break;
			case USB_ENDPOINT_XFER_ISOC:	s = "-iso"; break;
			default:			s = "-intr"; break;
			}; s; }),
			epnum, buf, len);

	/* Configure endpoint */
	if (is_in || hw_ep->is_shared_fifo)
		hw_ep->in_qh = qh;
	else
		hw_ep->out_qh = qh;
	musb_ep_program(musb, epnum, urb, !is_in, buf, len);

	/* transmit may have more work: start it when it is time */
	if (is_in)
		return;

	/* determine if the time is right for a periodic transfer */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_ISOC:
	case USB_ENDPOINT_XFER_INT:
		DBG(3, "check whether there's still time for periodic Tx\n");
		qh->iso_idx = 0;
		frame = musb_readw(mbase, MUSB_FRAME);
		/* FIXME this doesn't implement that scheduling policy ...
		 * or handle framecounter wrapping
		 */
		if ((urb->transfer_flags & URB_ISO_ASAP)
				|| (frame >= urb->start_frame)) {
			/* REVISIT the SOF irq handler shouldn't duplicate
			 * this code; and we don't init urb->start_frame...
			 */
			qh->frame = 0;
			goto start;
		} else {
			qh->frame = urb->start_frame;
			/* enable SOF interrupt so we can count down */
			DBG(1, "SOF for %d\n", epnum);
#if 1 /* ifndef	CONFIG_ARCH_DAVINCI */
			musb_writeb(mbase, MUSB_INTRUSBE, 0xff);
#endif
		}
		break;
	default:
start:
		DBG(4, "Start TX%d %s\n", epnum,
			hw_ep->tx_channel ? "dma" : "pio");

		if (!hw_ep->tx_channel)
			musb_h_tx_start(hw_ep);
		else if (is_cppi_enabled() || tusb_dma_omap())
			cppi_host_txdma_start(hw_ep);
	}
}

/* caller owns controller lock, irqs are blocked */
static void
__musb_giveback(struct musb *musb, struct urb *urb, int status)
__releases(musb->lock)
__acquires(musb->lock)
{
	DBG(({ int level; switch (status) {
				case 0:
					level = 4;
					break;
				/* common/boring faults */
				case -EREMOTEIO:
				case -ESHUTDOWN:
				case -ECONNRESET:
				case -EPIPE:
					level = 3;
					break;
				default:
					level = 2;
					break;
				}; level; }),
			"complete %p %pF (%d), dev%d ep%d%s, %d/%d\n",
			urb, urb->complete, status,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			urb->actual_length, urb->transfer_buffer_length
			);

	usb_hcd_unlink_urb_from_ep(musb_to_hcd(musb), urb);
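	/* drop the lock across the completion callback; class drivers
	 * often resubmit URBs from that context and would otherwise
	 * deadlock re-entering this driver
	 */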
	spin_unlock(&musb->lock);
	usb_hcd_giveback_urb(musb_to_hcd(musb), urb, status);
	spin_lock(&musb->lock);
}

/* for bulk/interrupt endpoints only */
static inline void
musb_save_toggle(struct musb_hw_ep *ep, int is_in, struct urb *urb)
{
	struct usb_device	*udev = urb->dev;
	u16			csr;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*qh;

	/* FIXME: the current Mentor DMA code seems to have
	 * problems getting toggle correct.
	 */
	if (is_in || ep->is_shared_fifo)
		qh = ep->in_qh;
	else
		qh = ep->out_qh;

	if (!is_in) {
		csr = musb_readw(epio, MUSB_TXCSR);
		usb_settoggle(udev, qh->epnum, 1,
			(csr & MUSB_TXCSR_H_DATATOGGLE) ? 1 : 0);
	} else {
		csr = musb_readw(epio, MUSB_RXCSR);
		usb_settoggle(udev, qh->epnum, 0,
			(csr & MUSB_RXCSR_H_DATATOGGLE) ? 1 : 0);
	}
}

/* caller owns controller lock, irqs are blocked */
static struct musb_qh *
musb_giveback(struct musb_qh *qh, struct urb *urb, int status)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	struct musb		*musb = ep->musb;
	int			is_in = usb_pipein(urb->pipe);
	int			ready = qh->is_ready;

	/* save toggle eagerly, for paranoia */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		musb_save_toggle(ep, is_in, urb);
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (status == 0 && urb->error_count)
			status = -EXDEV;
		break;
	}

	qh->is_ready = 0;
	__musb_giveback(musb, urb, status);
	qh->is_ready = ready;

	/* reclaim resources (and bandwidth) ASAP; deschedule it, and
	 * invalidate qh as soon as list_empty(&hep->urb_list)
	 */
	if (list_empty(&qh->hep->urb_list)) {
		struct list_head	*head;

		if (is_in)
			ep->rx_reinit = 1;
		else
			ep->tx_reinit = 1;

		/* clobber old pointers to this qh */
		if (is_in || ep->is_shared_fifo)
			ep->in_qh = NULL;
		else
			ep->out_qh = NULL;
		qh->hep->hcpriv = NULL;

		switch (qh->type) {

		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			/* fifo policy for these lists, except that NAKing
			 * should rotate a qh to the end (for fairness).
			 */
			if (qh->mux == 1) {
				head = qh->ring.prev;
				list_del(&qh->ring);
				kfree(qh);
				qh = first_qh(head);
				break;
			}

		case USB_ENDPOINT_XFER_ISOC:
		case USB_ENDPOINT_XFER_INT:
			/* this is where periodic bandwidth should be
			 * de-allocated if it's tracked and allocated;
			 * and where we'd update the schedule tree...
			 */
			kfree(qh);
			qh = NULL;
			break;
		}
	}
	return qh;
}

/*
 * Advance this hardware endpoint's queue, completing the specified urb and
 * advancing to either the next urb queued to that qh, or else invalidating
 * that qh and advancing to the next qh scheduled after the current one.
 *
 * Context: caller owns controller lock, irqs are blocked
 */
static void
musb_advance_schedule(struct musb *musb, struct urb *urb,
		struct musb_hw_ep *hw_ep, int is_in)
{
	struct musb_qh	*qh;

	if (is_in || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

	if (urb->status == -EINPROGRESS)
		qh = musb_giveback(qh, urb, 0);
	else
		qh = musb_giveback(qh, urb, urb->status);

	if (qh != NULL && qh->is_ready) {
		DBG(4, "... next ep%d %cX urb %p\n",
				hw_ep->epnum, is_in ? 'R' : 'T',
				next_urb(qh));
		musb_start_urb(musb, is_in, qh);
	}
}

static u16 musb_h_flush_rxfifo(struct musb_hw_ep *hw_ep, u16 csr)
{
	/* we don't want fifo to fill itself again;
	 * ignore dma (various models),
	 * leave toggle alone (may not have been saved yet)
	 */
	csr |= MUSB_RXCSR_FLUSHFIFO | MUSB_RXCSR_RXPKTRDY;
	csr &= ~(MUSB_RXCSR_H_REQPKT
		| MUSB_RXCSR_H_AUTOREQ
		| MUSB_RXCSR_AUTOCLEAR);

	/* write 2x to allow double buffering */
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
	musb_writew(hw_ep->regs, MUSB_RXCSR, csr);

	/* flush writebuffer */
	return musb_readw(hw_ep->regs, MUSB_RXCSR);
}

/*
 * PIO RX for a packet (or part of it).
 */
static bool
musb_host_packet_rx(struct musb *musb, struct urb *urb, u8 epnum, u8 iso_err)
{
	u16			rx_count;
	u8			*buf;
	u16			csr;
	bool			done = false;
	u32			length;
	int			do_flush = 0;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	int			pipe = urb->pipe;
	void			*buffer = urb->transfer_buffer;

	/* musb_ep_select(mbase, epnum); */
	rx_count = musb_readw(epio, MUSB_RXCOUNT);
	DBG(3, "RX%d count %d, buffer %p len %d/%d\n", epnum, rx_count,
			urb->transfer_buffer, qh->offset,
			urb->transfer_buffer_length);

	/* unload FIFO */
	if (usb_pipeisoc(pipe)) {
		int					status = 0;
		struct usb_iso_packet_descriptor	*d;

		if (iso_err) {
			status = -EILSEQ;
			urb->error_count++;
		}

		d = urb->iso_frame_desc + qh->iso_idx;
		buf = buffer + d->offset;
		length = d->length;
		if (rx_count > length) {
			if (status == 0) {
				status = -EOVERFLOW;
				urb->error_count++;
			}
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		d->status = status;
		urb->actual_length += length;
		d->actual_length = length;

		/* see if we are done */
		done = (++qh->iso_idx >= urb->number_of_packets);
	} else {
		/* non-isoch */
		buf = buffer + qh->offset;
		length = urb->transfer_buffer_length - qh->offset;
		if (rx_count > length) {
			if (urb->status == -EINPROGRESS)
				urb->status = -EOVERFLOW;
			DBG(2, "** OVERFLOW %d into %d\n", rx_count, length);
			do_flush = 1;
		} else
			length = rx_count;
		urb->actual_length += length;
		qh->offset += length;

		/* see if we are done */
		done = (urb->actual_length == urb->transfer_buffer_length)
			|| (rx_count < qh->maxpacket)
			|| (urb->status != -EINPROGRESS);
		if (done
				&& (urb->status == -EINPROGRESS)
				&& (urb->transfer_flags & URB_SHORT_NOT_OK)
				&& (urb->actual_length
					< urb->transfer_buffer_length))
			urb->status = -EREMOTEIO;
	}

	musb_read_fifo(hw_ep, length, buf);

	csr = musb_readw(epio, MUSB_RXCSR);
	csr |= MUSB_RXCSR_H_WZC_BITS;
	if (unlikely(do_flush))
		musb_h_flush_rxfifo(hw_ep, csr);
	else {
		/* REVISIT this assumes AUTOCLEAR is never set */
		csr &= ~(MUSB_RXCSR_RXPKTRDY | MUSB_RXCSR_H_REQPKT);
		if (!done)
			csr |= MUSB_RXCSR_H_REQPKT;
		musb_writew(epio, MUSB_RXCSR, csr);
	}

	return done;
}

/* we don't always need to reinit a given side of an endpoint...
 * when we do, use tx/rx reinit routine and then construct a new CSR
 * to address data toggle, NYET, and DMA or PIO.
 *
 * it's possible that driver bugs (especially for DMA) or aborting a
 * transfer might have left the endpoint busier than it should be.
 * the busy/not-empty tests are basically paranoia.
 */
static void
musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
{
	u16	csr;

	/* NOTE: we know the "rx" fifo reinit never triggers for ep0.
	 * That always uses tx_reinit since ep0 repurposes TX register
	 * offsets; the initial SETUP packet is also a kind of OUT.
	 */

	/* if programmed for Tx, put it in RX mode */
	if (ep->is_shared_fifo) {
		csr = musb_readw(ep->regs, MUSB_TXCSR);
		if (csr & MUSB_TXCSR_MODE) {
			musb_h_tx_flush_fifo(ep);
			musb_writew(ep->regs, MUSB_TXCSR,
					MUSB_TXCSR_FRCDATATOG);
		}
		/* clear mode (and everything else) to enable Rx */
		musb_writew(ep->regs, MUSB_TXCSR, 0);

	/* scrub all previous state, clearing toggle */
	} else {
		csr = musb_readw(ep->regs, MUSB_RXCSR);
		if (csr & MUSB_RXCSR_RXPKTRDY)
			WARNING("rx%d, packet/%d ready?\n", ep->epnum,
				musb_readw(ep->regs, MUSB_RXCOUNT));

		musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
	}

	/* target addr and (for multipoint) hub addr/port */
	if (musb->is_multipoint) {
		musb_write_rxfunaddr(ep->target_regs, qh->addr_reg);
		musb_write_rxhubaddr(ep->target_regs, qh->h_addr_reg);
		musb_write_rxhubport(ep->target_regs, qh->h_port_reg);

	} else
		musb_writeb(musb->mregs, MUSB_FADDR, qh->addr_reg);

	/* protocol/endpoint, interval/NAKlimit, i/o size */
	musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
	musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
	/* NOTE: bulk combining rewrites high bits of maxpacket */
	musb_writew(ep->regs, MUSB_RXMAXP, qh->maxpacket);

	ep->rx_reinit = 0;
}

/*
 * Program an HDRC endpoint as per the given URB
 * Context: irqs blocked, controller lock held
 */
static void musb_ep_program(struct musb *musb, u8 epnum,
			struct urb *urb, unsigned int is_out,
			u8 *buf, u32 len)
{
	struct dma_controller	*dma_controller;
	struct dma_channel	*dma_channel;
	u8			dma_ok;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh;
	u16			packet_sz;

	if (!is_out || hw_ep->is_shared_fifo)
		qh = hw_ep->in_qh;
	else
		qh = hw_ep->out_qh;

	packet_sz = qh->maxpacket;

	DBG(3, "%s hw%d urb %p spd%d dev%d ep%d%s "
				"h_addr%02x h_port%02x bytes %d\n",
			is_out ? "-->" : "<--",
			epnum, urb, urb->dev->speed,
			qh->addr_reg, qh->epnum, is_out ? "out" : "in",
			qh->h_addr_reg, qh->h_port_reg,
			len);

	musb_ep_select(mbase, epnum);

	/* candidate for DMA? */
	dma_controller = musb->dma_controller;
	if (is_dma_capable() && epnum && dma_controller) {
		dma_channel = is_out ? hw_ep->tx_channel : hw_ep->rx_channel;
		if (!dma_channel) {
			dma_channel = dma_controller->channel_alloc(
					dma_controller, hw_ep, is_out);
			if (is_out)
				hw_ep->tx_channel = dma_channel;
			else
				hw_ep->rx_channel = dma_channel;
		}
	} else
		dma_channel = NULL;

	/* make sure we clear DMAEnab, autoSet bits from previous run */

	/* OUT/transmit/EP0 or IN/receive? */
	if (is_out) {
		u16	csr;
		u16	int_txe;
		u16	load_count;

		csr = musb_readw(epio, MUSB_TXCSR);

		/* disable interrupt in case we flush */
		int_txe = musb_readw(mbase, MUSB_INTRTXE);
		musb_writew(mbase, MUSB_INTRTXE, int_txe & ~(1 << epnum));

		/* general endpoint setup */
		if (epnum) {
			/* ASSERT:  TXCSR_DMAENAB was already cleared */

			/* flush all old state, set default */
			musb_h_tx_flush_fifo(hw_ep);
			csr &= ~(MUSB_TXCSR_H_NAKTIMEOUT
					| MUSB_TXCSR_DMAMODE
					| MUSB_TXCSR_FRCDATATOG
					| MUSB_TXCSR_H_RXSTALL
					| MUSB_TXCSR_H_ERROR
					| MUSB_TXCSR_TXPKTRDY
					);
			csr |= MUSB_TXCSR_MODE;

			if (usb_gettoggle(urb->dev,
					qh->epnum, 1))
				csr |= MUSB_TXCSR_H_WR_DATATOGGLE
					| MUSB_TXCSR_H_DATATOGGLE;
			else
				csr |= MUSB_TXCSR_CLRDATATOG;

			/* twice in case of double packet buffering */
			musb_writew(epio, MUSB_TXCSR, csr);
			/* REVISIT may need to clear FLUSHFIFO ... */
			musb_writew(epio, MUSB_TXCSR, csr);
			csr = musb_readw(epio, MUSB_TXCSR);
		} else {
			/* endpoint 0: just flush */
			musb_h_ep0_flush_fifo(hw_ep);
		}

		/* target addr and (for multipoint) hub addr/port */
		if (musb->is_multipoint) {
			musb_write_txfunaddr(mbase, epnum, qh->addr_reg);
			musb_write_txhubaddr(mbase, epnum, qh->h_addr_reg);
			musb_write_txhubport(mbase, epnum, qh->h_port_reg);
/* FIXME if !epnum, do the same for RX ... */
		} else
			musb_writeb(mbase, MUSB_FADDR, qh->addr_reg);
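		/* (non-multipoint cores have just this one FADDR, shared
		 * by every endpoint; it's rewritten per programmed transfer)
		 */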

		/* protocol/endpoint/interval/NAKlimit */
		if (epnum) {
			musb_writeb(epio, MUSB_TXTYPE, qh->type_reg);
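			/* with bulk splitting, the high bits of TXMAXP
			 * (written below) tell the core how many maxpacket
			 * chunks fit its FIFO, letting one buffer be split
			 * across several bus transactions
			 */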
			if (can_bulk_split(musb, qh->type))
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz
					| ((hw_ep->max_packet_sz_tx /
						packet_sz) - 1) << 11);
			else
				musb_writew(epio, MUSB_TXMAXP,
					packet_sz);
			musb_writeb(epio, MUSB_TXINTERVAL, qh->intv_reg);
		} else {
			musb_writeb(epio, MUSB_NAKLIMIT0, qh->intv_reg);
			if (musb->is_multipoint)
				musb_writeb(epio, MUSB_TYPE0,
						qh->type_reg);
		}

		if (can_bulk_split(musb, qh->type))
			load_count = min((u32) hw_ep->max_packet_sz_tx,
						len);
		else
			load_count = min((u32) packet_sz, len);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma_channel) {

			/* clear previous state */
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_DMAENAB);
			csr |= MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR,
				csr | MUSB_TXCSR_MODE);

			qh->segsize = min(len, dma_channel->max_len);

			if (qh->segsize <= packet_sz)
				dma_channel->desired_mode = 0;
			else
				dma_channel->desired_mode = 1;

			if (dma_channel->desired_mode == 0) {
				csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAMODE);
				csr |= (MUSB_TXCSR_DMAENAB);
					/* against programming guide */
			} else
				csr |= (MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAENAB
					| MUSB_TXCSR_DMAMODE);

			musb_writew(epio, MUSB_TXCSR, csr);

			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					dma_channel->desired_mode,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				if (is_out)
					hw_ep->tx_channel = NULL;
				else
					hw_ep->rx_channel = NULL;
				dma_channel = NULL;
			}
		}
#endif

		/* candidate for DMA */
		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {

			/* program endpoint CSRs first, then setup DMA.
			 * assume CPPI setup succeeds.
			 * defer enabling dma.
			 */
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_AUTOSET
					| MUSB_TXCSR_DMAMODE
					| MUSB_TXCSR_DMAENAB);
			csr |= MUSB_TXCSR_MODE;
			musb_writew(epio, MUSB_TXCSR,
				csr | MUSB_TXCSR_MODE);

			dma_channel->actual_len = 0L;
			qh->segsize = len;

			/* TX uses "rndis" mode automatically, but needs help
			 * to identify the zero-length-final-packet case.
			 */
			dma_ok = dma_controller->channel_program(
					dma_channel, packet_sz,
					(urb->transfer_flags
							& URB_ZERO_PACKET)
						== URB_ZERO_PACKET,
					urb->transfer_dma,
					qh->segsize);
			if (dma_ok) {
				load_count = 0;
			} else {
				dma_controller->channel_release(dma_channel);
				hw_ep->tx_channel = NULL;
				dma_channel = NULL;

				/* REVISIT there's an error path here that
				 * needs handling:  can't do dma, but
				 * there's no pio buffer address...
				 */
			}
		}

		if (load_count) {
			/* ASSERT:  TXCSR_DMAENAB was already cleared */

			/* PIO to load FIFO */
			qh->segsize = load_count;
			musb_write_fifo(hw_ep, load_count, buf);
			csr = musb_readw(epio, MUSB_TXCSR);
			csr &= ~(MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_DMAMODE
				| MUSB_TXCSR_AUTOSET);
			/* write CSR */
			csr |= MUSB_TXCSR_MODE;

			if (epnum)
				musb_writew(epio, MUSB_TXCSR, csr);
		}

		/* re-enable interrupt */
		musb_writew(mbase, MUSB_INTRTXE, int_txe);

	/* IN/receive */
	} else {
		u16	csr;

		if (hw_ep->rx_reinit) {
			musb_rx_reinit(musb, qh, hw_ep);

			/* init new state: toggle and NYET, maybe DMA later */
			if (usb_gettoggle(urb->dev, qh->epnum, 0))
				csr = MUSB_RXCSR_H_WR_DATATOGGLE
					| MUSB_RXCSR_H_DATATOGGLE;
			else
				csr = 0;
			if (qh->type == USB_ENDPOINT_XFER_INT)
				csr |= MUSB_RXCSR_DISNYET;

		} else {
			csr = musb_readw(hw_ep->regs, MUSB_RXCSR);

			if (csr & (MUSB_RXCSR_RXPKTRDY
					| MUSB_RXCSR_DMAENAB
					| MUSB_RXCSR_H_REQPKT))
				ERR("broken !rx_reinit, ep%d csr %04x\n",
						hw_ep->epnum, csr);

			/* scrub any stale state, leaving toggle alone */
			csr &= MUSB_RXCSR_DISNYET;
		}

		/* kick things off */

		if ((is_cppi_enabled() || tusb_dma_omap()) && dma_channel) {
			/* candidate for DMA */
			if (dma_channel) {
				dma_channel->actual_len = 0L;
				qh->segsize = len;

				/* AUTOREQ is in a DMA register */
				musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
				csr = musb_readw(hw_ep->regs,
						MUSB_RXCSR);

				/* unless caller treats short rx transfers as
				 * errors, we dare not queue multiple transfers.
				 */
				dma_ok = dma_controller->channel_program(
						dma_channel, packet_sz,
						!(urb->transfer_flags
							& URB_SHORT_NOT_OK),
						urb->transfer_dma,
						qh->segsize);
				if (!dma_ok) {
					dma_controller->channel_release(
							dma_channel);
					hw_ep->rx_channel = NULL;
					dma_channel = NULL;
				} else
					csr |= MUSB_RXCSR_DMAENAB;
			}
		}

		csr |= MUSB_RXCSR_H_REQPKT;
		DBG(7, "RXCSR%d := %04x\n", epnum, csr);
		musb_writew(hw_ep->regs, MUSB_RXCSR, csr);
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
	}
}

/*
 * Service the default endpoint (ep0) as host.
 * Return true until it's time to start the status stage.
 */
static bool musb_h_ep0_continue(struct musb *musb, u16 len, struct urb *urb)
{
	bool			 more = false;
	u8			*fifo_dest = NULL;
	u16			fifo_count = 0;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	struct musb_qh		*qh = hw_ep->in_qh;
	struct usb_ctrlrequest	*request;

	switch (musb->ep0_stage) {
	case MUSB_EP0_IN:
		fifo_dest = urb->transfer_buffer + urb->actual_length;
		fifo_count = min_t(size_t, len, urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count < len)
			urb->status = -EOVERFLOW;

		musb_read_fifo(hw_ep, fifo_count, fifo_dest);

		urb->actual_length += fifo_count;
		if (len < qh->maxpacket) {
			/* always terminate on short read; it's
			 * rarely reported as an error.
			 */
		} else if (urb->actual_length <
				urb->transfer_buffer_length)
			more = true;
		break;
	case MUSB_EP0_START:
		request = (struct usb_ctrlrequest *) urb->setup_packet;

		if (!request->wLength) {
			DBG(4, "start no-DATA\n");
			break;
		} else if (request->bRequestType & USB_DIR_IN) {
			DBG(4, "start IN-DATA\n");
			musb->ep0_stage = MUSB_EP0_IN;
			more = true;
			break;
		} else {
			DBG(4, "start OUT-DATA\n");
			musb->ep0_stage = MUSB_EP0_OUT;
			more = true;
		}
		/* FALLTHROUGH */
	case MUSB_EP0_OUT:
		fifo_count = min_t(size_t, qh->maxpacket,
				   urb->transfer_buffer_length -
				   urb->actual_length);
		if (fifo_count) {
			fifo_dest = (u8 *) (urb->transfer_buffer
					+ urb->actual_length);
			DBG(3, "Sending %d byte%s to ep0 fifo %p\n",
					fifo_count,
					(fifo_count == 1) ? "" : "s",
					fifo_dest);
			musb_write_fifo(hw_ep, fifo_count, fifo_dest);

			urb->actual_length += fifo_count;
			more = true;
		}
		break;
	default:
		ERR("bogus ep0 stage %d\n", musb->ep0_stage);
		break;
	}

	return more;
}

/*
 * Handle default endpoint interrupt as host. Only called in IRQ time
 * from musb_interrupt().
 *
 * called with controller irqlocked
 */
irqreturn_t musb_h_ep0_irq(struct musb *musb)
{
	struct urb		*urb;
	u16			csr, len;
	int			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct musb_hw_ep	*hw_ep = musb->control_ep;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	bool			complete = false;
	irqreturn_t		retval = IRQ_NONE;

	/* ep0 only has one queue, "in" */
	urb = next_urb(qh);

	musb_ep_select(mbase, 0);
	csr = musb_readw(epio, MUSB_CSR0);
	len = (csr & MUSB_CSR0_RXPKTRDY)
			? musb_readb(epio, MUSB_COUNT0)
			: 0;

	DBG(4, "<== csr0 %04x, qh %p, count %d, urb %p, stage %d\n",
		csr, qh, len, urb, musb->ep0_stage);

	/* if we just did status stage, we are done */
	if (MUSB_EP0_STATUS == musb->ep0_stage) {
		retval = IRQ_HANDLED;
		complete = true;
	}

	/* prepare status */
	if (csr & MUSB_CSR0_H_RXSTALL) {
		DBG(6, "STALLING ENDPOINT\n");
		status = -EPIPE;

	} else if (csr & MUSB_CSR0_H_ERROR) {
		DBG(2, "no response, csr0 %04x\n", csr);
		status = -EPROTO;

	} else if (csr & MUSB_CSR0_H_NAKTIMEOUT) {
		DBG(2, "control NAK timeout\n");

		/* NOTE:  this code path would be a good place to PAUSE a
		 * control transfer, if another one is queued, so that
		 * ep0 is more likely to stay busy.  That's already done
		 * for bulk RX transfers.
		 *
		 * if (qh->ring.next != &musb->control), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_writew(epio, MUSB_CSR0, 0);
		retval = IRQ_HANDLED;
	}

	if (status) {
		DBG(6, "aborting\n");
		retval = IRQ_HANDLED;
		if (urb)
			urb->status = status;
		complete = true;

		/* use the proper sequence to abort the transfer */
		if (csr & MUSB_CSR0_H_REQPKT) {
			csr &= ~MUSB_CSR0_H_REQPKT;
			musb_writew(epio, MUSB_CSR0, csr);
			csr &= ~MUSB_CSR0_H_NAKTIMEOUT;
			musb_writew(epio, MUSB_CSR0, csr);
		} else {
			musb_h_ep0_flush_fifo(hw_ep);
		}

		musb_writeb(epio, MUSB_NAKLIMIT0, 0);

		/* clear it */
		musb_writew(epio, MUSB_CSR0, 0);
	}

	if (unlikely(!urb)) {
		/* stop endpoint since we have no place for its data, this
		 * SHOULD NEVER HAPPEN! */
		ERR("no URB for end 0\n");

		musb_h_ep0_flush_fifo(hw_ep);
		goto done;
	}

	if (!complete) {
		/* call common logic and prepare response */
		if (musb_h_ep0_continue(musb, len, urb)) {
			/* more packets required */
			csr = (MUSB_EP0_IN == musb->ep0_stage)
				?  MUSB_CSR0_H_REQPKT : MUSB_CSR0_TXPKTRDY;
		} else {
			/* data transfer complete; perform status phase */
			if (usb_pipeout(urb->pipe)
					|| !urb->transfer_buffer_length)
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_H_REQPKT;
			else
				csr = MUSB_CSR0_H_STATUSPKT
					| MUSB_CSR0_TXPKTRDY;

			/* flag status stage */
			musb->ep0_stage = MUSB_EP0_STATUS;

			DBG(5, "ep0 STATUS, csr %04x\n", csr);

		}
		musb_writew(epio, MUSB_CSR0, csr);
		retval = IRQ_HANDLED;
	} else
		musb->ep0_stage = MUSB_EP0_IDLE;

	/* call completion handler if done */
	if (complete)
		musb_advance_schedule(musb, urb, hw_ep, 1);
done:
	return retval;
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side TX (OUT) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, Program Endpoint
		- ... which starts DMA to fifo in mode 1 or 0

	DMA Isr (transfer complete) -> TxAvail()
		- Stop DMA (~DmaEnab)	(<--- Alert ... currently happens
					only in musb_cleanup_urb)
		- TxPktRdy has to be set in mode 0 or for
			short packets in mode 1.
 */

#endif

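/* In short (per the notes above): mode 0 interrupts once per packet and
 * software sets TxPktRdy each time; mode 1 streams full packets with the
 * hardware setting TxPktRdy, leaving only a short or zero-length final
 * packet for software to handle.
 */
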
/* Service a Tx-Available or dma completion irq for the endpoint */
void musb_host_tx(struct musb *musb, u8 epnum)
{
	int			pipe;
	bool			done = false;
	u16			tx_csr;
	size_t			wLength = 0;
	u8			*buf = NULL;
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->is_shared_fifo ? hw_ep->in_qh
							: hw_ep->out_qh;
	u32			status = 0;
	void __iomem		*mbase = musb->mregs;
	struct dma_channel	*dma;

	urb = next_urb(qh);

	musb_ep_select(mbase, epnum);
	tx_csr = musb_readw(epio, MUSB_TXCSR);

	/* with CPPI, DMA sometimes triggers "extra" irqs */
	if (!urb) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		goto finish;
	}

	pipe = urb->pipe;
	dma = is_dma_capable() ? hw_ep->tx_channel : NULL;
	DBG(4, "OUT/TX%d end, csr %04x%s\n", epnum, tx_csr,
			dma ? ", dma" : "");

	/* check for errors */
	if (tx_csr & MUSB_TXCSR_H_RXSTALL) {
		/* dma was disabled, fifo flushed */
		DBG(3, "TX end %d stall\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (tx_csr & MUSB_TXCSR_H_ERROR) {
		/* (NON-ISO) dma was disabled, fifo flushed */
		DBG(3, "TX 3strikes on ep=%d\n", epnum);

		status = -ETIMEDOUT;

	} else if (tx_csr & MUSB_TXCSR_H_NAKTIMEOUT) {
		DBG(6, "TX end=%d device not responding\n", epnum);

		/* NOTE:  this code path would be a good place to PAUSE a
		 * transfer, if there's some other (nonperiodic) tx urb
		 * that could use this fifo.  (dma complicates it...)
		 * That's already done for bulk RX transfers.
		 *
		 * if (bulk && qh->ring.next != &musb->out_bulk), then
		 * we have a candidate... NAKing is *NOT* an error
		 */
		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS
				| MUSB_TXCSR_TXPKTRDY);
		goto finish;
	}

	if (status) {
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
		}

		/* do the proper sequence to abort the transfer in the
		 * usb core; the dma engine should already be stopped.
		 */
		musb_h_tx_flush_fifo(hw_ep);
		tx_csr &= ~(MUSB_TXCSR_AUTOSET
				| MUSB_TXCSR_DMAENAB
				| MUSB_TXCSR_H_ERROR
				| MUSB_TXCSR_H_RXSTALL
				| MUSB_TXCSR_H_NAKTIMEOUT
				);

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, tx_csr);
		musb_writeb(epio, MUSB_TXINTERVAL, 0);

		done = true;
	}

	/* second cppi case */
	if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
		DBG(4, "extra TX%d ready, csr %04x\n", epnum, tx_csr);
		goto finish;
	}

	/* REVISIT this looks wrong... */
	if (!status || dma || usb_pipeisoc(pipe)) {
		if (dma)
			wLength = dma->actual_len;
		else
			wLength = qh->segsize;
		qh->offset += wLength;

		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor	*d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = qh->segsize;
			if (++qh->iso_idx >= urb->number_of_packets) {
				done = true;
			} else {
				d++;
				buf = urb->transfer_buffer + d->offset;
				wLength = d->length;
			}
		} else if (dma) {
			done = true;
		} else {
			/* see if we need to send more data, or ZLP */
			if (qh->segsize < qh->maxpacket)
				done = true;
			else if (qh->offset == urb->transfer_buffer_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				done = true;
			if (!done) {
				buf = urb->transfer_buffer
						+ qh->offset;
				wLength = urb->transfer_buffer_length
						- qh->offset;
			}
		}
	}

	/* urb->status != -EINPROGRESS means request has been faulted,
	 * so we must abort this transfer after cleanup
	 */
	if (urb->status != -EINPROGRESS) {
		done = true;
		if (status == 0)
			status = urb->status;
	}

	if (done) {
		/* set status */
		urb->status = status;
		urb->actual_length = qh->offset;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_OUT);

	} else if (!(tx_csr & MUSB_TXCSR_DMAENAB)) {
		/* WARN_ON(!buf); */

		/* REVISIT:  some docs say that when hw_ep->tx_double_buffered,
		 * (and presumably, fifo is not half-full) we should write TWO
		 * packets before updating TXCSR ... other docs disagree ...
		 */
		/* PIO:  start next packet in this URB */
		if (wLength > qh->maxpacket)
			wLength = qh->maxpacket;
		musb_write_fifo(hw_ep, wLength, buf);
		qh->segsize = wLength;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_TXCSR,
				MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
	} else
		DBG(1, "not complete, but dma enabled?\n");

finish:
	return;
}

#ifdef CONFIG_USB_INVENTRA_DMA

/* Host side RX (IN) using Mentor DMA works as follows:
	submit_urb ->
		- if queue was empty, ProgramEndpoint
		- first IN token is sent out (by setting ReqPkt)
	LinuxIsr -> RxReady()
	/\	=> first packet is received
	|	- Set in mode 0 (DmaEnab, ~ReqPkt)
	|		-> DMA Isr (transfer complete) -> RxReady()
	|			- Ack receive (~RxPktRdy), turn off DMA (~DmaEnab)
	|			- if urb not complete, send next IN token (ReqPkt)
	|			   |	else complete urb.
	|			   |
	---------------------------
 *
 * Nuances of mode 1:
 *	For short packets, no ack (+RxPktRdy) is sent automatically
 *	(even if AutoClear is ON)
 *	For full packets, ack (~RxPktRdy) and next IN token (+ReqPkt) is sent
 *	automatically => major problem, as collecting the next packet becomes
 *	difficult. Hence mode 1 is not used.
 *
 * REVISIT
 *	All we care about at this driver level is that
 *       (a) all URBs terminate with REQPKT cleared and fifo(s) empty;
 *       (b) termination conditions are: short RX, or buffer full;
 *       (c) fault modes include
 *           - iff URB_SHORT_NOT_OK, short RX status is -EREMOTEIO.
 *             (and that endpoint's dma queue stops immediately)
 *           - overflow (full, PLUS more bytes in the terminal packet)
 *
 *	So for example, usb-storage sets URB_SHORT_NOT_OK, and would
 *	thus be a great candidate for using mode 1 ... for all but the
 *	last packet of one URB's transfer.
 */

#endif

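/* Upshot: with USE_MODE1 left undefined below, RX DMA here always runs
 * in mode 0, one packet per channel_program() call, with software issuing
 * each IN token via REQPKT.
 */
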
/* Schedule next QH from musb->in_bulk and move the current qh to
 * the end; avoids starvation for other endpoints.
 */
static void musb_bulk_rx_nak_timeout(struct musb *musb, struct musb_hw_ep *ep)
{
	struct dma_channel	*dma;
	struct urb		*urb;
	void __iomem		*mbase = musb->mregs;
	void __iomem		*epio = ep->regs;
	struct musb_qh		*cur_qh, *next_qh;
	u16			rx_csr;

	musb_ep_select(mbase, ep->epnum);
	dma = is_dma_capable() ? ep->rx_channel : NULL;

	/* clear nak timeout bit */
	rx_csr = musb_readw(epio, MUSB_RXCSR);
	rx_csr |= MUSB_RXCSR_H_WZC_BITS;
	rx_csr &= ~MUSB_RXCSR_DATAERROR;
	musb_writew(epio, MUSB_RXCSR, rx_csr);

	cur_qh = first_qh(&musb->in_bulk);
	if (cur_qh) {
		urb = next_urb(cur_qh);
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			musb->dma_controller->channel_abort(dma);
			urb->actual_length += dma->actual_len;
			dma->actual_len = 0L;
		}
		musb_save_toggle(ep, 1, urb);

		/* move cur_qh to end of queue */
		list_move_tail(&cur_qh->ring, &musb->in_bulk);
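		/* simple round-robin: the device that NAKed past its limit
		 * loses its turn, so one slow device can't starve others
		 * multiplexed on the shared bulk-IN endpoint
		 */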
		/* get the next qh from musb->in_bulk */
		next_qh = first_qh(&musb->in_bulk);

		/* set rx_reinit and schedule the next qh */
		ep->rx_reinit = 1;
		musb_start_urb(musb, 1, next_qh);
	}
}

/*
 * Service an RX interrupt for the given IN endpoint; docs cover bulk, iso,
 * and high-bandwidth IN transfer cases.
 */
void musb_host_rx(struct musb *musb, u8 epnum)
{
	struct urb		*urb;
	struct musb_hw_ep	*hw_ep = musb->endpoints + epnum;
	void __iomem		*epio = hw_ep->regs;
	struct musb_qh		*qh = hw_ep->in_qh;
	size_t			xfer_len;
	void __iomem		*mbase = musb->mregs;
	int			pipe;
	u16			rx_csr, val;
	bool			iso_err = false;
	bool			done = false;
	u32			status;
	struct dma_channel	*dma;

	musb_ep_select(mbase, epnum);

	urb = next_urb(qh);
	dma = is_dma_capable() ? hw_ep->rx_channel : NULL;
	status = 0;
	xfer_len = 0;

	rx_csr = musb_readw(epio, MUSB_RXCSR);
	val = rx_csr;

	if (unlikely(!urb)) {
		/* REVISIT -- THIS SHOULD NEVER HAPPEN ... but, at least
		 * usbtest #11 (unlinks) triggers it regularly, sometimes
		 * with fifo full.  (Only with DMA??)
		 */
		DBG(3, "BOGUS RX%d ready, csr %04x, count %d\n", epnum, val,
			musb_readw(epio, MUSB_RXCOUNT));
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		return;
	}

	pipe = urb->pipe;

	DBG(5, "<== hw %d rxcsr %04x, urb actual %d (+dma %zu)\n",
		epnum, rx_csr, urb->actual_length,
		dma ? dma->actual_len : 0);

	/* check for errors, concurrent stall & unlink is not really
	 * handled yet! */
	if (rx_csr & MUSB_RXCSR_H_RXSTALL) {
		DBG(3, "RX end %d STALL\n", epnum);

		/* stall; record URB status */
		status = -EPIPE;

	} else if (rx_csr & MUSB_RXCSR_H_ERROR) {
		DBG(3, "end %d RX proto error\n", epnum);

		status = -EPROTO;
		musb_writeb(epio, MUSB_RXINTERVAL, 0);

	} else if (rx_csr & MUSB_RXCSR_DATAERROR) {

		if (USB_ENDPOINT_XFER_ISOC != qh->type) {
			DBG(6, "RX end %d NAK timeout\n", epnum);

			/* NOTE: NAKing is *NOT* an error, so we want to
			 * continue.  Except ... if there's a request for
			 * another QH, use that instead of starving it.
			 *
			 * Devices like Ethernet and serial adapters keep
			 * reads posted at all times, which will starve
			 * other devices without this logic.
			 */
			if (usb_pipebulk(urb->pipe)
					&& qh->mux == 1
					&& !list_is_singular(&musb->in_bulk)) {
				musb_bulk_rx_nak_timeout(musb, hw_ep);
				return;
			}
			musb_ep_select(mbase, epnum);
			rx_csr |= MUSB_RXCSR_H_WZC_BITS;
			rx_csr &= ~MUSB_RXCSR_DATAERROR;
			musb_writew(epio, MUSB_RXCSR, rx_csr);

			goto finish;
		} else {
			DBG(4, "RX end %d ISO data error\n", epnum);
			/* packet error reported later */
			iso_err = true;
		}
	}

	/* faults abort the transfer */
	if (status) {
		/* clean up dma and collect transfer count */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
		}
		musb_h_flush_rxfifo(hw_ep, MUSB_RXCSR_CLRDATATOG);
		musb_writeb(epio, MUSB_RXINTERVAL, 0);
		done = true;
		goto finish;
	}

	if (unlikely(dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY)) {
		/* SHOULD NEVER HAPPEN ... but at least DaVinci has done it */
		ERR("RX%d dma busy, csr %04x\n", epnum, rx_csr);
		goto finish;
	}

	/* thorough shutdown for now ... given more precise fault handling
	 * and better queueing support, we might keep a DMA pipeline going
	 * while processing this irq for earlier completions.
	 */

	/* FIXME this is _way_ too much in-line logic for Mentor DMA */

#ifndef CONFIG_USB_INVENTRA_DMA
	if (rx_csr & MUSB_RXCSR_H_REQPKT)  {
		/* REVISIT this happened for a while on some short reads...
		 * the cleanup still needs investigation... looks bad...
		 * and also duplicates dma cleanup code above ... plus,
		 * shouldn't this be the "half full" double buffer case?
		 */
		if (dma_channel_status(dma) == MUSB_DMA_STATUS_BUSY) {
			dma->status = MUSB_DMA_STATUS_CORE_ABORT;
			(void) musb->dma_controller->channel_abort(dma);
			xfer_len = dma->actual_len;
			done = true;
		}

		DBG(2, "RXCSR%d %04x, reqpkt, len %zu%s\n", epnum, rx_csr,
				xfer_len, dma ? ", dma" : "");
		rx_csr &= ~MUSB_RXCSR_H_REQPKT;

		musb_ep_select(mbase, epnum);
		musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | rx_csr);
	}
#endif
	if (dma && (rx_csr & MUSB_RXCSR_DMAENAB)) {
		xfer_len = dma->actual_len;

		val &= ~(MUSB_RXCSR_DMAENAB
			| MUSB_RXCSR_H_AUTOREQ
			| MUSB_RXCSR_AUTOCLEAR
			| MUSB_RXCSR_RXPKTRDY);
		musb_writew(hw_ep->regs, MUSB_RXCSR, val);

#ifdef CONFIG_USB_INVENTRA_DMA
		if (usb_pipeisoc(pipe)) {
			struct usb_iso_packet_descriptor *d;

			d = urb->iso_frame_desc + qh->iso_idx;
			d->actual_length = xfer_len;

			/* even if there was an error, we did the dma
			 * for iso_frame_desc->length
			 */
			if (d->status != -EILSEQ && d->status != -EOVERFLOW)
				d->status = 0;

			if (++qh->iso_idx >= urb->number_of_packets)
				done = true;
			else
				done = false;

		} else {
			/* done if urb buffer is full or short packet is recd */
			done = (urb->actual_length + xfer_len >=
					urb->transfer_buffer_length
				|| dma->actual_len < qh->maxpacket);
		}

		/* send IN token for next packet, without AUTOREQ */
		if (!done) {
			val |= MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);
		}

		DBG(4, "ep %d dma %s, rxcsr %04x, rxcount %d\n", epnum,
			done ? "off" : "reset",
			musb_readw(epio, MUSB_RXCSR),
			musb_readw(epio, MUSB_RXCOUNT));
#else
		done = true;
#endif
	} else if (urb->status == -EINPROGRESS) {
		/* if no errors, be sure a packet is ready for unloading */
		if (unlikely(!(rx_csr & MUSB_RXCSR_RXPKTRDY))) {
			status = -EPROTO;
			ERR("Rx interrupt with no errors or packet!\n");

			/* FIXME this is another "SHOULD NEVER HAPPEN" */

			/* do the proper sequence to abort the transfer */
			musb_ep_select(mbase, epnum);
			val &= ~MUSB_RXCSR_H_REQPKT;
			musb_writew(epio, MUSB_RXCSR, val);
			goto finish;
		}

		/* we are expecting IN packets */
#ifdef CONFIG_USB_INVENTRA_DMA
		if (dma) {
			struct dma_controller	*c;
			u16			rx_count;
			int			ret, length;
			dma_addr_t		buf;

			rx_count = musb_readw(epio, MUSB_RXCOUNT);

			DBG(2, "RX%d count %d, buffer 0x%x len %d/%d\n",
					epnum, rx_count,
					urb->transfer_dma
						+ urb->actual_length,
					qh->offset,
					urb->transfer_buffer_length);

			c = musb->dma_controller;

			if (usb_pipeisoc(pipe)) {
				int status = 0;
				struct usb_iso_packet_descriptor *d;

				d = urb->iso_frame_desc + qh->iso_idx;

				if (iso_err) {
					status = -EILSEQ;
					urb->error_count++;
				}
				if (rx_count > d->length) {
					if (status == 0) {
						status = -EOVERFLOW;
						urb->error_count++;
					}
					DBG(2, "** OVERFLOW %d into %d\n",\
					    rx_count, d->length);

					length = d->length;
				} else
					length = rx_count;
				d->status = status;
				buf = urb->transfer_dma + d->offset;
			} else {
				length = rx_count;
				buf = urb->transfer_dma +
						urb->actual_length;
			}

			dma->desired_mode = 0;
#ifdef USE_MODE1
			/* because of the issue below, mode 1 will
			 * only rarely behave with correct semantics.
			 */
			if ((urb->transfer_flags &
						URB_SHORT_NOT_OK)
				&& (urb->transfer_buffer_length -
						urb->actual_length)
					> qh->maxpacket)
				dma->desired_mode = 1;
			if (rx_count < hw_ep->max_packet_sz_rx) {
				length = rx_count;
				dma->desired_mode = 0;
			} else {
				length = urb->transfer_buffer_length;
			}
#endif

/* Disadvantage of using mode 1:
 *	It's basically usable only for mass storage class; essentially all
 *	other protocols also terminate transfers on short packets.
 *
 *	Details:
 *	An extra IN token is sent at the end of the transfer (due to AUTOREQ)
 *	If you try to use mode 1 for (transfer_buffer_length - 512), and try
 *	to use the extra IN token to grab the last packet using mode 0, then
 *	the problem is that you cannot be sure when the device will send the
 *	last packet and RxPktRdy set. Sometimes the packet is recd too soon
 *	such that it gets lost when RxCSR is re-set at the end of the mode 1
 *	transfer, while sometimes it is recd just a little late so that if you
 *	try to configure for mode 0 soon after the mode 1 transfer is
 *	completed, you will find rxcount 0. Okay, so you might think why not
 *	wait for an interrupt when the pkt is recd. Well, you won't get any!
 */

			val = musb_readw(epio, MUSB_RXCSR);
			val &= ~MUSB_RXCSR_H_REQPKT;

			if (dma->desired_mode == 0)
				val &= ~MUSB_RXCSR_H_AUTOREQ;
			else
				val |= MUSB_RXCSR_H_AUTOREQ;
			val |= MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAENAB;

			musb_writew(epio, MUSB_RXCSR,
				MUSB_RXCSR_H_WZC_BITS | val);

			/* REVISIT if when actual_length != 0,
			 * transfer_buffer_length needs to be
			 * adjusted first...
			 */
			ret = c->channel_program(
				dma, qh->maxpacket,
				dma->desired_mode, buf, length);

			if (!ret) {
				c->channel_release(dma);
				hw_ep->rx_channel = NULL;
				dma = NULL;
				/* REVISIT reset CSR */
			}
		}
#endif	/* Mentor DMA */

		if (!dma) {
			done = musb_host_packet_rx(musb, urb,
					epnum, iso_err);
			DBG(6, "read %spacket\n", done ? "last " : "");
		}
	}

finish:
	urb->actual_length += xfer_len;
	qh->offset += xfer_len;
	if (done) {
		if (urb->status == -EINPROGRESS)
			urb->status = status;
		musb_advance_schedule(musb, urb, hw_ep, USB_DIR_IN);
	}
}

/* schedule nodes correspond to peripheral endpoints, like an OHCI QH.
 * the software schedule associates multiple such nodes with a given
 * host side hardware endpoint + direction; scheduling may activate
 * that hardware endpoint.
 */
static int musb_schedule(
	struct musb		*musb,
	struct musb_qh		*qh,
	int			is_in)
{
	int			idle;
	int			best_diff;
	int			best_end, epnum;
	struct musb_hw_ep	*hw_ep = NULL;
	struct list_head	*head = NULL;

	/* use fixed hardware for control and bulk */
	if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
		head = &musb->control;
		hw_ep = musb->control_ep;
		goto success;
	}

	/* else, periodic transfers get muxed to other endpoints */

	/*
	 * We know this qh hasn't been scheduled, so all we need to do
	 * is choose which hardware endpoint to put it on ...
	 *
	 * REVISIT what we really want here is a regular schedule tree
	 * like e.g. OHCI uses.
	 */
	best_diff = 4096;
	best_end = -1;

	for (epnum = 1, hw_ep = musb->endpoints + 1;
			epnum < musb->nr_endpoints;
			epnum++, hw_ep++) {
		int	diff;

		if (is_in || hw_ep->is_shared_fifo) {
			if (hw_ep->in_qh != NULL)
				continue;
		} else	if (hw_ep->out_qh != NULL)
			continue;

		if (hw_ep == musb->bulk_ep)
			continue;

		if (is_in)
			diff = hw_ep->max_packet_sz_rx - qh->maxpacket;
		else
			diff = hw_ep->max_packet_sz_tx - qh->maxpacket;

		if (diff >= 0 && best_diff > diff) {
			best_diff = diff;
			best_end = epnum;
		}
	}

	/* use bulk reserved ep1 if no other ep is free */
	if (best_end < 0 && qh->type == USB_ENDPOINT_XFER_BULK) {
		hw_ep = musb->bulk_ep;
		if (is_in)
			head = &musb->in_bulk;
		else
			head = &musb->out_bulk;

		/* Enable bulk RX NAK timeout scheme when bulk requests are
		 * multiplexed.  This scheme doesn't work in high speed to full
		 * speed scenario as NAK interrupts are not coming from a
		 * full speed device connected to a high speed device.
		 * NAK timeout interval is 8 (128 uframe or 16ms) for HS and
		 * 4 (8 frame or 8ms) for FS device.
		 */
		if (is_in && qh->dev)
			qh->intv_reg =
				(USB_SPEED_HIGH == qh->dev->speed) ? 8 : 4;
		goto success;
	} else if (best_end < 0) {
		return -ENOSPC;
	}

	idle = 1;
	qh->mux = 0;
	hw_ep = musb->endpoints + best_end;
	DBG(4, "qh %p periodic slot %d\n", qh, best_end);
success:
	if (head) {
		idle = list_empty(head);
		list_add_tail(&qh->ring, head);
		qh->mux = 1;
	}
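	/* an empty list means the hardware endpoint is idle for this qh;
	 * it must be kicked here, otherwise the irq-driven
	 * musb_advance_schedule() will reach the new qh in its turn
	 */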
	qh->hw_ep = hw_ep;
	qh->hep->hcpriv = qh;
	if (idle)
		musb_start_urb(musb, is_in, qh);
	return 0;
}

static int musb_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags)
{
	unsigned long			flags;
	struct musb			*musb = hcd_to_musb(hcd);
	struct usb_host_endpoint	*hep = urb->ep;
	struct musb_qh			*qh;
	struct usb_endpoint_descriptor	*epd = &hep->desc;
	int				ret;
	unsigned			type_reg;
	unsigned			interval;

	/* host role must be active */
	if (!is_host_active(musb) || !musb->is_active)
		return -ENODEV;

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_link_urb_to_ep(hcd, urb);
	qh = ret ? NULL : hep->hcpriv;
	if (qh)
		urb->hcpriv = qh;
	spin_unlock_irqrestore(&musb->lock, flags);

	/* DMA mapping was already done, if needed, and this urb is on
	 * hep->urb_list now ... so we're done, unless hep wasn't yet
	 * scheduled onto a live qh.
	 *
	 * REVISIT best to keep hep->hcpriv valid until the endpoint gets
	 * disabled, testing for empty qh->ring and avoiding qh setup costs
	 * except for the first urb queued after a config change.
	 */
	if (qh || ret)
		return ret;

	/* Allocate and initialize qh, minimizing the work done each time
	 * hw_ep gets reprogrammed, or with irqs blocked.  Then schedule it.
	 *
	 * REVISIT consider a dedicated qh kmem_cache, so it's harder
	 * for bugs in other kernel code to break this driver...
	 */
	qh = kzalloc(sizeof *qh, mem_flags);
	if (!qh) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		return -ENOMEM;
	}

	qh->hep = hep;
	qh->dev = urb->dev;
	INIT_LIST_HEAD(&qh->ring);
	qh->is_ready = 1;

	qh->maxpacket = le16_to_cpu(epd->wMaxPacketSize);

	/* no high bandwidth support yet */
	if (qh->maxpacket & ~0x7ff) {
		ret = -EMSGSIZE;
		goto done;
	}

	qh->epnum = usb_endpoint_num(epd);
	qh->type = usb_endpoint_type(epd);

	/* NOTE: urb->dev->devnum is wrong during SET_ADDRESS */
	qh->addr_reg = (u8) usb_pipedevice(urb->pipe);

	/* precompute rxtype/txtype/type0 register */
	type_reg = (qh->type << 4) | qh->epnum;
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		type_reg |= 0xc0;
		break;
	case USB_SPEED_FULL:
		type_reg |= 0x80;
		break;
	default:
		type_reg |= 0x40;
	}
	qh->type_reg = type_reg;

	/* Precompute RXINTERVAL/TXINTERVAL register */
	switch (qh->type) {
	case USB_ENDPOINT_XFER_INT:
		/*
		 * Full/low speeds use the linear encoding,
		 * high speed uses the logarithmic encoding.
		 */
		if (urb->dev->speed <= USB_SPEED_FULL) {
			interval = max_t(u8, epd->bInterval, 1);
			break;
		}
		/* FALLTHROUGH */
	case USB_ENDPOINT_XFER_ISOC:
		/* ISO always uses logarithmic encoding */
		interval = min_t(u8, epd->bInterval, 16);
		break;
	default:
		/* REVISIT we actually want to use NAK limits, hinting to the
		 * transfer scheduling logic to try some other qh, e.g. try
		 * for 2 msec first:
		 *
		 * interval = (USB_SPEED_HIGH == urb->dev->speed) ? 16 : 2;
		 *
		 * The downside of disabling this is that transfer scheduling
		 * gets VERY unfair for nonperiodic transfers; a misbehaving
		 * peripheral could make that hurt.  That's perfectly normal
		 * for reads from network or serial adapters ... so we have
		 * partial NAKlimit support for bulk RX.
		 *
		 * The upside of disabling it is simpler transfer scheduling.
		 */
		interval = 0;
	}
	qh->intv_reg = interval;
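	/* Example for the logarithmic encodings: storing 4 means the core
	 * waits 2^(4-1) = 8 frames/microframes between tokens, i.e. 1 msec
	 * at high speed where a microframe is 125 usec.
	 */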

	/* precompute addressing for external hub/tt ports */
	if (musb->is_multipoint) {
		struct usb_device	*parent = urb->dev->parent;

		if (parent != hcd->self.root_hub) {
			qh->h_addr_reg = (u8) parent->devnum;

			/* set up tt info if needed */
			if (urb->dev->tt) {
				qh->h_port_reg = (u8) urb->dev->ttport;
				if (urb->dev->tt->hub)
					qh->h_addr_reg =
						(u8) urb->dev->tt->hub->devnum;
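				/* the top bit of the hub address register
				 * flags a multi-TT hub (one TT per port)
				 */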
				if (urb->dev->tt->multi)
					qh->h_addr_reg |= 0x80;
			}
		}
	}

	/* invariant: hep->hcpriv is null OR the qh that's already scheduled.
	 * until we get real dma queues (with an entry for each urb/buffer),
	 * we only have work to do in the former case.
	 */
	spin_lock_irqsave(&musb->lock, flags);
	if (hep->hcpriv) {
		/* some concurrent activity submitted another urb to hep...
		 * odd, rare, error prone, but legal.
		 */
		kfree(qh);
		ret = 0;
	} else
		ret = musb_schedule(musb, qh,
				epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK);

	if (ret == 0) {
		urb->hcpriv = qh;
		/* FIXME set urb->start_frame for iso/intr, it's tested in
		 * musb_start_urb(), but otherwise only konicawc cares ...
		 */
	}
	spin_unlock_irqrestore(&musb->lock, flags);

done:
	if (ret != 0) {
		spin_lock_irqsave(&musb->lock, flags);
		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&musb->lock, flags);
		kfree(qh);
	}
	return ret;
}

/*
 * abort a transfer that's at the head of a hardware queue.
 * called with controller locked, irqs blocked
 * that hardware queue advances to the next transfer, unless prevented
 */
static int musb_cleanup_urb(struct urb *urb, struct musb_qh *qh, int is_in)
{
	struct musb_hw_ep	*ep = qh->hw_ep;
	void __iomem		*epio = ep->regs;
	unsigned		hw_end = ep->epnum;
	void __iomem		*regs = ep->musb->mregs;
	u16			csr;
	int			status = 0;

	musb_ep_select(regs, hw_end);

	if (is_dma_capable()) {
		struct dma_channel	*dma;

		dma = is_in ? ep->rx_channel : ep->tx_channel;
		if (dma) {
			status = ep->musb->dma_controller->channel_abort(dma);
			DBG(status ? 1 : 3,
				"abort %cX%d DMA for urb %p --> %d\n",
				is_in ? 'R' : 'T', ep->epnum,
				urb, status);
			urb->actual_length += dma->actual_len;
		}
	}

	/* turn off DMA requests, discard state, stop polling ... */
	if (is_in) {
		/* giveback saves bulk toggle */
		csr = musb_h_flush_rxfifo(ep, 0);

		/* REVISIT we still get an irq; should likely clear the
		 * endpoint's irq status here to avoid bogus irqs.
		 * clearing that status is platform-specific...
		 */
	} else if (ep->epnum) {
		musb_h_tx_flush_fifo(ep);
		csr = musb_readw(epio, MUSB_TXCSR);
		csr &= ~(MUSB_TXCSR_AUTOSET
			| MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_H_RXSTALL
			| MUSB_TXCSR_H_NAKTIMEOUT
			| MUSB_TXCSR_H_ERROR
			| MUSB_TXCSR_TXPKTRDY);
		musb_writew(epio, MUSB_TXCSR, csr);
		/* REVISIT may need to clear FLUSHFIFO ... */
		musb_writew(epio, MUSB_TXCSR, csr);
		/* flush cpu writebuffer */
		csr = musb_readw(epio, MUSB_TXCSR);
	} else  {
		musb_h_ep0_flush_fifo(ep);
	}
	if (status == 0)
		musb_advance_schedule(ep->musb, urb, ep, is_in);
	return status;
}

static int musb_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct musb		*musb = hcd_to_musb(hcd);
	struct musb_qh		*qh;
	struct list_head	*sched;
	unsigned long		flags;
	int			ret;

	DBG(4, "urb=%p, dev%d ep%d%s\n", urb,
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out");

	spin_lock_irqsave(&musb->lock, flags);
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret)
		goto done;

	qh = urb->hcpriv;
	if (!qh)
		goto done;

	/* Any URB not actively programmed into endpoint hardware can be
	 * immediately given back; that's any URB not at the head of an
	 * endpoint queue, unless someday we get real DMA queues.  And even
	 * if it's at the head, it might not be known to the hardware...
	 *
	 * Otherwise abort current transfer, pending dma, etc.; urb->status
	 * has already been updated.  This is a synchronous abort; it'd be
	 * OK to hold off until after some IRQ, though.
	 */
	if (!qh->is_ready || urb->urb_list.prev != &qh->hep->urb_list)
		sched = NULL;
	else {
		switch (qh->type) {
		case USB_ENDPOINT_XFER_CONTROL:
			sched = &musb->control;
			break;
		case USB_ENDPOINT_XFER_BULK:
			if (qh->mux == 1) {
				if (usb_pipein(urb->pipe))
					sched = &musb->in_bulk;
				else
					sched = &musb->out_bulk;
				break;
			}
		default:
			/* REVISIT when we get a schedule tree, periodic
			 * transfers won't always be at the head of a
			 * singleton queue...
			 */
			sched = NULL;
			break;
		}
	}

	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */
	if (ret < 0 || (sched && qh != first_qh(sched))) {
		int	ready = qh->is_ready;

		ret = 0;
		qh->is_ready = 0;
		__musb_giveback(musb, urb, 0);
		qh->is_ready = ready;

		/* If nothing else (usually musb_giveback) is using it
		 * and its URB list has emptied, recycle this qh.
		 */
		if (ready && list_empty(&qh->hep->urb_list)) {
			qh->hep->hcpriv = NULL;
			list_del(&qh->ring);
			kfree(qh);
		}
	} else
		ret = musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);
done:
	spin_unlock_irqrestore(&musb->lock, flags);
	return ret;
}

/* disable an endpoint */
static void
musb_h_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
{
	u8			epnum = hep->desc.bEndpointAddress;
	unsigned long		flags;
	struct musb		*musb = hcd_to_musb(hcd);
	u8			is_in = epnum & USB_DIR_IN;
	struct musb_qh		*qh;
	struct urb		*urb;
	struct list_head	*sched;

	spin_lock_irqsave(&musb->lock, flags);

	qh = hep->hcpriv;
	if (qh == NULL)
		goto exit;

	switch (qh->type) {
	case USB_ENDPOINT_XFER_CONTROL:
		sched = &musb->control;
		break;
	case USB_ENDPOINT_XFER_BULK:
		if (qh->mux == 1) {
			if (is_in)
				sched = &musb->in_bulk;
			else
				sched = &musb->out_bulk;
			break;
		}
	default:
		/* REVISIT when we get a schedule tree, periodic transfers
		 * won't always be at the head of a singleton queue...
		 */
		sched = NULL;
		break;
	}

	/* NOTE:  qh is invalid unless !list_empty(&hep->urb_list) */

	/* kick first urb off the hardware, if needed */
	qh->is_ready = 0;
	if (!sched || qh == first_qh(sched)) {
		urb = next_urb(qh);

		/* make software (then hardware) stop ASAP */
		if (!urb->unlinked)
			urb->status = -ESHUTDOWN;

		/* cleanup */
		musb_cleanup_urb(urb, qh, urb->pipe & USB_DIR_IN);

		/* Then nuke all the others ... and advance the
		 * queue on hw_ep (e.g. bulk ring) when we're done.
		 */
		while (!list_empty(&hep->urb_list)) {
			urb = next_urb(qh);
			urb->status = -ESHUTDOWN;
			musb_advance_schedule(musb, urb, qh->hw_ep, is_in);
		}
	} else {
		/* Just empty the queue; the hardware is busy with
		 * other transfers, and since !qh->is_ready nothing
		 * will activate any of these as it advances.
		 */
		while (!list_empty(&hep->urb_list))
			__musb_giveback(musb, next_urb(qh), -ESHUTDOWN);

		hep->hcpriv = NULL;
		list_del(&qh->ring);
		kfree(qh);
	}
exit:
	spin_unlock_irqrestore(&musb->lock, flags);
}

static int musb_h_get_frame_number(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	return musb_readw(musb->mregs, MUSB_FRAME);
}

static int musb_h_start(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	/* NOTE: musb_start() is called when the hub driver turns
	 * on port power, or when (OTG) peripheral starts.
	 */
	hcd->state = HC_STATE_RUNNING;
	musb->port1_status = 0;
	return 0;
}

static void musb_h_stop(struct usb_hcd *hcd)
{
	musb_stop(hcd_to_musb(hcd));
	hcd->state = HC_STATE_HALT;
}

static int musb_bus_suspend(struct usb_hcd *hcd)
{
	struct musb	*musb = hcd_to_musb(hcd);

	if (musb->xceiv.state == OTG_STATE_A_SUSPEND)
		return 0;

	if (is_host_active(musb) && musb->is_active) {
		WARNING("trying to suspend as %s is_active=%i\n",
			otg_state_string(musb), musb->is_active);
		return -EBUSY;
	} else
		return 0;
}

static int musb_bus_resume(struct usb_hcd *hcd)
{
	/* resuming child port does the work */
	return 0;
}

const struct hc_driver musb_hc_driver = {
	.description		= "musb-hcd",
	.product_desc		= "MUSB HDRC host driver",
	.hcd_priv_size		= sizeof(struct musb),
	.flags			= HCD_USB2 | HCD_MEMORY,

	/* not using irq handler or reset hooks from usbcore, since
	 * those must be shared with peripheral code for OTG configs
	 */

	.start			= musb_h_start,
	.stop			= musb_h_stop,

	.get_frame_number	= musb_h_get_frame_number,

	.urb_enqueue		= musb_urb_enqueue,
	.urb_dequeue		= musb_urb_dequeue,
	.endpoint_disable	= musb_h_disable,

	.hub_status_data	= musb_hub_status_data,
	.hub_control		= musb_hub_control,
	.bus_suspend		= musb_bus_suspend,
	.bus_resume		= musb_bus_resume,
	/* .start_port_reset	= NULL, */
	/* .hub_irq_enable	= NULL, */
};