/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"
struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
	struct whc_qset *qset;
	dma_addr_t dma;

	qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
	if (qset == NULL)
		return NULL;
	memset(qset, 0, sizeof(struct whc_qset));

	qset->qset_dma = dma;
	qset->whc = whc;

	INIT_LIST_HEAD(&qset->list_node);
	INIT_LIST_HEAD(&qset->stds);

	return qset;
}
/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
52 static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
54 struct usb_device *usb_dev = urb->dev;
55 struct usb_wireless_ep_comp_descriptor *epcd;
58 is_out = usb_pipeout(urb->pipe);
60 qset->max_packet = le16_to_cpu(urb->ep->desc.wMaxPacketSize);
62 epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;
64 qset->max_seq = epcd->bMaxSequence;
65 qset->max_burst = epcd->bMaxBurst;
	qset->qh.info1 = cpu_to_le32(
		QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
		| (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
		| usb_pipe_to_qh_type(urb->pipe)
		| QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
		| QH_INFO1_MAX_PKT_LEN(qset->max_packet)
		);
	qset->qh.info2 = cpu_to_le32(
		QH_INFO2_BURST(qset->max_burst)
		| QH_INFO2_DBP(0)
		| QH_INFO2_MAX_COUNT(3)
		| QH_INFO2_MAX_RETRY(3)
		| QH_INFO2_MAX_SEQ(qset->max_seq - 1)
		);
	/* FIXME: where can we obtain these Tx parameters from?  Why
	 * doesn't the chip know what Tx power to use?  It knows the Rx
	 * strength and can presumably guess the Tx power required
	 * from that? */
	qset->qh.info3 = cpu_to_le32(
		QH_INFO3_TX_RATE_53_3
		| QH_INFO3_TX_PWR(0) /* 0 == max power */
		);
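
	/*
	 * Seed the current window with every burst packet usable:
	 * (1 << max_burst) - 1 sets the low max_burst bits, e.g.,
	 * max_burst == 4 gives an initial window of 0xf.
	 */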
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}
/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule.
 *
 * The sequence number and current window are not cleared (see
 * qset_reset()).
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
	qset->td_start = qset->td_end = qset->ntds = 0;

	qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
	qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
	qset->qh.err_count = 0;
	qset->qh.scratch[0] = 0;
	qset->qh.scratch[1] = 0;
	qset->qh.scratch[2] = 0;

	memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

	init_completion(&qset->remove_complete);
}
/**
 * qset_reset - reset endpoint state in a qset.
 *
 * Clears the sequence number and current window.  This qset must not
 * be in the ASL or PZL.
 */
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
	qset->reuse = 0;

	qset->qh.status &= ~QH_STATUS_SEQ_MASK;
	qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
}
/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
				 gfp_t mem_flags)
{
	struct whc_qset *qset;

	qset = urb->ep->hcpriv;
	if (qset == NULL) {
		qset = qset_alloc(whc, mem_flags);
		if (qset == NULL)
			return NULL;

		qset->ep = urb->ep;
		urb->ep->hcpriv = qset;
		qset_fill_qh(qset, urb);
	}
	return qset;
}
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
	qset->remove = 0;
	list_del_init(&qset->list_node);
	complete(&qset->remove_complete);
}
/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns WHC_UPDATE_UPDATED if the list (ASL/PZL) must be updated
 * because (for a WHCI 0.95 controller) an activated qTD was pointed
 * to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
	struct whc_std *std;
	enum whc_update update = 0;

	list_for_each_entry(std, &qset->stds, list_node) {
		struct whc_qtd *qtd;
		uint32_t status;

		if (qset->ntds >= WHCI_QSET_TD_MAX
		    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
			break;

		if (std->qtd)
			continue; /* already has a qTD */

		qtd = std->qtd = &qset->qtd[qset->td_end];

		/* Fill in setup bytes for control transfers. */
		if (usb_pipecontrol(std->urb->pipe))
			memcpy(qtd->setup, std->urb->setup_packet, 8);

		status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

		if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
			status |= QTD_STS_LAST_PKT;
		/*
		 * For an IN transfer the iAlt field should be set so
		 * the h/w will automatically advance to the next
		 * transfer.  However, if there are 8 or more TDs
		 * remaining in this transfer then iAlt cannot be set
		 * as it could point to somewhere in this transfer.
		 */
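		/*
		 * (Example: with WHCI_QSET_TD_MAX == 8, td_end == 6
		 * and 3 qTDs remaining, iAlt = (6 + 3) % 8 = 1, the
		 * slot just past this transfer's final qTD.)
		 */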
		if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
			int ialt;
			ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
			status |= QTD_STS_IALT(ialt);
		} else if (usb_pipein(std->urb->pipe))
			qset->pause_after_urb = std->urb;
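
		/*
		 * An sTD with no page list is mapped directly (see
		 * qset_fill_page_list()), so page_list_ptr is then
		 * the buffer itself and QTD_OPT_SMALL says so.
		 */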
		if (std->num_pointers)
			qtd->options = cpu_to_le32(QTD_OPT_IOC);
		else
			qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
		qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

		qtd->status = cpu_to_le32(status);

		if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
			update = WHC_UPDATE_UPDATED;

		if (++qset->td_end >= WHCI_QSET_TD_MAX)
			qset->td_end = 0;
		qset->ntds++;
	}

	return update;
}
/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might be still active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
236 static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
238 qset->qtd[qset->td_start].status = 0;
240 if (++qset->td_start >= WHCI_QSET_TD_MAX)
static void qset_copy_bounce_to_sg(struct whc *whc, struct whc_std *std)
{
	struct scatterlist *sg;
	void *bounce;
	size_t remaining, offset;

	bounce = std->bounce_buf;
	remaining = std->len;

	sg = std->bounce_sg;
	offset = std->bounce_offset;

	while (remaining) {
		size_t len;

		len = min(sg->length - offset, remaining);
		memcpy(sg_virt(sg) + offset, bounce, len);

		bounce += len;
		remaining -= len;

		offset += len;
		if (offset >= sg->length) {
			sg = sg_next(sg);
			offset = 0;
		}
	}
}
/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
	list_del(&std->list_node);
	if (std->bounce_buf) {
		bool is_out = usb_pipeout(std->urb->pipe);
		dma_addr_t dma_addr;

		if (std->num_pointers)
			dma_addr = le64_to_cpu(std->pl_virt[0].buf_ptr);
		else
			dma_addr = std->dma_addr;

		dma_unmap_single(whc->wusbhc.dev, dma_addr,
				 std->len, is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		if (!is_out)
			qset_copy_bounce_to_sg(whc, std);
		kfree(std->bounce_buf);
	}
	if (std->pl_virt) {
		if (std->dma_addr)
			dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
					 std->num_pointers * sizeof(struct whc_page_list_entry),
					 DMA_TO_DEVICE);
		kfree(std->pl_virt);
		std->pl_virt = NULL;
	}
	kfree(std);
}
/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
			     struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb != urb)
			break;
		if (std->qtd != NULL)
			qset_remove_qtd(whc, qset);
		qset_free_std(whc, std);
	}
}
/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
	struct whc_std *std, *t;

	list_for_each_entry_safe(std, t, &qset->stds, list_node) {
		if (std->urb == urb)
			qset_free_std(qset->whc, std);
	}
}
339 static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
341 dma_addr_t dma_addr = std->dma_addr;
346 /* Short buffers don't need a page list. */
347 if (std->len <= WHCI_PAGE_SIZE) {
348 std->num_pointers = 0;
352 sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
353 ep = dma_addr + std->len;
354 std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
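	/*
	 * (E.g., with 4 KiB WHCI pages, a 5000 byte buffer starting
	 * 512 bytes into a page covers DIV_ROUND_UP(512 + 5000, 4096)
	 * = 2 pages, so two page list entries are needed.)
	 */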
	pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
	std->pl_virt = kmalloc(pl_len, mem_flags);
	if (std->pl_virt == NULL)
		return -ENOMEM;
	std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);

	for (p = 0; p < std->num_pointers; p++) {
		std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
		dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
	}

	return 0;
}
/**
 * urb_dequeue_work - executes asl/pzl update and gives back the urb to the system.
 */
static void urb_dequeue_work(struct work_struct *work)
{
	struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
	struct whc_qset *qset = wurb->qset;
	struct whc *whc = qset->whc;
	unsigned long flags;

	if (wurb->is_async)
		asl_update(whc, WUSBCMD_ASYNC_UPDATED
			   | WUSBCMD_ASYNC_SYNCED_DB
			   | WUSBCMD_ASYNC_QSET_RM);
	else
		pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
			   | WUSBCMD_PERIODIC_SYNCED_DB
			   | WUSBCMD_PERIODIC_QSET_RM);
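
	/*
	 * qset_remove_urb() drops and retakes whc->lock internally,
	 * so it must be called with the lock held.
	 */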
	spin_lock_irqsave(&whc->lock, flags);
	qset_remove_urb(whc, qset, wurb->urb, wurb->status);
	spin_unlock_irqrestore(&whc->lock, flags);
}
static struct whc_std *qset_new_std(struct whc *whc, struct whc_qset *qset,
				    struct urb *urb, gfp_t mem_flags)
{
	struct whc_std *std;

	std = kzalloc(sizeof(struct whc_std), mem_flags);
	if (std == NULL)
		return NULL;

	std->urb = urb;
	std->qtd = NULL;

	INIT_LIST_HEAD(&std->list_node);
	list_add_tail(&std->list_node, &qset->stds);

	return std;
}
static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *urb,
			   gfp_t mem_flags)
{
	size_t remaining;
	struct scatterlist *sg;
	int i;
	int ntds = 0;
	struct whc_std *std = NULL;
	dma_addr_t prev_end = 0;
	size_t pl_len;
	int p = 0;

	remaining = urb->transfer_buffer_length;

	for_each_sg(urb->sg->sg, sg, urb->num_sgs, i) {
		dma_addr_t dma_addr;
		size_t dma_remaining;
		dma_addr_t sp, ep;
		int num_pointers;

		if (remaining == 0) {
			break;
		}

		dma_addr = sg_dma_address(sg);
		dma_remaining = min_t(size_t, sg_dma_len(sg), remaining);

		while (dma_remaining) {
			size_t dma_len;
			/*
			 * We can use the previous std (if it exists) provided that:
			 * - the previous one ended on a page boundary.
			 * - the current one begins on a page boundary.
			 * - the previous one isn't full.
			 *
			 * If a new std is needed but the previous one
			 * did not end on a wMaxPacketSize boundary
			 * then this sg list cannot be mapped onto
			 * multiple qTDs.  Return an error and let the
			 * caller sort it out.
			 */
			if (!std
			    || (prev_end & (WHCI_PAGE_SIZE-1))
			    || (dma_addr & (WHCI_PAGE_SIZE-1))
			    || std->len + WHCI_PAGE_SIZE > QTD_MAX_XFER_SIZE) {
				if (prev_end % qset->max_packet != 0)
					return -EINVAL;
				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL) {
					return -ENOMEM;
				}
				ntds++;
				p = 0;
			}
			dma_len = dma_remaining;

			/*
			 * If the remainder of this element doesn't
			 * fit in a single qTD, end the qTD on a
			 * wMaxPacketSize boundary.
			 */
			if (std->len + dma_len > QTD_MAX_XFER_SIZE) {
				dma_len = QTD_MAX_XFER_SIZE - std->len;
				ep = ((dma_addr + dma_len) / qset->max_packet) * qset->max_packet;
				dma_len = ep - dma_addr;
			}

			std->len += dma_len;
			std->ntds_remaining = -1; /* filled in later */

			sp = dma_addr & ~(WHCI_PAGE_SIZE-1);
			ep = dma_addr + dma_len;
			num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);
			std->num_pointers += num_pointers;
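
			/*
			 * An sTD may continue across several sg
			 * elements (when each boundary is page
			 * aligned), so its page list grows as
			 * elements are appended.
			 */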
			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);

			std->pl_virt = krealloc(std->pl_virt, pl_len, mem_flags);
			if (std->pl_virt == NULL) {
				return -ENOMEM;
			}

			for (; p < std->num_pointers; p++) {
				std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
				dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(WHCI_PAGE_SIZE-1);
			}

			prev_end = dma_addr = ep;
			dma_remaining -= dma_len;
			remaining -= dma_len;
		}
	}
	/* Now the number of stds is known, go back and fill in
	   std->ntds_remaining. */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining == -1) {
			pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
			std->ntds_remaining = ntds--;
			std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt,
						       pl_len, DMA_TO_DEVICE);
		}
	}
	return 0;
}
/**
 * qset_add_urb_sg_linearize - add an urb with sg list, copying the data
 *
 * If the URB contains an sg list whose elements cannot be directly
 * mapped to qTDs then the data must be transferred via bounce
 * buffers.
 */
static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
				     struct urb *urb, gfp_t mem_flags)
{
	bool is_out = usb_pipeout(urb->pipe);
	size_t max_std_len;
	size_t remaining;
	int ntds = 0;
	struct whc_std *std = NULL;
	void *bounce = NULL;
	struct scatterlist *sg;
	int i;

	/* Limit each bounce buffer to max_burst * max_packet
	   (at most 16 * 3.5 KiB = 56 KiB). */
	max_std_len = qset->max_burst * qset->max_packet;

	remaining = urb->transfer_buffer_length;
	for_each_sg(urb->sg->sg, sg, urb->sg->nents, i) {
		size_t len;
		size_t sg_remaining;
		void *orig;

		if (remaining == 0) {
			break;
		}

		sg_remaining = min_t(size_t, remaining, sg->length);
		orig = sg_virt(sg);

		while (sg_remaining) {
			if (!std || std->len == max_std_len) {
				std = qset_new_std(whc, qset, urb, mem_flags);
				if (std == NULL)
					return -ENOMEM;
				std->bounce_buf = kmalloc(max_std_len, mem_flags);
				if (std->bounce_buf == NULL)
					return -ENOMEM;
				std->bounce_sg = sg;
				std->bounce_offset = orig - sg_virt(sg);
				bounce = std->bounce_buf;
				ntds++;
			}

			len = min(sg_remaining, max_std_len - std->len);

			if (is_out)
				memcpy(bounce, orig, len);

			std->len += len;
			std->ntds_remaining = -1; /* filled in later */

			bounce += len;
			orig += len;
			sg_remaining -= len;
			remaining -= len;
		}
	}
	/*
	 * For each of the new sTDs, map the bounce buffers, create
	 * page lists (if necessary), and fill in std->ntds_remaining.
	 */
	list_for_each_entry(std, &qset->stds, list_node) {
		if (std->ntds_remaining != -1)
			continue;

		std->dma_addr = dma_map_single(&whc->umc->dev, std->bounce_buf, std->len,
					       is_out ? DMA_TO_DEVICE : DMA_FROM_DEVICE);

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			return -ENOMEM;

		std->ntds_remaining = ntds--;
	}

	return 0;
}
/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required.  At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
	gfp_t mem_flags)
{
	struct whc_urb *wurb;
	int remaining = urb->transfer_buffer_length;
	u64 transfer_dma = urb->transfer_dma;
	int ntds_remaining;
	int ret;

	wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
	if (wurb == NULL)
		goto err_no_mem;
	urb->hcpriv = wurb;
	wurb->qset = qset;
	wurb->urb = urb;
	INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);
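
	/*
	 * Try to map the scatterlist directly onto qTDs first; fall
	 * back to linearizing it through bounce buffers if its
	 * element layout cannot be expressed as qTDs.
	 */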
	if (urb->sg) {
		ret = qset_add_urb_sg(whc, qset, urb, mem_flags);
		if (ret == -EINVAL) {
			qset_free_stds(qset, urb);
			ret = qset_add_urb_sg_linearize(whc, qset, urb, mem_flags);
		}
		if (ret < 0)
			goto err_free_wurb;
		return 0;
	}
	ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
	if (ntds_remaining == 0)
		ntds_remaining = 1;

	while (ntds_remaining) {
		struct whc_std *std;
		size_t std_len;

		std_len = remaining;
		if (std_len > QTD_MAX_XFER_SIZE)
			std_len = QTD_MAX_XFER_SIZE;

		std = qset_new_std(whc, qset, urb, mem_flags);
		if (std == NULL)
			goto err_free_stds;

		std->dma_addr = transfer_dma;
		std->len = std_len;
		std->ntds_remaining = ntds_remaining;

		if (qset_fill_page_list(whc, std, mem_flags) < 0)
			goto err_free_stds;

		ntds_remaining--;
		remaining -= std_len;
		transfer_dma += std_len;
	}

	return 0;

err_free_stds:
	qset_free_stds(qset, urb);
err_free_wurb:
	kfree(wurb);
err_no_mem:
	return -ENOMEM;
}
/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
			    struct urb *urb, int status)
{
	struct wusbhc *wusbhc = &whc->wusbhc;
	struct whc_urb *wurb = urb->hcpriv;

	usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
	/* Drop the lock as urb->complete() may enqueue another urb. */
	spin_unlock(&whc->lock);
	wusbhc_giveback_urb(wusbhc, urb, status);
	spin_lock(&whc->lock);

	kfree(wurb);
}
/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
	if (status & QTD_STS_HALTED) {
		if (status & QTD_STS_DBE)
			return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
		else if (status & QTD_STS_BABBLE)
			return -EOVERFLOW;
		else if (status & QTD_STS_RCE)
			return -ETIME;
		return -EPIPE; /* unknown halt reason: report a stall */
	}
	if (usb_pipein(urb->pipe)
	    && (urb->transfer_flags & URB_SHORT_NOT_OK)
	    && urb->actual_length < urb->transfer_buffer_length)
		return -EREMOTEIO;
	return 0;
}
/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the bytes transferred by the qTD.  If the urb
 * is completely transferred, or (for an IN transfer) the last packet
 * flag (LPF) is set, then the transfer is complete and the urb
 * should be returned to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
				 struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	uint32_t status;
	bool complete;

	status = le32_to_cpu(qtd->status);

	urb->actual_length += std->len - QTD_STS_TO_LEN(status);

	if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
		complete = true;
	else
		complete = whc_std_last(std);

	qset_remove_qtd(whc, qset);
	qset_free_std(whc, std);
	/*
	 * Transfers for this URB are complete?  Then return it to the
	 * USB subsystem.
	 */
	if (complete) {
		qset_remove_qtds(whc, qset, urb);
		qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

		/*
		 * If iAlt isn't valid then the hardware didn't
		 * advance iCur.  Adjust the start and end pointers to
		 * match iCur.
		 */
		if (!(status & QTD_STS_IALT_VALID))
			qset->td_start = qset->td_end
				= QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
		qset->pause_after_urb = NULL;
	}
}
/**
 * process_halted_qtd - process a qset with a halted qtd
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
			       struct whc_qtd *qtd)
{
	struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
	struct urb *urb = std->urb;
	int urb_status;

	urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

	qset_remove_qtds(whc, qset, urb);
	qset_remove_urb(whc, qset, urb, urb_status);

	list_for_each_entry(std, &qset->stds, list_node) {
		if (qset->ntds == 0)
			break;
		qset_remove_qtd(whc, qset);
		std->qtd = NULL;
	}

	qset->remove = 1;
}
void qset_free(struct whc *whc, struct whc_qset *qset)
{
	dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}
/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
	wait_for_completion(&qset->remove_complete);
	qset_free(whc, qset);
}