wusb: WHCI host controller driver
drivers/usb/host/whci/qset.c
/*
 * Wireless Host Controller (WHC) qset management.
 *
 * Copyright (C) 2007 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>

#include "../../wusbcore/wusbhc.h"

#include "whcd.h"

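/**
 * dump_qset - print the state of a qset to the debug log
 * @qset: the qset to dump
 * @dev:  the device to log against
 *
 * Prints the qset's QHead, its qTDs and the sTDs queued on it via
 * dev_dbg().
 */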
void dump_qset(struct whc_qset *qset, struct device *dev)
{
        struct whc_std *std;
        struct urb *urb = NULL;
        int i;

        dev_dbg(dev, "qset %08x\n", (u32)qset->qset_dma);
        dev_dbg(dev, "  -> %08x\n", (u32)qset->qh.link);
        dev_dbg(dev, "  info: %08x %08x %08x\n",
                qset->qh.info1, qset->qh.info2, qset->qh.info3);
        dev_dbg(dev, "  sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count);
        dev_dbg(dev, "  TD: sts: %08x opts: %08x\n",
                qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);

        for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
                dev_dbg(dev, "  %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
                        i == qset->td_start ? 'S' : ' ',
                        i == qset->td_end ? 'E' : ' ',
                        i, qset->qtd[i].status, qset->qtd[i].options,
                        (u32)qset->qtd[i].page_list_ptr);
        }
        dev_dbg(dev, "  ntds: %d\n", qset->ntds);
        list_for_each_entry(std, &qset->stds, list_node) {
                if (urb != std->urb) {
                        urb = std->urb;
                        dev_dbg(dev, "  urb %p transferred: %d bytes\n", urb,
                                urb->actual_length);
                }
                if (std->qtd)
                        dev_dbg(dev, "    sTD[%td]: %zu bytes @ %08x\n",
                                std->qtd - &qset->qtd[0],
                                std->len, std->num_pointers ?
                                (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
                else
                        dev_dbg(dev, "    sTD[-]: %zd bytes @ %08x\n",
                                std->len, std->num_pointers ?
                                (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
        }
}

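/**
 * qset_alloc - allocate a qset from the qset DMA pool
 * @whc:       the WHCI host controller
 * @mem_flags: allocation flags
 *
 * The qset is zeroed and its DMA address and list heads are
 * initialized.  Returns NULL if the allocation fails.
 */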
struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
{
        struct whc_qset *qset;
        dma_addr_t dma;

        qset = dma_pool_alloc(whc->qset_pool, mem_flags, &dma);
        if (qset == NULL)
                return NULL;
        memset(qset, 0, sizeof(struct whc_qset));

        qset->qset_dma = dma;
        qset->whc = whc;

        INIT_LIST_HEAD(&qset->list_node);
        INIT_LIST_HEAD(&qset->stds);

        return qset;
}

/**
 * qset_fill_qh - fill the static endpoint state in a qset's QHead
 * @qset: the qset whose QH needs initializing with static endpoint
 *        state
 * @urb:  an urb for a transfer to this endpoint
 */
static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
{
        struct usb_device *usb_dev = urb->dev;
        struct usb_wireless_ep_comp_descriptor *epcd;
        bool is_out;

        is_out = usb_pipeout(urb->pipe);

        epcd = (struct usb_wireless_ep_comp_descriptor *)qset->ep->extra;

        if (epcd) {
                qset->max_seq = epcd->bMaxSequence;
                qset->max_burst = epcd->bMaxBurst;
        } else {
                qset->max_seq = 2;
                qset->max_burst = 1;
        }

        qset->qh.info1 = cpu_to_le32(
                QH_INFO1_EP(usb_pipeendpoint(urb->pipe))
                | (is_out ? QH_INFO1_DIR_OUT : QH_INFO1_DIR_IN)
                | usb_pipe_to_qh_type(urb->pipe)
                | QH_INFO1_DEV_INFO_IDX(wusb_port_no_to_idx(usb_dev->portnum))
                | QH_INFO1_MAX_PKT_LEN(usb_maxpacket(urb->dev, urb->pipe, is_out))
                );
        qset->qh.info2 = cpu_to_le32(
                QH_INFO2_BURST(qset->max_burst)
                | QH_INFO2_DBP(0)
                | QH_INFO2_MAX_COUNT(3)
                | QH_INFO2_MAX_RETRY(3)
                | QH_INFO2_MAX_SEQ(qset->max_seq - 1)
                );
        /* FIXME: where can we obtain these Tx parameters from?  Why
         * doesn't the chip know what Tx power to use? It knows the Rx
         * strength and can presumably guess the Tx power required
         * from that? */
        qset->qh.info3 = cpu_to_le32(
                QH_INFO3_TX_RATE_53_3
                | QH_INFO3_TX_PWR(0) /* 0 == max power */
                );
}

/**
 * qset_clear - clear fields in a qset so it may be reinserted into a
 * schedule
 */
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
        qset->td_start = qset->td_end = qset->ntds = 0;
        qset->remove = 0;

        qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
        qset->qh.status = cpu_to_le16(QH_STATUS_ICUR(qset->td_start));
        qset->qh.err_count = 0;
        qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
        qset->qh.scratch[0] = 0;
        qset->qh.scratch[1] = 0;
        qset->qh.scratch[2] = 0;

        memset(&qset->qh.overlay, 0, sizeof(qset->qh.overlay));

        init_completion(&qset->remove_complete);
}

/**
 * get_qset - get the qset for an async endpoint
 *
 * A new qset is created if one does not already exist.
 */
struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
                                 gfp_t mem_flags)
{
        struct whc_qset *qset;

        qset = urb->ep->hcpriv;
        if (qset == NULL) {
                qset = qset_alloc(whc, mem_flags);
                if (qset == NULL)
                        return NULL;

                qset->ep = urb->ep;
                urb->ep->hcpriv = qset;
                qset_fill_qh(qset, urb);
        }
        return qset;
}

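/**
 * qset_remove_complete - finish removing a qset from a schedule
 * @whc:  the WHCI host controller
 * @qset: the qset that has been removed
 *
 * Takes the qset off its list and wakes any thread waiting in
 * qset_delete().
 */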
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
        list_del_init(&qset->list_node);
        complete(&qset->remove_complete);
}

/**
 * qset_add_qtds - add qTDs for an URB to a qset
 *
 * Returns true if the list (ASL/PZL) must be updated because (for a
 * WHCI 0.95 controller) an activated qTD was pointed to by iCur.
 */
enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset)
{
        struct whc_std *std;
        enum whc_update update = 0;

        list_for_each_entry(std, &qset->stds, list_node) {
                struct whc_qtd *qtd;
                uint32_t status;

                if (qset->ntds >= WHCI_QSET_TD_MAX
                    || (qset->pause_after_urb && std->urb != qset->pause_after_urb))
                        break;

                if (std->qtd)
                        continue; /* already has a qTD */

                qtd = std->qtd = &qset->qtd[qset->td_end];

                /* Fill in setup bytes for control transfers. */
                if (usb_pipecontrol(std->urb->pipe))
                        memcpy(qtd->setup, std->urb->setup_packet, 8);

                status = QTD_STS_ACTIVE | QTD_STS_LEN(std->len);

                if (whc_std_last(std) && usb_pipeout(std->urb->pipe))
                        status |= QTD_STS_LAST_PKT;

                /*
                 * For an IN transfer the iAlt field should be set so
                 * the h/w will automatically advance to the next
                 * transfer. However, if there are 8 or more TDs
                 * remaining in this transfer then iAlt cannot be set
                 * as it could point to somewhere in this transfer.
                 */
                if (std->ntds_remaining < WHCI_QSET_TD_MAX) {
                        int ialt;
                        ialt = (qset->td_end + std->ntds_remaining) % WHCI_QSET_TD_MAX;
                        status |= QTD_STS_IALT(ialt);
                } else if (usb_pipein(std->urb->pipe))
                        qset->pause_after_urb = std->urb;

                if (std->num_pointers)
                        qtd->options = cpu_to_le32(QTD_OPT_IOC);
                else
                        qtd->options = cpu_to_le32(QTD_OPT_IOC | QTD_OPT_SMALL);
                qtd->page_list_ptr = cpu_to_le64(std->dma_addr);

                qtd->status = cpu_to_le32(status);

                if (QH_STATUS_TO_ICUR(qset->qh.status) == qset->td_end)
                        update = WHC_UPDATE_UPDATED;

                if (++qset->td_end >= WHCI_QSET_TD_MAX)
                        qset->td_end = 0;
                qset->ntds++;
        }

        return update;
}

/**
 * qset_remove_qtd - remove the first qTD from a qset.
 *
 * The qTD might still be active (if it's part of an IN URB that
 * resulted in a short read) so ensure it's deactivated.
 */
static void qset_remove_qtd(struct whc *whc, struct whc_qset *qset)
{
        qset->qtd[qset->td_start].status = 0;

        if (++qset->td_start >= WHCI_QSET_TD_MAX)
                qset->td_start = 0;
        qset->ntds--;
}

/**
 * qset_free_std - remove an sTD and free it.
 * @whc: the WHCI host controller
 * @std: the sTD to remove and free.
 */
void qset_free_std(struct whc *whc, struct whc_std *std)
{
        list_del(&std->list_node);
        if (std->num_pointers) {
                dma_unmap_single(whc->wusbhc.dev, std->dma_addr,
                                 std->num_pointers * sizeof(struct whc_page_list_entry),
                                 DMA_TO_DEVICE);
                kfree(std->pl_virt);
        }

        kfree(std);
}

/**
 * qset_remove_qtds - remove an URB's qTDs (and sTDs).
 */
static void qset_remove_qtds(struct whc *whc, struct whc_qset *qset,
                             struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb != urb)
                        break;
                if (std->qtd != NULL)
                        qset_remove_qtd(whc, qset);
                qset_free_std(whc, std);
        }
}

/**
 * qset_free_stds - free any remaining sTDs for an URB.
 */
static void qset_free_stds(struct whc_qset *qset, struct urb *urb)
{
        struct whc_std *std, *t;

        list_for_each_entry_safe(std, t, &qset->stds, list_node) {
                if (std->urb == urb)
                        qset_free_std(qset->whc, std);
        }
}

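/**
 * qset_fill_page_list - allocate and fill a page list for an sTD
 * @whc:       the WHCI host controller
 * @std:       an sTD whose buffer spans more than one WHCI page
 * @mem_flags: allocation flags
 *
 * The page list is mapped for DMA and std->dma_addr is replaced with
 * the DMA address of the page list itself, ready to be loaded into a
 * qTD's page list pointer.
 */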
static int qset_fill_page_list(struct whc *whc, struct whc_std *std, gfp_t mem_flags)
{
        dma_addr_t dma_addr = std->dma_addr;
        dma_addr_t sp, ep;
        size_t std_len = std->len;
        size_t pl_len;
        int p;

        /*
         * One page list entry is needed for each WHCI page touched by
         * the buffer, so round the start address down to a page
         * boundary before counting.
         */
        sp = dma_addr & ~(dma_addr_t)(WHCI_PAGE_SIZE - 1);
        ep = dma_addr + std_len;
        std->num_pointers = DIV_ROUND_UP(ep - sp, WHCI_PAGE_SIZE);

        pl_len = std->num_pointers * sizeof(struct whc_page_list_entry);
        std->pl_virt = kmalloc(pl_len, mem_flags);
        if (std->pl_virt == NULL)
                return -ENOMEM;
        std->dma_addr = dma_map_single(whc->wusbhc.dev, std->pl_virt, pl_len, DMA_TO_DEVICE);

        /*
         * The first entry points at the (possibly unaligned) start of
         * the buffer; each subsequent entry points at the next page
         * boundary.
         */
        for (p = 0; p < std->num_pointers; p++) {
                std->pl_virt[p].buf_ptr = cpu_to_le64(dma_addr);
                dma_addr = (dma_addr + WHCI_PAGE_SIZE) & ~(dma_addr_t)(WHCI_PAGE_SIZE - 1);
        }

        return 0;
}

/**
 * urb_dequeue_work - execute the ASL/PZL update and give the urb back
 * to the USB subsystem.
 */
static void urb_dequeue_work(struct work_struct *work)
{
        struct whc_urb *wurb = container_of(work, struct whc_urb, dequeue_work);
        struct whc_qset *qset = wurb->qset;
        struct whc *whc = qset->whc;
        unsigned long flags;

        if (wurb->is_async)
                asl_update(whc, WUSBCMD_ASYNC_UPDATED
                           | WUSBCMD_ASYNC_SYNCED_DB
                           | WUSBCMD_ASYNC_QSET_RM);
        else
                pzl_update(whc, WUSBCMD_PERIODIC_UPDATED
                           | WUSBCMD_PERIODIC_SYNCED_DB
                           | WUSBCMD_PERIODIC_QSET_RM);

        spin_lock_irqsave(&whc->lock, flags);
        qset_remove_urb(whc, qset, wurb->urb, wurb->status);
        spin_unlock_irqrestore(&whc->lock, flags);
}

/**
 * qset_add_urb - add an urb to the qset's queue.
 *
 * The URB is chopped into sTDs, one for each qTD that will be
 * required.  At least one qTD (and sTD) is required even if the
 * transfer has no data (e.g., for some control transfers).
 */
int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb,
        gfp_t mem_flags)
{
        struct whc_urb *wurb;
        int remaining = urb->transfer_buffer_length;
        u64 transfer_dma = urb->transfer_dma;
        int ntds_remaining;

        ntds_remaining = DIV_ROUND_UP(remaining, QTD_MAX_XFER_SIZE);
        if (ntds_remaining == 0)
                ntds_remaining = 1;

        wurb = kzalloc(sizeof(struct whc_urb), mem_flags);
        if (wurb == NULL)
                goto err_no_mem;
        urb->hcpriv = wurb;
        wurb->qset = qset;
        wurb->urb = urb;
        INIT_WORK(&wurb->dequeue_work, urb_dequeue_work);

        while (ntds_remaining) {
                struct whc_std *std;
                size_t std_len;

                std = kmalloc(sizeof(struct whc_std), mem_flags);
                if (std == NULL)
                        goto err_no_mem;

                std_len = remaining;
                if (std_len > QTD_MAX_XFER_SIZE)
                        std_len = QTD_MAX_XFER_SIZE;

                std->urb = urb;
                std->dma_addr = transfer_dma;
                std->len = std_len;
                std->ntds_remaining = ntds_remaining;
                std->qtd = NULL;

                INIT_LIST_HEAD(&std->list_node);
                list_add_tail(&std->list_node, &qset->stds);

                if (std_len > WHCI_PAGE_SIZE) {
                        if (qset_fill_page_list(whc, std, mem_flags) < 0)
                                goto err_no_mem;
                } else
                        std->num_pointers = 0;

                ntds_remaining--;
                remaining -= std_len;
                transfer_dma += std_len;
        }

        return 0;

err_no_mem:
        qset_free_stds(qset, urb);
        return -ENOMEM;
}

/**
 * qset_remove_urb - remove an URB from the urb queue.
 *
 * The URB is returned to the USB subsystem.
 */
void qset_remove_urb(struct whc *whc, struct whc_qset *qset,
                            struct urb *urb, int status)
{
        struct wusbhc *wusbhc = &whc->wusbhc;
        struct whc_urb *wurb = urb->hcpriv;

        usb_hcd_unlink_urb_from_ep(&wusbhc->usb_hcd, urb);
        /* Drop the lock as urb->complete() may enqueue another urb. */
        spin_unlock(&whc->lock);
        wusbhc_giveback_urb(wusbhc, urb, status);
        spin_lock(&whc->lock);

        kfree(wurb);
}

/**
 * get_urb_status_from_qtd - get the completed urb status from qTD status
 * @urb:    completed urb
 * @status: qTD status
 */
static int get_urb_status_from_qtd(struct urb *urb, u32 status)
{
        if (status & QTD_STS_HALTED) {
                if (status & QTD_STS_DBE)
                        return usb_pipein(urb->pipe) ? -ENOSR : -ECOMM;
                else if (status & QTD_STS_BABBLE)
                        return -EOVERFLOW;
                else if (status & QTD_STS_RCE)
                        return -ETIME;
                return -EPIPE;
        }
        if (usb_pipein(urb->pipe)
            && (urb->transfer_flags & URB_SHORT_NOT_OK)
            && urb->actual_length < urb->transfer_buffer_length)
                return -EREMOTEIO;
        return 0;
}

/**
 * process_inactive_qtd - process an inactive (but not halted) qTD.
 *
 * Update the urb with the number of bytes transferred by the qTD.  If
 * the urb is completely transferred, or (in the case of an IN transfer)
 * the LPF is set, then the transfer is complete and the urb should be
 * returned to the system.
 */
void process_inactive_qtd(struct whc *whc, struct whc_qset *qset,
                                 struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        uint32_t status;
        bool complete;

        status = le32_to_cpu(qtd->status);

        urb->actual_length += std->len - QTD_STS_TO_LEN(status);

        if (usb_pipein(urb->pipe) && (status & QTD_STS_LAST_PKT))
                complete = true;
        else
                complete = whc_std_last(std);

        qset_remove_qtd(whc, qset);
        qset_free_std(whc, std);

        /*
         * Transfers for this URB are complete?  Then return it to the
         * USB subsystem.
         */
        if (complete) {
                qset_remove_qtds(whc, qset, urb);
                qset_remove_urb(whc, qset, urb, get_urb_status_from_qtd(urb, status));

                /*
                 * If iAlt isn't valid then the hardware didn't
                 * advance iCur. Adjust the start and end pointers to
                 * match iCur.
                 */
                if (!(status & QTD_STS_IALT_VALID))
                        qset->td_start = qset->td_end
                                = QH_STATUS_TO_ICUR(le16_to_cpu(qset->qh.status));
                qset->pause_after_urb = NULL;
        }
}

/**
 * process_halted_qtd - process a qset with a halted qTD
 *
 * Remove all the qTDs for the failed URB and return the failed URB to
 * the USB subsystem.  Then remove all other qTDs so the qset can be
 * removed.
 *
 * FIXME: this is the point where rate adaptation can be done.  If a
 * transfer failed because it exceeded the maximum number of retries
 * then it could be reactivated with a slower rate without having to
 * remove the qset.
 */
void process_halted_qtd(struct whc *whc, struct whc_qset *qset,
                               struct whc_qtd *qtd)
{
        struct whc_std *std = list_first_entry(&qset->stds, struct whc_std, list_node);
        struct urb *urb = std->urb;
        int urb_status;

        urb_status = get_urb_status_from_qtd(urb, le32_to_cpu(qtd->status));

        qset_remove_qtds(whc, qset, urb);
        qset_remove_urb(whc, qset, urb, urb_status);

        list_for_each_entry(std, &qset->stds, list_node) {
                if (qset->ntds == 0)
                        break;
                qset_remove_qtd(whc, qset);
                std->qtd = NULL;
        }

        qset->remove = 1;
}

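/**
 * qset_free - return a qset to the qset DMA pool.
 * @whc:  the WHCI host controller
 * @qset: the qset to free
 */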
void qset_free(struct whc *whc, struct whc_qset *qset)
{
        dma_pool_free(whc->qset_pool, qset, qset->qset_dma);
}

/**
 * qset_delete - wait for a qset to be unused, then free it.
 */
void qset_delete(struct whc *whc, struct whc_qset *qset)
{
        wait_for_completion(&qset->remove_complete);
        qset_free(whc, qset);
}