drivers/net/wireless/rt2x00/rt2x00usb.c
/*
        Copyright (C) 2004 - 2009 Ivo van Doorn <IvDoorn@gmail.com>
        <http://rt2x00.serialmonkey.com>

        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by
        the Free Software Foundation; either version 2 of the License, or
        (at your option) any later version.

        This program is distributed in the hope that it will be useful,
        but WITHOUT ANY WARRANTY; without even the implied warranty of
        MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
        GNU General Public License for more details.

        You should have received a copy of the GNU General Public License
        along with this program; if not, write to the
        Free Software Foundation, Inc.,
        59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
        Module: rt2x00usb
        Abstract: rt2x00 generic usb device routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/bug.h>

#include "rt2x00.h"
#include "rt2x00usb.h"

/*
 * Interfacing with the HW.
 */
int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
                             const u8 request, const u8 requesttype,
                             const u16 offset, const u16 value,
                             void *buffer, const u16 buffer_length,
                             const int timeout)
{
        struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
        int status;
        unsigned int i;
        unsigned int pipe =
            (requesttype == USB_VENDOR_REQUEST_IN) ?
            usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0);

        if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
                return -ENODEV;

        for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
                status = usb_control_msg(usb_dev, pipe, request, requesttype,
                                         value, offset, buffer, buffer_length,
                                         timeout);
                if (status >= 0)
                        return 0;

                /*
                 * Check for errors
                 * -ENODEV: Device has disappeared, no point continuing.
                 * All other errors: Try again.
                 */
                else if (status == -ENODEV) {
                        clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
                        break;
                }
        }

        ERROR(rt2x00dev,
              "Vendor Request 0x%02x failed for offset 0x%04x with error %d.\n",
              request, offset, status);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request);

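/*
 * rt2x00usb_vendor_req_buff_lock() performs a vendor request through the
 * pre-allocated CSR cache buffer; the caller must already hold csr_mutex.
 */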
int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev,
                                   const u8 request, const u8 requesttype,
                                   const u16 offset, void *buffer,
                                   const u16 buffer_length, const int timeout)
{
        int status;

        BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex));

        /*
         * Check for cache availability.
         */
        if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) {
                ERROR(rt2x00dev, "CSR cache not available.\n");
                return -ENOMEM;
        }

        if (requesttype == USB_VENDOR_REQUEST_OUT)
                memcpy(rt2x00dev->csr.cache, buffer, buffer_length);

        status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype,
                                          offset, 0, rt2x00dev->csr.cache,
                                          buffer_length, timeout);

        if (!status && requesttype == USB_VENDOR_REQUEST_IN)
                memcpy(buffer, rt2x00dev->csr.cache, buffer_length);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock);

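/*
 * rt2x00usb_vendor_request_buff() is the unlocked entry point: it takes
 * csr_mutex, hands the transfer to rt2x00usb_vendor_req_buff_lock() and
 * releases the mutex again. Illustrative register read through this helper
 * (offset and reg are placeholders, not taken from this file):
 *
 *      rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ,
 *                                    USB_VENDOR_REQUEST_IN, offset,
 *                                    &reg, sizeof(reg), REGISTER_TIMEOUT);
 */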
int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev,
                                  const u8 request, const u8 requesttype,
                                  const u16 offset, void *buffer,
                                  const u16 buffer_length, const int timeout)
{
        int status;

        mutex_lock(&rt2x00dev->csr_mutex);

        status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
                                                requesttype, offset, buffer,
                                                buffer_length, timeout);

        mutex_unlock(&rt2x00dev->csr_mutex);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff);

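/*
 * rt2x00usb_vendor_request_large_buff() transfers buffers larger than the
 * CSR cache by splitting them into chunks of at most CSR_CACHE_SIZE bytes,
 * advancing the register offset by the chunk size on every iteration.
 */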
int rt2x00usb_vendor_request_large_buff(struct rt2x00_dev *rt2x00dev,
                                        const u8 request, const u8 requesttype,
                                        const u16 offset, const void *buffer,
                                        const u16 buffer_length,
                                        const int timeout)
{
        int status = 0;
        unsigned char *tb;
        u16 off, len, bsize;

        mutex_lock(&rt2x00dev->csr_mutex);

        tb  = (char *)buffer;
        off = offset;
        len = buffer_length;
        while (len && !status) {
                bsize = min_t(u16, CSR_CACHE_SIZE, len);
                status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request,
                                                        requesttype, off, tb,
                                                        bsize, timeout);

                tb  += bsize;
                len -= bsize;
                off += bsize;
        }

        mutex_unlock(&rt2x00dev->csr_mutex);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff);

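/*
 * rt2x00usb_regbusy_read() polls an indirect register until the given busy
 * field clears. It returns 1 when the register became available, 0 when the
 * device kept reporting busy for REGISTER_BUSY_COUNT reads, and -ENODEV when
 * the device is gone.
 */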
int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
                           const unsigned int offset,
                           const struct rt2x00_field32 field,
                           u32 *reg)
{
        unsigned int i;

        if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
                return -ENODEV;

        for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
                rt2x00usb_register_read_lock(rt2x00dev, offset, reg);
                if (!rt2x00_get_field32(*reg, field))
                        return 1;
                udelay(REGISTER_BUSY_DELAY);
        }

        ERROR(rt2x00dev, "Indirect register access failed: "
              "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
        *reg = ~0;

        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read);

/*
 * TX data handlers.
 */
static void rt2x00usb_interrupt_txdone(struct urb *urb)
{
        struct queue_entry *entry = (struct queue_entry *)urb->context;
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct txdone_entry_desc txdesc;

        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
            !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return;

        /*
         * Obtain the status of this packet.
         * Note that when the status is 0 it does not mean the
         * frame was sent out correctly. It only means the frame
         * was successfully pushed to the hardware; we have no
         * way to determine the transmission status right now.
         * (Only indirectly by looking at the failed TX counters
         * in the register).
         */
        txdesc.flags = 0;
        if (!urb->status)
                __set_bit(TXDONE_UNKNOWN, &txdesc.flags);
        else
                __set_bit(TXDONE_FAILURE, &txdesc.flags);
        txdesc.retry = 0;

        rt2x00lib_txdone(entry, &txdesc);
}

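/*
 * rt2x00usb_write_tx_data() prepares a TX entry: it reserves room for the
 * driver-specific descriptor in front of the frame and fills the bulk-out
 * URB. The URB is not submitted here; that happens when the queue is kicked.
 */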
int rt2x00usb_write_tx_data(struct queue_entry *entry,
                            struct txentry_desc *txdesc)
{
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
        struct queue_entry_priv_usb *entry_priv = entry->priv_data;
        u32 length;

        /*
         * Add the descriptor in front of the skb.
         */
        skb_push(entry->skb, entry->queue->desc_size);
        memset(entry->skb->data, 0, entry->queue->desc_size);

        /*
         * USB devices cannot blindly pass the skb->len as the
         * length of the data to usb_fill_bulk_urb. Pass the skb
         * to the driver to determine what the length should be.
         */
        length = rt2x00dev->ops->lib->get_tx_data_len(entry);

        usb_fill_bulk_urb(entry_priv->urb, usb_dev,
                          usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
                          entry->skb->data, length,
                          rt2x00usb_interrupt_txdone, entry);

        /*
         * Make sure the skb->data pointer points to the frame, not the
         * descriptor.
         */
        skb_pull(entry->skb, entry->queue->desc_size);

        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_write_tx_data);

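/*
 * Submit the URB for a single entry, but only when the entry was actually
 * marked as pending by the TX path.
 */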
static inline void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
{
        struct queue_entry_priv_usb *entry_priv = entry->priv_data;

        if (test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags))
                usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
}

void rt2x00usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
                             const enum data_queue_qid qid)
{
        struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid);
        unsigned long irqflags;
        unsigned int index;
        unsigned int index_done;
        unsigned int i;

        /*
         * Only protect the range we are going to loop over;
         * if during our loop an extra entry is set to pending
         * it should not be kicked during this run, since it
         * is part of another TX operation.
         */
        spin_lock_irqsave(&queue->lock, irqflags);
        index = queue->index[Q_INDEX];
        index_done = queue->index[Q_INDEX_DONE];
        spin_unlock_irqrestore(&queue->lock, irqflags);

        /*
         * Start from the TX done pointer, this guarantees that we will
         * send out all frames in the correct order.
         */
        if (index_done < index) {
                for (i = index_done; i < index; i++)
                        rt2x00usb_kick_tx_entry(&queue->entries[i]);
        } else {
                for (i = index_done; i < queue->limit; i++)
                        rt2x00usb_kick_tx_entry(&queue->entries[i]);

                for (i = 0; i < index; i++)
                        rt2x00usb_kick_tx_entry(&queue->entries[i]);
        }
}
EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue);

void rt2x00usb_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
                             const enum data_queue_qid qid)
{
        struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid);
        struct queue_entry_priv_usb *entry_priv;
        struct queue_entry_priv_usb_bcn *bcn_priv;
        unsigned int i;
        bool kill_guard;

        /*
         * When killing the beacon queue, we must also kill
         * the beacon guardian urb.
         */
        kill_guard =
            (qid == QID_BEACON) &&
            (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags));

        /*
         * Cancel all entries.
         */
        for (i = 0; i < queue->limit; i++) {
                entry_priv = queue->entries[i].priv_data;
                usb_kill_urb(entry_priv->urb);

                /*
                 * Kill guardian urb (if required by driver).
                 */
                if (kill_guard) {
                        bcn_priv = queue->entries[i].priv_data;
                        usb_kill_urb(bcn_priv->guardian_urb);
                }
        }
}
EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue);

/*
 * RX data handlers.
 */
static void rt2x00usb_interrupt_rxdone(struct urb *urb)
{
        struct queue_entry *entry = (struct queue_entry *)urb->context;
        struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
        struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
        u8 rxd[32];

        if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) ||
            !test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
                return;

        /*
         * Check if the received data is simply too small
         * to be actually valid, or if the urb is signaling
         * a problem.
         */
        if (urb->actual_length < entry->queue->desc_size || urb->status) {
                set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
                usb_submit_urb(urb, GFP_ATOMIC);
                return;
        }

        /*
         * Fill in desc fields of the skb descriptor
         */
        skbdesc->desc = rxd;
        skbdesc->desc_len = entry->queue->desc_size;

        /*
         * Send the frame to rt2x00lib for further processing.
         */
        rt2x00lib_rxdone(rt2x00dev, entry);
}

/*
 * Radio handlers
 */
void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
{
        rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
                                    REGISTER_TIMEOUT);

        /*
         * The USB version of kill_tx_queue also works
         * on the RX queue.
         */
        rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev, QID_RX);
}
EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);

/*
 * Device initialization handlers.
 */
void rt2x00usb_clear_entry(struct queue_entry *entry)
{
        struct usb_device *usb_dev =
            to_usb_device_intf(entry->queue->rt2x00dev->dev);
        struct queue_entry_priv_usb *entry_priv = entry->priv_data;
        int pipe;

        if (entry->queue->qid == QID_RX) {
                pipe = usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint);
                usb_fill_bulk_urb(entry_priv->urb, usb_dev, pipe,
                                entry->skb->data, entry->skb->len,
                                rt2x00usb_interrupt_rxdone, entry);

                set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
                usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
        } else {
                entry->flags = 0;
        }
}
EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);

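/*
 * Bind a queue to the bulk endpoint described by ep_desc and record the
 * maximum packet size for that pipe (falling back to 1 when unknown).
 */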
static void rt2x00usb_assign_endpoint(struct data_queue *queue,
                                      struct usb_endpoint_descriptor *ep_desc)
{
        struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev);
        int pipe;

        queue->usb_endpoint = usb_endpoint_num(ep_desc);

        if (queue->qid == QID_RX) {
                pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint);
                queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 0);
        } else {
                pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint);
                queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe, 1);
        }

        if (!queue->usb_maxpacket)
                queue->usb_maxpacket = 1;
}

static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev)
{
        struct usb_interface *intf = to_usb_interface(rt2x00dev->dev);
        struct usb_host_interface *intf_desc = intf->cur_altsetting;
        struct usb_endpoint_descriptor *ep_desc;
        struct data_queue *queue = rt2x00dev->tx;
        struct usb_endpoint_descriptor *tx_ep_desc = NULL;
        unsigned int i;

        /*
         * Walk through all available endpoints to search for "bulk in"
         * and "bulk out" endpoints. When we find such endpoints collect
         * the information we need from the descriptor and assign it
         * to the queue.
         */
        for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) {
                ep_desc = &intf_desc->endpoint[i].desc;

                if (usb_endpoint_is_bulk_in(ep_desc)) {
                        rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc);
                } else if (usb_endpoint_is_bulk_out(ep_desc) &&
                           (queue != queue_end(rt2x00dev))) {
                        rt2x00usb_assign_endpoint(queue, ep_desc);
                        queue = queue_next(queue);

                        tx_ep_desc = ep_desc;
                }
        }

        /*
         * At least 1 endpoint for RX and 1 endpoint for TX must be available.
         */
        if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) {
                ERROR(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n");
                return -EPIPE;
        }

        /*
         * It is possible that not all queues have a dedicated endpoint.
         * Loop through all TX queues and copy the endpoint information
         * which we have gathered from already assigned endpoints.
         */
        txall_queue_for_each(rt2x00dev, queue) {
                if (!queue->usb_endpoint)
                        rt2x00usb_assign_endpoint(queue, tx_ep_desc);
        }

        return 0;
}

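/*
 * Allocate an urb for every entry in the queue. For the beacon queue an
 * additional guardian urb is allocated per entry when the driver has set
 * DRIVER_REQUIRE_BEACON_GUARD.
 */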
static int rt2x00usb_alloc_urb(struct rt2x00_dev *rt2x00dev,
                               struct data_queue *queue)
{
        struct queue_entry_priv_usb *entry_priv;
        struct queue_entry_priv_usb_bcn *bcn_priv;
        unsigned int i;

        for (i = 0; i < queue->limit; i++) {
                entry_priv = queue->entries[i].priv_data;
                entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!entry_priv->urb)
                        return -ENOMEM;
        }

        /*
         * If this is not the beacon queue or
         * no guardian byte was required for the beacon,
         * then we are done.
         */
        if (rt2x00dev->bcn != queue ||
            !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
                return 0;

        for (i = 0; i < queue->limit; i++) {
                bcn_priv = queue->entries[i].priv_data;
                bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL);
                if (!bcn_priv->guardian_urb)
                        return -ENOMEM;
        }

        return 0;
}

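/*
 * Kill and free all urbs of a queue, including the guardian urbs of the
 * beacon queue when those were allocated.
 */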
static void rt2x00usb_free_urb(struct rt2x00_dev *rt2x00dev,
                               struct data_queue *queue)
{
        struct queue_entry_priv_usb *entry_priv;
        struct queue_entry_priv_usb_bcn *bcn_priv;
        unsigned int i;

        if (!queue->entries)
                return;

        for (i = 0; i < queue->limit; i++) {
                entry_priv = queue->entries[i].priv_data;
                usb_kill_urb(entry_priv->urb);
                usb_free_urb(entry_priv->urb);
        }

        /*
         * If this is not the beacon queue or
         * no guardian byte was required for the beacon,
         * then we are done.
         */
        if (rt2x00dev->bcn != queue ||
            !test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags))
                return;

        for (i = 0; i < queue->limit; i++) {
                bcn_priv = queue->entries[i].priv_data;
                usb_kill_urb(bcn_priv->guardian_urb);
                usb_free_urb(bcn_priv->guardian_urb);
        }
}

int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;
        int status;

        /*
         * Find endpoints for each queue.
         */
        status = rt2x00usb_find_endpoints(rt2x00dev);
        if (status)
                goto exit;

        /*
         * Allocate urbs for each queue.
         */
        queue_for_each(rt2x00dev, queue) {
                status = rt2x00usb_alloc_urb(rt2x00dev, queue);
                if (status)
                        goto exit;
        }

        return 0;

exit:
        rt2x00usb_uninitialize(rt2x00dev);

        return status;
}
EXPORT_SYMBOL_GPL(rt2x00usb_initialize);

void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev)
{
        struct data_queue *queue;

        queue_for_each(rt2x00dev, queue)
                rt2x00usb_free_urb(rt2x00dev, queue);
}
EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize);

/*
 * USB driver handlers.
 */
static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev)
{
        kfree(rt2x00dev->rf);
        rt2x00dev->rf = NULL;

        kfree(rt2x00dev->eeprom);
        rt2x00dev->eeprom = NULL;

        kfree(rt2x00dev->csr.cache);
        rt2x00dev->csr.cache = NULL;
}

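/*
 * Allocate the CSR cache, EEPROM and RF register buffers. On any failure
 * everything allocated so far is released again and -ENOMEM is returned.
 */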
static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev)
{
        rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL);
        if (!rt2x00dev->csr.cache)
                goto exit;

        rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
        if (!rt2x00dev->eeprom)
                goto exit;

        rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
        if (!rt2x00dev->rf)
                goto exit;

        return 0;

exit:
        ERROR_PROBE("Failed to allocate registers.\n");

        rt2x00usb_free_reg(rt2x00dev);

        return -ENOMEM;
}

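/*
 * Generic probe handler: allocate the ieee80211 hardware structure, hook up
 * the rt2x00 device to the USB interface and hand further initialization
 * over to rt2x00lib_probe_dev(). Errors unwind in reverse order.
 */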
int rt2x00usb_probe(struct usb_interface *usb_intf,
                    const struct usb_device_id *id)
{
        struct usb_device *usb_dev = interface_to_usbdev(usb_intf);
        struct rt2x00_ops *ops = (struct rt2x00_ops *)id->driver_info;
        struct ieee80211_hw *hw;
        struct rt2x00_dev *rt2x00dev;
        int retval;

        usb_dev = usb_get_dev(usb_dev);

        hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
        if (!hw) {
                ERROR_PROBE("Failed to allocate hardware.\n");
                retval = -ENOMEM;
                goto exit_put_device;
        }

        usb_set_intfdata(usb_intf, hw);

        rt2x00dev = hw->priv;
        rt2x00dev->dev = &usb_intf->dev;
        rt2x00dev->ops = ops;
        rt2x00dev->hw = hw;

        rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);

        retval = rt2x00usb_alloc_reg(rt2x00dev);
        if (retval)
                goto exit_free_device;

        retval = rt2x00lib_probe_dev(rt2x00dev);
        if (retval)
                goto exit_free_reg;

        return 0;

exit_free_reg:
        rt2x00usb_free_reg(rt2x00dev);

exit_free_device:
        ieee80211_free_hw(hw);

exit_put_device:
        usb_put_dev(usb_dev);

        usb_set_intfdata(usb_intf, NULL);

        return retval;
}
EXPORT_SYMBOL_GPL(rt2x00usb_probe);

void rt2x00usb_disconnect(struct usb_interface *usb_intf)
{
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;

        /*
         * Free all allocated data.
         */
        rt2x00lib_remove_dev(rt2x00dev);
        rt2x00usb_free_reg(rt2x00dev);
        ieee80211_free_hw(hw);

        /*
         * Free the USB device data.
         */
        usb_set_intfdata(usb_intf, NULL);
        usb_put_dev(interface_to_usbdev(usb_intf));
}
EXPORT_SYMBOL_GPL(rt2x00usb_disconnect);

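/*
 * Power management handlers.
 */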
#ifdef CONFIG_PM
int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state)
{
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;
        int retval;

        retval = rt2x00lib_suspend(rt2x00dev, state);
        if (retval)
                return retval;

        /*
         * Decrease usbdev refcount.
         */
        usb_put_dev(interface_to_usbdev(usb_intf));

        return 0;
}
EXPORT_SYMBOL_GPL(rt2x00usb_suspend);

int rt2x00usb_resume(struct usb_interface *usb_intf)
{
        struct ieee80211_hw *hw = usb_get_intfdata(usb_intf);
        struct rt2x00_dev *rt2x00dev = hw->priv;

        usb_get_dev(interface_to_usbdev(usb_intf));

        return rt2x00lib_resume(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00usb_resume);
#endif /* CONFIG_PM */

/*
 * rt2x00usb module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 usb library");
MODULE_LICENSE("GPL");