Bluetooth: Fix crash when monitor timeout expires
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
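
/* Illustrative sketch, not part of the original file: how a caller drives
 * the request machinery above. The req callback (here hci_scan_req, defined
 * below) only queues HCI commands; __hci_request() then sleeps on req_wait_q
 * until hci_req_complete() fires from the event path or the timeout expires.
 * Guarded by #if 0, following this file's own convention for example code. */
#if 0
static int example_enable_scans(struct hci_dev *hdev)
{
        /* SCAN_INQUIRY | SCAN_PAGE makes the device discoverable
         * and connectable (constants from hci.h) */
        return hci_request(hdev, hci_scan_req, SCAN_INQUIRY | SCAN_PAGE,
                                msecs_to_jiffies(HCI_INIT_TIMEOUT));
}
#endif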

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
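
/* Illustrative sketch, not part of the original file: hci_dev_get() returns
 * the device with its reference count raised via hci_dev_hold(), so every
 * successful lookup must be paired with hci_dev_put(). The ioctl helpers
 * below all follow this get/use/put pattern. */
#if 0
static void example_dump_flags(int index)
{
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return;                 /* no such device */

        BT_DBG("%s flags 0x%lx", hdev->name, hdev->flags);

        hci_dev_put(hdev);              /* drop the reference */
}
#endif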

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses we use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
         * copy it to user space.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
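
/* Illustrative user-space counterpart, an assumption and not part of this
 * file: hci_inquiry() above services the HCIINQUIRY ioctl, whose argument
 * is a struct hci_inquiry_req followed by room for the responses. The
 * hypothetical descriptor `dd' would be an open HCI socket. */
#if 0
        struct {
                struct hci_inquiry_req ir;
                struct inquiry_info info[8];
        } req = {
                .ir = {
                        .dev_id  = 0,                   /* hci0 */
                        .flags   = IREQ_CACHE_FLUSH,    /* drop stale cache */
                        .lap     = { 0x33, 0x8b, 0x9e },/* GIAC, little endian */
                        .length  = 8,                   /* 8 * 1.28 s */
                        .num_rsp = 8,
                },
        };

        if (ioctl(dd, HCIINQUIRY, &req) < 0)
                perror("HCIINQUIRY");
#endif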

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kzalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < 3; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        hci_notify(hdev, HCI_DEV_REG);

        return id;
}
EXPORT_SYMBOL(hci_register_dev);
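
/* Illustrative driver-side sketch, not part of the original file: the
 * minimum a transport driver must supply before hci_register_dev() will
 * accept it -- open, close and destruct callbacks plus a send hook. All
 * example_* names are assumptions for the sketch. */
#if 0
static int example_probe(void)
{
        struct hci_dev *hdev = hci_alloc_dev();

        if (!hdev)
                return -ENOMEM;

        hdev->bus      = HCI_VIRTUAL;
        hdev->open     = example_open;          /* bring up the transport */
        hdev->close    = example_close;
        hdev->send     = example_send;          /* one skb per HCI packet */
        hdev->destruct = example_destruct;

        if (hci_register_dev(hdev) < 0) {
                hci_free_dev(hdev);
                return -ENODEV;
        }

        return 0;
}
#endif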

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < 3; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 2])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                struct sk_buff *skb = __reassembly(hdev, type);
                struct { int expect; } *scb;
                int len = 0;

                if (!skb) {
                        /* Start of the frame */

                        switch (type) {
                        case HCI_EVENT_PKT:
                                if (count >= HCI_EVENT_HDR_SIZE) {
                                        struct hci_event_hdr *h = data;
                                        len = HCI_EVENT_HDR_SIZE + h->plen;
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_ACLDATA_PKT:
                                if (count >= HCI_ACL_HDR_SIZE) {
                                        struct hci_acl_hdr *h = data;
                                        len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_SCODATA_PKT:
                                if (count >= HCI_SCO_HDR_SIZE) {
                                        struct hci_sco_hdr *h = data;
                                        len = HCI_SCO_HDR_SIZE + h->dlen;
                                } else
                                        return -EILSEQ;
                                break;
                        }

                        skb = bt_skb_alloc(len, GFP_ATOMIC);
                        if (!skb) {
                                BT_ERR("%s no memory for packet", hdev->name);
                                return -ENOMEM;
                        }

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = type;

                        __reassembly(hdev, type) = skb;

                        scb = (void *) skb->cb;
                        scb->expect = len;
                } else {
                        /* Continuation */

                        scb = (void *) skb->cb;
                        len = scb->expect;
                }

                len = min(len, count);

                memcpy(skb_put(skb, len), data, len);

                scb->expect -= len;

                if (scb->expect == 0) {
                        /* Complete frame */

                        __reassembly(hdev, type) = NULL;

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);
                }

                count -= len; data += len;
        }

        return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);
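
/* Illustrative sketch, an assumption rather than driver code from this
 * tree: a byte-stream transport (e.g. a UART in H4 framing) peels off the
 * packet type indicator and feeds the payload bytes to the reassembly
 * helper above, which emits complete frames via hci_recv_frame(). */
#if 0
static void example_uart_rx(struct hci_dev *hdev, u8 *data, int count)
{
        /* first byte on the wire is the packet type indicator */
        int err = hci_recv_fragment(hdev, data[0], data + 1, count - 1);

        if (err < 0)
                BT_ERR("%s frame reassembly failed (%d)", hdev->name, err);
}
#endif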

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);
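
/* Illustrative sketch, not part of the original file: an upper protocol
 * registers under an id below HCI_MAX_PROTO together with the receive
 * callbacks the RX task dispatches to. HCI_PROTO_L2CAP is slot 0 and
 * HCI_PROTO_SCO slot 1; the example_* names are assumptions. */
#if 0
static struct hci_proto example_proto = {
        .name         = "example",
        .id           = HCI_PROTO_L2CAP,
        .recv_acldata = example_recv_acldata,
};

static int __init example_init(void)
{
        return hci_register_proto(&example_proto);
}
#endif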

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}
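
/* Illustrative sketch, not part of the original file: queueing a command
 * with a parameter block, mirroring hci_init_req() above. Multi-byte
 * parameters must be little-endian on the wire, hence the cpu_to_le16()
 * conversion; 0x4000 slots of 0.625 ms give a ~10.24 s page timeout. */
#if 0
        __le16 param = cpu_to_le16(0x4000);

        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
#endif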

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);

        return 0;
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
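
/* Worked example for the quota computed above (illustrative): with
 * hdev->acl_cnt == 8 free controller buffers and num == 3 eligible ACL
 * connections, the least-loaded connection gets q = 8 / 3 = 2 packets this
 * pass; when q rounds down to 0 the quote degrades to 1, so even a starved
 * device still makes forward progress one packet at a time. */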

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}