2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
/* Module version and ERTM opt-in switch.  ERTM/streaming channel modes
 * are rejected unless enable_ertm is set (presumably via a module
 * parameter declared outside this chunk — TODO confirm). */
54 #define VERSION "2.14"
56 static int enable_ertm = 0;
/* Features advertised in L2CAP information responses; bit 0x02 in the
 * fixed-channel map marks channel 2 (connectionless) as supported. */
58 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
59 static u8 l2cap_fixed_chan[8] = { 0x02, };
61 static const struct proto_ops l2cap_sock_ops;
/* Global list of every L2CAP socket, protected by its own rwlock. */
63 static struct bt_sock_list l2cap_sk_list = {
64 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations: socket teardown helpers and the signalling
 * command builder used by l2cap_send_cmd(). */
67 static void __l2cap_sock_close(struct sock *sk, int reason);
68 static void l2cap_sock_close(struct sock *sk);
69 static void l2cap_sock_kill(struct sock *sk);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
74 /* ---- L2CAP timers ---- */
/* sk_timer expiry callback: the socket argument is smuggled through the
 * timer's unsigned long.  Chooses an errno based on which state the
 * socket was stuck in, then closes it via __l2cap_sock_close(). */
75 static void l2cap_sock_timeout(unsigned long arg)
77 struct sock *sk = (struct sock *) arg;
80 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* An established or configuring channel that timed out is reported as
 * refused; likewise a connect attempt above SDP security level. */
84 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
85 reason = ECONNREFUSED;
86 else if (sk->sk_state == BT_CONNECT &&
87 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
88 reason = ECONNREFUSED;
92 __l2cap_sock_close(sk, reason);
/* Arm (or re-arm) the socket timer to fire @timeout jiffies from now. */
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, dropping its reference if it was armed. */
106 static void l2cap_sock_clear_timer(struct sock *sk)
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
112 /* ---- L2CAP channels ---- */
/* Linear scan of the per-connection channel list for the entry whose
 * remote (destination) CID matches.  Caller must hold the list lock. */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
/* As above, but matching on the local (source) CID.  Lock held by caller. */
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
133 /* Find channel with given SCID.
134 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid(); takes the list
 * read lock for the lookup and returns the matching socket, if any. */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
139 s = __l2cap_get_chan_by_scid(l, cid);
142 read_unlock(&l->lock);
/* Find the channel whose outstanding signalling request used @ident.
 * Caller must hold the channel-list lock. */
146 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
149 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
150 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
156 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
160 s = __l2cap_get_chan_by_ident(l, ident);
163 read_unlock(&l->lock);
/* Allocate the first unused dynamic source CID in
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END).  O(n) per probe, but the
 * channel list is short.  Caller must hold the channel-list lock. */
167 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
169 u16 cid = L2CAP_CID_DYN_START;
171 for (; cid < L2CAP_CID_DYN_END; cid++) {
172 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push @sk onto the head of the doubly linked channel list.
 * Caller must hold the list write lock. */
179 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
184 l2cap_pi(l->head)->prev_c = sk;
186 l2cap_pi(sk)->next_c = l->head;
187 l2cap_pi(sk)->prev_c = NULL;
/* Remove @sk from the channel list, splicing its neighbours together
 * under the list write lock (bottom-half safe). */
191 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
193 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
195 write_lock_bh(&l->lock);
200 l2cap_pi(next)->prev_c = prev;
202 l2cap_pi(prev)->next_c = next;
203 write_unlock_bh(&l->lock);
/* Attach @sk to @conn and assign its CIDs according to socket type:
 * SEQPACKET gets a dynamic CID, DGRAM the connectionless CID, and
 * anything else (raw) the signalling CID.  If @parent is given, the
 * new socket is queued on the parent's accept queue.
 * Caller must hold the channel-list write lock (see l2cap_chan_add). */
208 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
210 struct l2cap_chan_list *l = &conn->chan_list;
212 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
213 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: presumably HCI "remote user terminated connection" — the
 * default disconnect reason once a channel exists.  TODO confirm. */
215 conn->disc_reason = 0x13;
217 l2cap_pi(sk)->conn = conn;
219 if (sk->sk_type == SOCK_SEQPACKET) {
220 /* Alloc CID for connection-oriented socket */
221 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
222 } else if (sk->sk_type == SOCK_DGRAM) {
223 /* Connectionless socket */
224 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
228 /* Raw socket can send/recv signalling messages only */
229 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
231 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 __l2cap_chan_link(l, sk);
237 bt_accept_enqueue(parent, sk);
241 * Must be called on the locked socket. */
/* Detach a channel from its connection and mark the socket closed.
 * Drops the hci_conn reference taken when the channel was added, wakes
 * the owner via sk_state_change(), and — for sockets still sitting on a
 * parent's accept queue — unlinks them and notifies the listener.
 * Must be called on the locked socket. */
242 static void l2cap_chan_del(struct sock *sk, int err)
244 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
245 struct sock *parent = bt_sk(sk)->parent;
247 l2cap_sock_clear_timer(sk);
249 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
252 /* Unlink from channel list */
253 l2cap_chan_unlink(&conn->chan_list, sk);
254 l2cap_pi(sk)->conn = NULL;
255 hci_conn_put(conn->hcon);
258 sk->sk_state = BT_CLOSED;
259 sock_set_flag(sk, SOCK_ZAPPED);
265 bt_accept_unlink(sk);
266 parent->sk_data_ready(parent, 0);
268 sk->sk_state_change(sk);
271 /* Service level security */
/* Map the channel's security level to an HCI authentication type and
 * ask the HCI layer to enforce it on the underlying ACL link.
 * PSM 0x0001 (SDP) never bonds: it is demoted to BT_SECURITY_SDP when
 * only LOW was requested, and uses the no-bonding auth types. */
272 static inline int l2cap_check_security(struct sock *sk)
274 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
277 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
278 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
279 auth_type = HCI_AT_NO_BONDING_MITM;
281 auth_type = HCI_AT_NO_BONDING;
283 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
284 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
/* All other PSMs use general bonding, with MITM protection only for
 * the HIGH security level. */
286 switch (l2cap_pi(sk)->sec_level) {
287 case BT_SECURITY_HIGH:
288 auth_type = HCI_AT_GENERAL_BONDING_MITM;
290 case BT_SECURITY_MEDIUM:
291 auth_type = HCI_AT_GENERAL_BONDING;
294 auth_type = HCI_AT_NO_BONDING;
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Hand out the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved range (1-128) so the identifiers
 * used by userspace utilities (200-254) are never collided with. */
303 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
313 spin_lock_bh(&conn->lock);
315 if (++conn->tx_ident > 128)
320 spin_unlock_bh(&conn->lock);
/* Build a signalling PDU (code/ident/payload) and queue it on the ACL
 * link.  Returns the hci_send_acl() result. */
325 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 BT_DBG("code 0x%2.2x", code);
334 return hci_send_acl(conn->hcon, skb, 0);
/* Transmit an ERTM supervisory frame (S-frame): L2CAP header plus a
 * 16-bit control field, and an FCS trailer when CRC16 is negotiated.
 * The control word gets L2CAP_CTRL_FRAME_TYPE set to mark it an S-frame. */
337 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
340 struct l2cap_hdr *lh;
341 struct l2cap_conn *conn = pi->conn;
342 int count, hlen = L2CAP_HDR_SIZE + 2;
344 if (pi->fcs == L2CAP_FCS_CRC16)
347 BT_DBG("pi %p, control 0x%2.2x", pi, control);
349 count = min_t(unsigned int, conn->mtu, hlen);
350 control |= L2CAP_CTRL_FRAME_TYPE;
352 skb = bt_skb_alloc(count, GFP_ATOMIC);
356 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
357 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
358 lh->cid = cpu_to_le16(pi->dcid);
359 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the 2-byte FCS field itself. */
361 if (pi->fcs == L2CAP_FCS_CRC16) {
362 u16 fcs = crc16(0, (u8 *)lh, count - 2);
363 put_unaligned_le16(fcs, skb_put(skb, 2));
366 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Kick off channel establishment.  If the remote's feature mask has
 * already been requested, send a connect request (once security allows
 * it); otherwise first issue an information request for the feature
 * mask and start the info timer. */
369 static void l2cap_do_start(struct sock *sk)
371 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
373 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
374 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
377 if (l2cap_check_security(sk)) {
378 struct l2cap_conn_req req;
379 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
380 req.psm = l2cap_pi(sk)->psm;
/* Remember the ident so the response can be matched back to us. */
382 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
384 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
385 L2CAP_CONN_REQ, sizeof(req), &req);
388 struct l2cap_info_req req;
389 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
391 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
392 conn->info_ident = l2cap_get_ident(conn);
394 mod_timer(&conn->info_timer, jiffies +
395 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
397 l2cap_send_cmd(conn, conn->info_ident,
398 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a disconnect request for this channel (dcid/scid pair) with a
 * freshly allocated command identifier. */
402 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
404 struct l2cap_disconn_req req;
406 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
407 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
408 l2cap_send_cmd(conn, l2cap_get_ident(conn),
409 L2CAP_DISCONN_REQ, sizeof(req), &req);
412 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection once the feature-mask exchange
 * has finished: outbound channels in BT_CONNECT send their connect
 * request, inbound channels in BT_CONNECT2 get their pending connect
 * response (success, authorization-pending for deferred setup, or
 * authentication-pending when security is not yet satisfied). */
413 static void l2cap_conn_start(struct l2cap_conn *conn)
415 struct l2cap_chan_list *l = &conn->chan_list;
418 BT_DBG("conn %p", conn);
422 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
425 if (sk->sk_type != SOCK_SEQPACKET) {
430 if (sk->sk_state == BT_CONNECT) {
431 if (l2cap_check_security(sk)) {
432 struct l2cap_conn_req req;
433 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
434 req.psm = l2cap_pi(sk)->psm;
436 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
438 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
439 L2CAP_CONN_REQ, sizeof(req), &req);
441 } else if (sk->sk_state == BT_CONNECT2) {
442 struct l2cap_conn_rsp rsp;
/* Note the swap: our dcid is the peer's scid and vice versa. */
443 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
444 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
446 if (l2cap_check_security(sk)) {
447 if (bt_sk(sk)->defer_setup) {
448 struct sock *parent = bt_sk(sk)->parent;
449 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
450 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* Wake the listener so userspace can accept() and authorize. */
451 parent->sk_data_ready(parent, 0);
454 sk->sk_state = BT_CONFIG;
455 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
456 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
459 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
460 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
463 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
464 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
470 read_unlock(&l->lock);
/* ACL link is up: raw/dgram sockets become connected immediately, while
 * SEQPACKET channels still in BT_CONNECT proceed with l2cap_do_start()
 * (implied by the visible else-if — the call itself is outside this
 * sampled view). */
473 static void l2cap_conn_ready(struct l2cap_conn *conn)
475 struct l2cap_chan_list *l = &conn->chan_list;
478 BT_DBG("conn %p", conn);
482 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
485 if (sk->sk_type != SOCK_SEQPACKET) {
486 l2cap_sock_clear_timer(sk);
487 sk->sk_state = BT_CONNECTED;
488 sk->sk_state_change(sk);
489 } else if (sk->sk_state == BT_CONNECT)
495 read_unlock(&l->lock);
498 /* Notify sockets that we cannot guaranty reliability anymore */
498 /* Notify sockets that we cannot guaranty reliability anymore */
499 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
501 struct l2cap_chan_list *l = &conn->chan_list;
504 BT_DBG("conn %p", conn);
/* Only channels that asked for force_reliable are told about the
 * reliability loss (the error assignment is outside this view). */
508 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
509 if (l2cap_pi(sk)->force_reliable)
513 read_unlock(&l->lock);
/* Information-request timer expired: give up on the feature-mask
 * exchange, mark it done, and let pending channels proceed anyway. */
516 static void l2cap_info_timeout(unsigned long arg)
518 struct l2cap_conn *conn = (void *) arg;
520 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
521 conn->info_ident = 0;
523 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link.  Allocated GFP_ATOMIC since this can run from the HCI event
 * path; initializes locks, the info timer, and caches the link MTU
 * and addresses from the hci_conn. */
526 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
528 struct l2cap_conn *conn = hcon->l2cap_data;
533 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
537 hcon->l2cap_data = conn;
540 BT_DBG("hcon %p conn %p", hcon, conn);
542 conn->mtu = hcon->hdev->acl_mtu;
543 conn->src = &hcon->hdev->bdaddr;
544 conn->dst = &hcon->dst;
548 setup_timer(&conn->info_timer, l2cap_info_timeout,
549 (unsigned long) conn);
551 spin_lock_init(&conn->lock);
552 rwlock_init(&conn->chan_list.lock);
/* Default disconnect reason (0x13, presumably "remote user terminated
 * connection" — TODO confirm against the HCI error code list). */
554 conn->disc_reason = 0x13;
/* Tear down an L2CAP connection: free any partial reassembly skb,
 * delete every remaining channel with @err, stop the info timer if it
 * was ever armed, and detach from the hci_conn. */
559 static void l2cap_conn_del(struct hci_conn *hcon, int err)
561 struct l2cap_conn *conn = hcon->l2cap_data;
567 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
569 kfree_skb(conn->rx_skb);
572 while ((sk = conn->chan_list.head)) {
574 l2cap_chan_del(sk, err);
579 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
580 del_timer_sync(&conn->info_timer);
582 hcon->l2cap_data = NULL;
/* Locked wrapper for __l2cap_chan_add(): takes the channel-list write
 * lock (bottom-half safe) around the insertion. */
586 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
588 struct l2cap_chan_list *l = &conn->chan_list;
589 write_lock_bh(&l->lock);
590 __l2cap_chan_add(conn, sk, parent);
591 write_unlock_bh(&l->lock);
594 /* ---- Socket interface ---- */
594 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to this source PSM and bdaddr.
 * Caller must hold l2cap_sk_list.lock. */
595 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
598 struct hlist_node *node;
599 sk_for_each(sk, node, &l2cap_sk_list.head)
600 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
607 /* Find socket with psm and source bdaddr.
608 * Returns closest match.
/* Closest-match PSM lookup: prefer a socket bound to the exact source
 * address; fall back to one bound to BDADDR_ANY (kept in sk1).  If the
 * loop ran to completion, node is NULL, so "node ? sk : sk1" returns
 * the exact match when the loop broke early, else the wildcard. */
610 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
612 struct sock *sk = NULL, *sk1 = NULL;
613 struct hlist_node *node;
615 sk_for_each(sk, node, &l2cap_sk_list.head) {
616 if (state && sk->sk_state != state)
619 if (l2cap_pi(sk)->psm == psm) {
621 if (!bacmp(&bt_sk(sk)->src, src))
625 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
629 return node ? sk : sk1;
632 /* Find socket with given address (psm, src).
633 * Returns locked socket */
/* Locked wrapper around __l2cap_get_sock_by_psm(). */
634 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
637 read_lock(&l2cap_sk_list.lock);
638 s = __l2cap_get_sock_by_psm(state, psm, src);
641 read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: drop any skbs still queued for rx or tx. */
645 static void l2cap_sock_destruct(struct sock *sk)
649 skb_queue_purge(&sk->sk_receive_queue);
650 skb_queue_purge(&sk->sk_write_queue);
/* Close every connection still sitting on the listen socket's accept
 * queue, then mark the parent closed and zapped. */
653 static void l2cap_sock_cleanup_listen(struct sock *parent)
657 BT_DBG("parent %p", parent);
659 /* Close not yet accepted channels */
660 while ((sk = bt_accept_dequeue(parent, NULL)))
661 l2cap_sock_close(sk);
663 parent->sk_state = BT_CLOSED;
664 sock_set_flag(parent, SOCK_ZAPPED);
667 /* Kill socket (only if zapped and orphan)
668 * Must be called on unlocked socket.
667 /* Kill socket (only if zapped and orphan)
668 * Must be called on unlocked socket.
/* Bail out unless the socket is both zapped and detached from any
 * struct socket; otherwise unlink it from the global list and mark
 * it dead so the final reference drop frees it. */
670 static void l2cap_sock_kill(struct sock *sk)
672 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
675 BT_DBG("sk %p state %d", sk, sk->sk_state);
677 /* Kill poor orphan */
678 bt_sock_unlink(&l2cap_sk_list, sk);
679 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close.  Listening sockets sweep their accept queue;
 * connected/configuring SEQPACKET channels send a disconnect request
 * and wait in BT_DISCONN; a CONNECT2 channel that never completed setup
 * answers the pending connect request with a rejection first.  Anything
 * else is simply deleted or zapped.  Caller holds the socket lock. */
683 static void __l2cap_sock_close(struct sock *sk, int reason)
685 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
687 switch (sk->sk_state) {
689 l2cap_sock_cleanup_listen(sk);
694 if (sk->sk_type == SOCK_SEQPACKET) {
695 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
697 sk->sk_state = BT_DISCONN;
698 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
699 l2cap_send_disconn_req(conn, sk);
701 l2cap_chan_del(sk, reason);
705 if (sk->sk_type == SOCK_SEQPACKET) {
706 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
707 struct l2cap_conn_rsp rsp;
/* Deferred-setup sockets were never authorized: report a security
 * block; otherwise claim the PSM is unknown. */
710 if (bt_sk(sk)->defer_setup)
711 result = L2CAP_CR_SEC_BLOCK;
713 result = L2CAP_CR_BAD_PSM;
715 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
716 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
717 rsp.result = cpu_to_le16(result);
718 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
719 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
720 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
722 l2cap_chan_del(sk, reason);
727 l2cap_chan_del(sk, reason);
731 sock_set_flag(sk, SOCK_ZAPPED);
736 /* Must be called on unlocked socket. */
736 /* Must be called on unlocked socket. */
/* Cancel the timer and close with ECONNRESET (locking of the socket
 * around __l2cap_sock_close is outside this sampled view). */
737 static void l2cap_sock_close(struct sock *sk)
739 l2cap_sock_clear_timer(sk);
741 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a new L2CAP socket.  With a @parent (incoming connection)
 * the child inherits the listener's type and all channel options;
 * without one it gets the defaults: basic mode, CRC16 FCS, low
 * security, default MTU and flush timeout. */
746 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
748 struct l2cap_pinfo *pi = l2cap_pi(sk);
753 sk->sk_type = parent->sk_type;
754 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
756 pi->imtu = l2cap_pi(parent)->imtu;
757 pi->omtu = l2cap_pi(parent)->omtu;
758 pi->mode = l2cap_pi(parent)->mode;
759 pi->fcs = l2cap_pi(parent)->fcs;
760 pi->sec_level = l2cap_pi(parent)->sec_level;
761 pi->role_switch = l2cap_pi(parent)->role_switch;
762 pi->force_reliable = l2cap_pi(parent)->force_reliable;
764 pi->imtu = L2CAP_DEFAULT_MTU;
766 pi->mode = L2CAP_MODE_BASIC;
767 pi->fcs = L2CAP_FCS_CRC16;
768 pi->sec_level = BT_SECURITY_LOW;
770 pi->force_reliable = 0;
773 /* Default config options */
775 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor: sizes sock allocations to hold l2cap_pinfo. */
778 static struct proto l2cap_proto = {
780 .owner = THIS_MODULE,
781 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and initialize a bare L2CAP socket: wire up the destructor,
 * default connect timeout, state BT_OPEN, the per-socket timer, and
 * link it into the global socket list. */
784 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
788 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
792 sock_init_data(sock, sk);
793 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
795 sk->sk_destruct = l2cap_sock_destruct;
796 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
798 sock_reset_flag(sk, SOCK_ZAPPED);
800 sk->sk_protocol = proto;
801 sk->sk_state = BT_OPEN;
803 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
805 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) entry point for PF_BLUETOOTH/BTPROTO_L2CAP.  Accepts only
 * SEQPACKET, DGRAM and RAW types; RAW additionally needs CAP_NET_RAW. */
809 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
813 BT_DBG("sock %p", sock);
815 sock->state = SS_UNCONNECTED;
817 if (sock->type != SOCK_SEQPACKET &&
818 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
819 return -ESOCKTNOSUPPORT;
821 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
824 sock->ops = &l2cap_sock_ops;
826 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
830 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, enforce that PSMs
 * below 0x1001 are reserved for CAP_NET_BIND_SERVICE holders, reject a
 * duplicate psm/src binding, then record the source address and PSM.
 * PSMs 0x0001 (SDP) and 0x0003 (RFCOMM) get the SDP security level. */
834 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
836 struct sock *sk = sock->sk;
837 struct sockaddr_l2 la;
842 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddrs from userspace: zero-fill then copy. */
845 memset(&la, 0, sizeof(la));
846 len = min_t(unsigned int, sizeof(la), alen);
847 memcpy(&la, addr, len);
854 if (sk->sk_state != BT_OPEN) {
859 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
860 !capable(CAP_NET_BIND_SERVICE)) {
865 write_lock_bh(&l2cap_sk_list.lock);
867 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
870 /* Save source address */
871 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
872 l2cap_pi(sk)->psm = la.l2_psm;
873 l2cap_pi(sk)->sport = la.l2_psm;
874 sk->sk_state = BT_BOUND;
876 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
877 __le16_to_cpu(la.l2_psm) == 0x0003)
878 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
881 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to the destination, pick an HCI authentication type
 * from the socket type and security level (raw sockets use dedicated
 * bonding, SDP never bonds, everything else general bonding), create
 * the ACL link, attach the channel, and — if the link is already up —
 * complete non-SEQPACKET sockets immediately. */
888 static int l2cap_do_connect(struct sock *sk)
890 bdaddr_t *src = &bt_sk(sk)->src;
891 bdaddr_t *dst = &bt_sk(sk)->dst;
892 struct l2cap_conn *conn;
893 struct hci_conn *hcon;
894 struct hci_dev *hdev;
898 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
901 hdev = hci_get_route(dst, src);
903 return -EHOSTUNREACH;
905 hci_dev_lock_bh(hdev);
909 if (sk->sk_type == SOCK_RAW) {
910 switch (l2cap_pi(sk)->sec_level) {
911 case BT_SECURITY_HIGH:
912 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
914 case BT_SECURITY_MEDIUM:
915 auth_type = HCI_AT_DEDICATED_BONDING;
918 auth_type = HCI_AT_NO_BONDING;
921 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
922 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
923 auth_type = HCI_AT_NO_BONDING_MITM;
925 auth_type = HCI_AT_NO_BONDING;
927 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
928 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
930 switch (l2cap_pi(sk)->sec_level) {
931 case BT_SECURITY_HIGH:
932 auth_type = HCI_AT_GENERAL_BONDING_MITM;
934 case BT_SECURITY_MEDIUM:
935 auth_type = HCI_AT_GENERAL_BONDING;
938 auth_type = HCI_AT_NO_BONDING;
943 hcon = hci_connect(hdev, ACL_LINK, dst,
944 l2cap_pi(sk)->sec_level, auth_type);
948 conn = l2cap_conn_add(hcon, 0);
956 /* Update source addr of the socket */
957 bacpy(src, conn->src);
959 l2cap_chan_add(conn, sk, NULL);
961 sk->sk_state = BT_CONNECT;
962 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already established: raw/dgram sockets are done now;
 * SEQPACKET channels continue with the L2CAP handshake. */
964 if (hcon->state == BT_CONNECTED) {
965 if (sk->sk_type != SOCK_SEQPACKET) {
966 l2cap_sock_clear_timer(sk);
967 sk->sk_state = BT_CONNECTED;
973 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address, require a PSM for SEQPACKET
 * sockets, allow ERTM/streaming modes only when enable_ertm is set
 * (the gate itself falls in lines outside this sampled view), record
 * the destination, start the connect, and optionally block until the
 * socket reaches BT_CONNECTED. */
978 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
980 struct sock *sk = sock->sk;
981 struct sockaddr_l2 la;
986 if (!addr || addr->sa_family != AF_BLUETOOTH)
989 memset(&la, 0, sizeof(la));
990 len = min_t(unsigned int, sizeof(la), alen);
991 memcpy(&la, addr, len);
998 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1003 switch (l2cap_pi(sk)->mode) {
1004 case L2CAP_MODE_BASIC:
1006 case L2CAP_MODE_ERTM:
1007 case L2CAP_MODE_STREAMING:
1016 switch (sk->sk_state) {
1020 /* Already connecting */
1024 /* Already connected */
1037 /* Set destination address and psm */
1038 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1039 l2cap_pi(sk)->psm = la.l2_psm;
1041 err = l2cap_do_connect(sk);
1046 err = bt_sock_wait_state(sk, BT_CONNECTED,
1047 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen.  If no PSM was
 * bound, auto-allocate an odd dynamic PSM in [0x1001, 0x1100) — valid
 * PSMs have an odd least-significant octet, hence the step of 2. */
1053 static int l2cap_sock_listen(struct socket *sock, int backlog)
1055 struct sock *sk = sock->sk;
1058 BT_DBG("sk %p backlog %d", sk, backlog);
1062 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1067 switch (l2cap_pi(sk)->mode) {
1068 case L2CAP_MODE_BASIC:
1070 case L2CAP_MODE_ERTM:
1071 case L2CAP_MODE_STREAMING:
1080 if (!l2cap_pi(sk)->psm) {
1081 bdaddr_t *src = &bt_sk(sk)->src;
1086 write_lock_bh(&l2cap_sk_list.lock);
1088 for (psm = 0x1001; psm < 0x1100; psm += 2)
1089 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1090 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1091 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1096 write_unlock_bh(&l2cap_sk_list.lock);
1102 sk->sk_max_ack_backlog = backlog;
1103 sk->sk_ack_backlog = 0;
1104 sk->sk_state = BT_LISTEN;
/* accept(2): classic wake-one sleep loop.  Waits (interruptibly, up to
 * the receive timeout) for a connection on the accept queue, dropping
 * the socket lock across schedule_timeout() and revalidating the
 * listen state after reacquiring it. */
1111 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1113 DECLARE_WAITQUEUE(wait, current);
1114 struct sock *sk = sock->sk, *nsk;
1118 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1120 if (sk->sk_state != BT_LISTEN) {
1125 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1127 BT_DBG("sk %p timeo %ld", sk, timeo);
1129 /* Wait for an incoming connection. (wake-one). */
1130 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1131 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1132 set_current_state(TASK_INTERRUPTIBLE);
1139 timeo = schedule_timeout(timeo);
1140 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1142 if (sk->sk_state != BT_LISTEN) {
1147 if (signal_pending(current)) {
1148 err = sock_intr_errno(timeo);
1152 set_current_state(TASK_RUNNING);
1153 remove_wait_queue(sk->sk_sleep, &wait);
1158 newsock->state = SS_CONNECTED;
1160 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer's
 * psm/bdaddr/dcid (peer != 0) or our own sport/bdaddr/scid. */
1167 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1169 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1170 struct sock *sk = sock->sk;
1172 BT_DBG("sock %p, sk %p", sock, sk);
1174 addr->sa_family = AF_BLUETOOTH;
1175 *len = sizeof(struct sockaddr_l2);
1178 la->l2_psm = l2cap_pi(sk)->psm;
1179 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1180 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1182 la->l2_psm = l2cap_pi(sk)->sport;
1183 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1184 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: the peer failed to answer a poll.  Disconnect
 * once remote_max_tx retries are exhausted; otherwise re-arm the
 * monitor timer and poll again with an RR S-frame (P bit set). */
1190 static void l2cap_monitor_timeout(unsigned long arg)
1192 struct sock *sk = (void *) arg;
1195 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1196 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1200 l2cap_pi(sk)->retry_count++;
1201 __mod_monitor_timer();
1203 control = L2CAP_CTRL_POLL;
1204 control |= L2CAP_SUPER_RCV_READY;
1205 l2cap_send_sframe(l2cap_pi(sk), control);
/* ERTM retransmission timer: no acknowledgement arrived in time.
 * Start the monitor cycle (retry_count = 1), enter the WAIT_F state
 * (transmission paused until the F-bit response), and poll the peer
 * with an RR S-frame carrying the P bit. */
1208 static void l2cap_retrans_timeout(unsigned long arg)
1210 struct sock *sk = (void *) arg;
1213 l2cap_pi(sk)->retry_count = 1;
1214 __mod_monitor_timer();
1216 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1218 control = L2CAP_CTRL_POLL;
1219 control |= L2CAP_SUPER_RCV_READY;
1220 l2cap_send_sframe(l2cap_pi(sk), control);
/* Pop acknowledged I-frames off the head of the transmit queue, up to
 * (but not including) expected_ack_seq, decrementing the unacked count
 * and stopping the retransmission timer once nothing is outstanding. */
1223 static void l2cap_drop_acked_frames(struct sock *sk)
1225 struct sk_buff *skb;
1227 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1228 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1231 skb = skb_dequeue(TX_QUEUE(sk));
1234 l2cap_pi(sk)->unacked_frames--;
1237 if (!l2cap_pi(sk)->unacked_frames)
1238 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one skb to the ACL layer for transmission on this channel's
 * connection; returns the hci_send_acl() status. */
1243 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1245 struct l2cap_pinfo *pi = l2cap_pi(sk);
1248 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1250 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: clone each queued I-frame, stamp the TxSeq
 * into the control field (modulo-64 sequence space), recompute the FCS
 * over the patched clone, and send.  No acknowledgements are tracked;
 * sent skbs are simply dequeued and dropped. */
1257 static int l2cap_streaming_send(struct sock *sk)
1259 struct sk_buff *skb, *tx_skb;
1260 struct l2cap_pinfo *pi = l2cap_pi(sk);
1264 while ((skb = sk->sk_send_head)) {
1265 tx_skb = skb_clone(skb, GFP_ATOMIC);
1267 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1268 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1269 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1271 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1272 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1273 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1276 err = l2cap_do_send(sk, tx_skb);
1278 l2cap_send_disconn_req(pi->conn, sk);
1282 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1284 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1285 sk->sk_send_head = NULL;
1287 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1289 skb = skb_dequeue(TX_QUEUE(sk));
/* ERTM transmit: while not paused by a pending poll (WAIT_F) and the
 * tx window has room, clone the next I-frame, stamp ReqSeq and TxSeq
 * into its control field, send it, and keep the original queued for
 * possible retransmission.  Disconnects when a frame has already been
 * retried remote_max_tx times. */
1295 static int l2cap_ertm_send(struct sock *sk)
1297 struct sk_buff *skb, *tx_skb;
1298 struct l2cap_pinfo *pi = l2cap_pi(sk);
1302 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1305 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1306 tx_skb = skb_clone(skb, GFP_ATOMIC);
1308 if (pi->remote_max_tx &&
1309 bt_cb(skb)->retries == pi->remote_max_tx) {
1310 l2cap_send_disconn_req(pi->conn, sk);
1314 bt_cb(skb)->retries++;
1316 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1317 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1318 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1319 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed/written through skb->data while the
 * streaming path uses tx_skb->data.  skb_clone() shares the data
 * buffer so the bytes land in the same place, but the asymmetry is
 * fragile (breaks if the clone is ever copied) — worth unifying. */
1322 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1323 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1324 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1327 err = l2cap_do_send(sk, tx_skb);
1329 l2cap_send_disconn_req(pi->conn, sk);
1332 __mod_retrans_timer();
1334 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1335 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1337 pi->unacked_frames++;
1339 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1340 sk->sk_send_head = NULL;
1342 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes
 * go into the skb's linear area, the remainder into a chain of MTU
 * sized continuation fragments hung off frag_list. */
1348 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1350 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1351 struct sk_buff **frag;
1354 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1361 /* Continuation fragments (no L2CAP header) */
1362 frag = &skb_shinfo(skb)->frag_list;
1364 count = min_t(unsigned int, conn->mtu, len);
1366 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1369 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1375 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header, 2-byte PSM, then
 * the user payload pulled from the iovec.  Returns ERR_PTR on failure. */
1381 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1383 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1384 struct sk_buff *skb;
1385 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1386 struct l2cap_hdr *lh;
1388 BT_DBG("sk %p len %d", sk, (int)len);
1390 count = min_t(unsigned int, (conn->mtu - hlen), len);
1391 skb = bt_skb_send_alloc(sk, count + hlen,
1392 msg->msg_flags & MSG_DONTWAIT, &err);
1394 return ERR_PTR(-ENOMEM);
1396 /* Create L2CAP header */
1397 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1398 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1399 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1400 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1402 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1403 if (unlikely(err < 0)) {
1405 return ERR_PTR(err);
/* Build a basic-mode B-frame: bare L2CAP header followed by payload.
 * Returns ERR_PTR on allocation or copy failure. */
1410 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1412 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1413 struct sk_buff *skb;
1414 int err, count, hlen = L2CAP_HDR_SIZE;
1415 struct l2cap_hdr *lh;
1417 BT_DBG("sk %p len %d", sk, (int)len);
1419 count = min_t(unsigned int, (conn->mtu - hlen), len);
1420 skb = bt_skb_send_alloc(sk, count + hlen,
1421 msg->msg_flags & MSG_DONTWAIT, &err);
1423 return ERR_PTR(-ENOMEM);
1425 /* Create L2CAP header */
1426 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1427 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1428 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1430 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1431 if (unlikely(err < 0)) {
1433 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: L2CAP header, 16-bit control word,
 * optional 16-bit SDU length (for SAR start frames, @sdulen != 0),
 * payload, and a placeholder FCS (filled in at transmit time).
 * Initializes the retry counter.  Returns ERR_PTR on failure. */
1438 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1440 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1441 struct sk_buff *skb;
1442 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1443 struct l2cap_hdr *lh;
1445 BT_DBG("sk %p len %d", sk, (int)len);
1450 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1453 count = min_t(unsigned int, (conn->mtu - hlen), len);
1454 skb = bt_skb_send_alloc(sk, count + hlen,
1455 msg->msg_flags & MSG_DONTWAIT, &err);
1457 return ERR_PTR(-ENOMEM);
1459 /* Create L2CAP header */
1460 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1461 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1462 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1463 put_unaligned_le16(control, skb_put(skb, 2));
1465 put_unaligned_le16(sdulen, skb_put(skb, 2));
1467 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1468 if (unlikely(err < 0)) {
1470 return ERR_PTR(err);
/* Reserve space for the FCS; the real value is written just before
 * transmission once the sequence numbers are stamped in. */
1473 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1474 put_unaligned_le16(0, skb_put(skb, 2));
1476 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than max_pdu_size into a START frame (carrying
 * the total SDU length), zero or more CONTINUE frames, and an END
 * frame, built on a private queue that is spliced onto the transmit
 * queue only once every segment allocated successfully. */
1480 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1482 struct l2cap_pinfo *pi = l2cap_pi(sk);
1483 struct sk_buff *skb;
1484 struct sk_buff_head sar_queue;
1488 __skb_queue_head_init(&sar_queue);
1489 control = L2CAP_SDU_START;
1490 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1492 return PTR_ERR(skb);
1494 __skb_queue_tail(&sar_queue, skb);
1495 len -= pi->max_pdu_size;
1496 size +=pi->max_pdu_size;
1502 if (len > pi->max_pdu_size) {
1503 control |= L2CAP_SDU_CONTINUE;
1504 buflen = pi->max_pdu_size;
1506 control |= L2CAP_SDU_END;
1510 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Any mid-SDU failure throws away the whole partial queue. */
1512 skb_queue_purge(&sar_queue);
1513 return PTR_ERR(skb);
1516 __skb_queue_tail(&sar_queue, skb);
1521 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1522 if (sk->sk_send_head == NULL)
1523 sk->sk_send_head = sar_queue.next;
/* sendmsg(2): reject OOB data, enforce the outgoing MTU for basic-mode
 * SEQPACKET sockets, and dispatch by channel mode — connectionless
 * PDU for DGRAM, a single B-frame for basic mode, and for ERTM /
 * streaming either one unsegmented I-frame or an SAR-segmented train,
 * queued and then pushed by the mode-specific send routine. */
1528 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1530 struct sock *sk = sock->sk;
1531 struct l2cap_pinfo *pi = l2cap_pi(sk);
1532 struct sk_buff *skb;
1536 BT_DBG("sock %p, sk %p", sock, sk);
1538 err = sock_error(sk);
1542 if (msg->msg_flags & MSG_OOB)
1545 /* Check outgoing MTU */
1546 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
1552 if (sk->sk_state != BT_CONNECTED) {
1557 /* Connectionless channel */
1558 if (sk->sk_type == SOCK_DGRAM) {
1559 skb = l2cap_create_connless_pdu(sk, msg, len);
1560 err = l2cap_do_send(sk, skb);
1565 case L2CAP_MODE_BASIC:
1566 /* Create a basic PDU */
1567 skb = l2cap_create_basic_pdu(sk, msg, len);
1573 err = l2cap_do_send(sk, skb);
1578 case L2CAP_MODE_ERTM:
1579 case L2CAP_MODE_STREAMING:
1580 /* Entire SDU fits into one PDU */
1581 if (len <= pi->max_pdu_size) {
1582 control = L2CAP_SDU_UNSEGMENTED;
1583 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1588 __skb_queue_tail(TX_QUEUE(sk), skb);
1589 if (sk->sk_send_head == NULL)
1590 sk->sk_send_head = skb;
1592 /* Segment SDU into multiples PDUs */
1593 err = l2cap_sar_segment_sdu(sk, msg, len);
1598 if (pi->mode == L2CAP_MODE_STREAMING)
1599 err = l2cap_streaming_send(sk);
1601 err = l2cap_ertm_send(sk);
1608 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() handler.  For a connection deferred with BT_DEFER_SETUP the
 * first read acts as the accept trigger: it sends the pending positive
 * connect response and moves the channel into configuration.  All actual
 * data delivery is delegated to the generic bt_sock_recvmsg(). */
1617 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1619 struct sock *sk = sock->sk;
1623 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1624 struct l2cap_conn_rsp rsp;
1626 sk->sk_state = BT_CONFIG;
/* Response is built from the channel's IDs; note scid/dcid are swapped
 * relative to our local view, as the response describes the remote side. */
1628 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1629 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1630 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1631 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1632 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1633 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1641 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * Defaults the opts struct from current channel state so a short
 * copy_from_user() leaves untouched fields at their present values. */
1644 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1646 struct sock *sk = sock->sk;
1647 struct l2cap_options opts;
1651 BT_DBG("sk %p", sk);
/* Pre-fill with current settings; user may overwrite only a prefix. */
1657 opts.imtu = l2cap_pi(sk)->imtu;
1658 opts.omtu = l2cap_pi(sk)->omtu;
1659 opts.flush_to = l2cap_pi(sk)->flush_to;
1660 opts.mode = l2cap_pi(sk)->mode;
1661 opts.fcs = l2cap_pi(sk)->fcs;
1663 len = min_t(unsigned int, sizeof(opts), optlen);
1664 if (copy_from_user((char *) &opts, optval, len)) {
1669 l2cap_pi(sk)->imtu = opts.imtu;
1670 l2cap_pi(sk)->omtu = opts.omtu;
1671 l2cap_pi(sk)->mode = opts.mode;
1672 l2cap_pi(sk)->fcs = opts.fcs;
1676 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode flags onto the newer sec_level scale; later
 * flags override earlier ones (SECURE wins over ENCRYPT wins over AUTH). */
1681 if (opt & L2CAP_LM_AUTH)
1682 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1683 if (opt & L2CAP_LM_ENCRYPT)
1684 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1685 if (opt & L2CAP_LM_SECURE)
1686 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1688 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1689 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* Main setsockopt entry point.  SOL_L2CAP is routed to the legacy
 * handler; only SOL_BLUETOOTH options (BT_SECURITY, BT_DEFER_SETUP) are
 * handled here. */
1701 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1703 struct sock *sk = sock->sk;
1704 struct bt_security sec;
1708 BT_DBG("sk %p", sk);
1710 if (level == SOL_L2CAP)
1711 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1713 if (level != SOL_BLUETOOTH)
1714 return -ENOPROTOOPT;
/* BT_SECURITY only applies to SEQPACKET and RAW L2CAP sockets. */
1720 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1725 sec.level = BT_SECURITY_LOW;
1727 len = min_t(unsigned int, sizeof(sec), optlen);
1728 if (copy_from_user((char *) &sec, optval, len)) {
1733 if (sec.level < BT_SECURITY_LOW ||
1734 sec.level > BT_SECURITY_HIGH) {
1739 l2cap_pi(sk)->sec_level = sec.level;
1742 case BT_DEFER_SETUP:
/* Deferred setup can only be toggled before the channel is active. */
1743 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1748 if (get_user(opt, (u32 __user *) optval)) {
1753 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler: returns channel options, the
 * link-mode flags reconstructed from sec_level, or connection info
 * (HCI handle + remote device class). */
1765 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1767 struct sock *sk = sock->sk;
1768 struct l2cap_options opts;
1769 struct l2cap_conninfo cinfo;
1773 BT_DBG("sk %p", sk);
1775 if (get_user(len, optlen))
1782 opts.imtu = l2cap_pi(sk)->imtu;
1783 opts.omtu = l2cap_pi(sk)->omtu;
1784 opts.flush_to = l2cap_pi(sk)->flush_to;
1785 opts.mode = l2cap_pi(sk)->mode;
1786 opts.fcs = l2cap_pi(sk)->fcs;
1788 len = min_t(unsigned int, len, sizeof(opts));
1789 if (copy_to_user(optval, (char *) &opts, len))
/* Reverse of setsockopt_old: translate sec_level back into the legacy
 * L2CAP_LM_* flag combination. */
1795 switch (l2cap_pi(sk)->sec_level) {
1796 case BT_SECURITY_LOW:
1797 opt = L2CAP_LM_AUTH;
1799 case BT_SECURITY_MEDIUM:
1800 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1802 case BT_SECURITY_HIGH:
1803 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1811 if (l2cap_pi(sk)->role_switch)
1812 opt |= L2CAP_LM_MASTER;
1814 if (l2cap_pi(sk)->force_reliable)
1815 opt |= L2CAP_LM_RELIABLE;
1817 if (put_user(opt, (u32 __user *) optval))
1821 case L2CAP_CONNINFO:
/* Valid while connected, or in deferred-setup limbo (BT_CONNECT2). */
1822 if (sk->sk_state != BT_CONNECTED &&
1823 !(sk->sk_state == BT_CONNECT2 &&
1824 bt_sk(sk)->defer_setup)) {
1829 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1830 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1832 len = min_t(unsigned int, len, sizeof(cinfo));
1833 if (copy_to_user(optval, (char *) &cinfo, len))
/* Main getsockopt entry point; mirrors l2cap_sock_setsockopt.
 * SOL_L2CAP goes to the legacy handler, SOL_BLUETOOTH handles
 * BT_SECURITY and BT_DEFER_SETUP. */
1847 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1849 struct sock *sk = sock->sk;
1850 struct bt_security sec;
1853 BT_DBG("sk %p", sk);
1855 if (level == SOL_L2CAP)
1856 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1858 if (level != SOL_BLUETOOTH)
1859 return -ENOPROTOOPT;
1861 if (get_user(len, optlen))
1868 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1873 sec.level = l2cap_pi(sk)->sec_level;
1875 len = min_t(unsigned int, len, sizeof(sec));
1876 if (copy_to_user(optval, (char *) &sec, len))
1881 case BT_DEFER_SETUP:
1882 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1887 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() handler: close the channel (idempotent — only acts if not
 * already shut down) and, if SO_LINGER is set, wait up to sk_lingertime
 * for the channel to reach BT_CLOSED. */
1901 static int l2cap_sock_shutdown(struct socket *sock, int how)
1903 struct sock *sk = sock->sk;
1906 BT_DBG("sock %p, sk %p", sock, sk);
1912 if (!sk->sk_shutdown) {
1913 sk->sk_shutdown = SHUTDOWN_MASK;
1914 l2cap_sock_clear_timer(sk);
1915 __l2cap_sock_close(sk, 0);
1917 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1918 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() handler: full shutdown (both directions) followed by
 * killing the socket. */
1925 static int l2cap_sock_release(struct socket *sock)
1927 struct sock *sk = sock->sk;
1930 BT_DBG("sock %p, sk %p", sock, sk);
1935 err = l2cap_sock_shutdown(sock, 2);
1938 l2cap_sock_kill(sk);
/* Called when channel configuration completes.  Clears the config state
 * and wakes whoever is waiting: the connect()er for an outgoing channel,
 * or the accept()ing parent for an incoming one. */
1942 static void l2cap_chan_ready(struct sock *sk)
1944 struct sock *parent = bt_sk(sk)->parent;
1946 BT_DBG("sk %p, parent %p", sk, parent);
1948 l2cap_pi(sk)->conf_state = 0;
1949 l2cap_sock_clear_timer(sk);
1952 /* Outgoing channel.
1953 * Wake up socket sleeping on connect.
1955 sk->sk_state = BT_CONNECTED;
1956 sk->sk_state_change(sk);
1958 /* Incoming channel.
1959 * Wake up socket sleeping on accept.
1961 parent->sk_data_ready(parent, 0);
1965 /* Copy frame to all raw sockets on that connection */
/* Clones the skb per raw socket under the channel-list read lock; clones
 * use GFP_ATOMIC since this runs in a non-sleeping receive context. */
1966 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1968 struct l2cap_chan_list *l = &conn->chan_list;
1969 struct sk_buff *nskb;
1972 BT_DBG("conn %p", conn);
1974 read_lock(&l->lock);
1975 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1976 if (sk->sk_type != SOCK_RAW)
1979 /* Don't send frame to the socket it came from */
1982 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the receiver's queue is full the clone is dropped (best-effort). */
1986 if (sock_queue_rcv_skb(sk, nskb))
1989 read_unlock(&l->lock);
1992 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID 0x0001) plus
 * command header plus payload.  Payload larger than the connection MTU is
 * carried in continuation fragments chained on frag_list. */
1993 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1994 u8 code, u8 ident, u16 dlen, void *data)
1996 struct sk_buff *skb, **frag;
1997 struct l2cap_cmd_hdr *cmd;
1998 struct l2cap_hdr *lh;
2001 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2002 conn, code, ident, dlen);
2004 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
/* First fragment is capped at the connection MTU. */
2005 count = min_t(unsigned int, conn->mtu, len);
2007 skb = bt_skb_alloc(count, GFP_ATOMIC);
2011 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2012 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2013 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2015 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2018 cmd->len = cpu_to_le16(dlen);
2021 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2022 memcpy(skb_put(skb, count), data, count);
2028 /* Continuation fragments (no L2CAP header) */
2029 frag = &skb_shinfo(skb)->frag_list;
2031 count = min_t(unsigned int, conn->mtu, len);
2033 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2037 memcpy(skb_put(*frag, count), data, count);
2042 frag = &(*frag)->next;
/* Parse one configuration option at *ptr, returning its type/len and the
 * value widened to unsigned long (1/2/4-byte values by copy, larger ones
 * by pointer).  Returns the total option size consumed; caller advances
 * through the option list by the return value. */
2052 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2054 struct l2cap_conf_opt *opt = *ptr;
2057 len = L2CAP_CONF_OPT_SIZE + opt->len;
2065 *val = *((u8 *) opt->val);
2069 *val = __le16_to_cpu(*((__le16 *) opt->val));
2073 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Larger options: hand back a pointer into the buffer, not a copy. */
2077 *val = (unsigned long) opt->val;
2081 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (inverse of l2cap_get_conf_opt) to the
 * buffer at *ptr, converting 2/4-byte values to little endian, and advance
 * *ptr past the option. */
2085 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2087 struct l2cap_conf_opt *opt = *ptr;
2089 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2096 *((u8 *) opt->val) = val;
2100 *((__le16 *) opt->val) = cpu_to_le16(val);
2104 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Larger options: val is a pointer to the raw option payload. */
2108 memcpy(opt->val, (void *) val, len);
2112 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Non-zero when the given channel mode is advertised by both the local
 * feature mask (ERTM/streaming bits are presumably gated on enable_ertm;
 * the gating condition is not visible here) and the remote feat_mask. */
2115 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2117 u32 local_feat_mask = l2cap_feat_mask;
2119 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2122 case L2CAP_MODE_ERTM:
2123 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2124 case L2CAP_MODE_STREAMING:
2125 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode to request: keep ERTM/streaming if the remote
 * supports it, otherwise fall back to basic mode. */
2131 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2134 case L2CAP_MODE_STREAMING:
2135 case L2CAP_MODE_ERTM:
2136 if (l2cap_mode_supported(mode, remote_feat_mask))
2140 return L2CAP_MODE_BASIC;
/* Build an outgoing Configuration Request into 'data' for this channel.
 * On the first request the mode may be (re)negotiated against the remote
 * feature mask; then MTU / RFC / FCS options are appended per mode.
 * Returns the total request length (return statement outside this view). */
2144 static int l2cap_build_conf_req(struct sock *sk, void *data)
2146 struct l2cap_pinfo *pi = l2cap_pi(sk);
2147 struct l2cap_conf_req *req = data;
2148 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
2149 void *ptr = req->data;
2151 BT_DBG("sk %p", sk);
/* Mode selection only happens before any config exchange has occurred. */
2153 if (pi->num_conf_req || pi->num_conf_rsp)
2157 case L2CAP_MODE_STREAMING:
2158 case L2CAP_MODE_ERTM:
2159 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2160 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2161 l2cap_send_disconn_req(pi->conn, sk);
2164 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2170 case L2CAP_MODE_BASIC:
/* MTU option only sent when it differs from the spec default. */
2171 if (pi->imtu != L2CAP_DEFAULT_MTU)
2172 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2175 case L2CAP_MODE_ERTM:
2176 rfc.mode = L2CAP_MODE_ERTM;
2177 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2178 rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
/* Retrans/monitor timeouts are 0 in the request: per spec these fields
 * are meaningful only in the responder's direction. */
2179 rfc.retrans_timeout = 0;
2180 rfc.monitor_timeout = 0;
2181 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2183 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2184 sizeof(rfc), (unsigned long) &rfc);
2186 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2189 if (pi->fcs == L2CAP_FCS_NONE ||
2190 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2191 pi->fcs = L2CAP_FCS_NONE;
2192 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2196 case L2CAP_MODE_STREAMING:
2197 rfc.mode = L2CAP_MODE_STREAMING;
2199 rfc.max_transmit = 0;
2200 rfc.retrans_timeout = 0;
2201 rfc.monitor_timeout = 0;
2202 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2204 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2205 sizeof(rfc), (unsigned long) &rfc);
2207 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2210 if (pi->fcs == L2CAP_FCS_NONE ||
2211 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2212 pi->fcs = L2CAP_FCS_NONE;
2213 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2218 /* FIXME: Need actual value of the flush timeout */
2219 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2220 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2222 req->dcid = cpu_to_le16(pi->dcid);
2223 req->flags = cpu_to_le16(0);
/* Parse the remote's accumulated Configuration Request (pi->conf_req,
 * pi->conf_len) and build our Configuration Response into 'data'.
 * Walks the option list, negotiates mode/MTU/FCS, and fills in the
 * response header.  Returns the response length or -ECONNREFUSED when
 * the requested mode cannot be agreed on. */
2228 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2230 struct l2cap_pinfo *pi = l2cap_pi(sk);
2231 struct l2cap_conf_rsp *rsp = data;
2232 void *ptr = rsp->data;
2233 void *req = pi->conf_req;
2234 int len = pi->conf_len;
2235 int type, hint, olen;
2237 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2238 u16 mtu = L2CAP_DEFAULT_MTU;
2239 u16 result = L2CAP_CONF_SUCCESS;
2241 BT_DBG("sk %p", sk);
/* First pass: decode every option the remote sent. */
2243 while (len >= L2CAP_CONF_OPT_SIZE) {
2244 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* The hint bit marks options we may silently ignore if unknown. */
2246 hint = type & L2CAP_CONF_HINT;
2247 type &= L2CAP_CONF_MASK;
2250 case L2CAP_CONF_MTU:
2254 case L2CAP_CONF_FLUSH_TO:
2258 case L2CAP_CONF_QOS:
2261 case L2CAP_CONF_RFC:
2262 if (olen == sizeof(rfc))
2263 memcpy(&rfc, (void *) val, olen);
2266 case L2CAP_CONF_FCS:
2267 if (val == L2CAP_FCS_NONE)
2268 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: reject it by echoing its type back. */
2276 result = L2CAP_CONF_UNKNOWN;
2277 *((u8 *) ptr++) = type;
/* Mode negotiation, only before any config exchange has completed. */
2282 if (pi->num_conf_rsp || pi->num_conf_req)
2286 case L2CAP_MODE_STREAMING:
2287 case L2CAP_MODE_ERTM:
2288 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2289 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2290 return -ECONNREFUSED;
2293 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Remote asked for a different mode: propose ours, but give up after
 * one failed round. */
2298 if (pi->mode != rfc.mode) {
2299 result = L2CAP_CONF_UNACCEPT;
2300 rfc.mode = pi->mode;
2302 if (pi->num_conf_rsp == 1)
2303 return -ECONNREFUSED;
2305 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2306 sizeof(rfc), (unsigned long) &rfc);
2310 if (result == L2CAP_CONF_SUCCESS) {
2311 /* Configure output options and let the other side know
2312 * which ones we don't like. */
2314 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2315 result = L2CAP_CONF_UNACCEPT;
2318 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2320 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2323 case L2CAP_MODE_BASIC:
2324 pi->fcs = L2CAP_FCS_NONE;
2325 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2328 case L2CAP_MODE_ERTM:
/* Adopt remote's ERTM transmit parameters; we fill in the timeouts
 * (responder direction per spec). */
2329 pi->remote_tx_win = rfc.txwin_size;
2330 pi->remote_max_tx = rfc.max_transmit;
2331 pi->max_pdu_size = rfc.max_pdu_size;
2333 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2334 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2336 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2339 case L2CAP_MODE_STREAMING:
2340 pi->remote_tx_win = rfc.txwin_size;
2341 pi->max_pdu_size = rfc.max_pdu_size;
2343 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2347 result = L2CAP_CONF_UNACCEPT;
2349 memset(&rfc, 0, sizeof(rfc));
2350 rfc.mode = pi->mode;
2353 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2354 sizeof(rfc), (unsigned long) &rfc);
2356 if (result == L2CAP_CONF_SUCCESS)
2357 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2359 rsp->scid = cpu_to_le16(pi->dcid);
2360 rsp->result = cpu_to_le16(result);
2361 rsp->flags = cpu_to_le16(0x0000);
/* Parse the remote's Configuration Response and build a follow-up
 * Configuration Request into 'data', adjusting options they rejected.
 * *result may be downgraded (e.g. to UNACCEPT for a too-small MTU);
 * returns -ECONNREFUSED if the remote tries to change a locked mode. */
2366 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2368 struct l2cap_pinfo *pi = l2cap_pi(sk);
2369 struct l2cap_conf_req *req = data;
2370 void *ptr = req->data;
2373 struct l2cap_conf_rfc rfc;
2375 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2377 while (len >= L2CAP_CONF_OPT_SIZE) {
2378 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2381 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the spec minimum: refuse and counter
 * with the minimum. */
2382 if (val < L2CAP_DEFAULT_MIN_MTU) {
2383 *result = L2CAP_CONF_UNACCEPT;
2384 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2390 case L2CAP_CONF_FLUSH_TO:
2392 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2396 case L2CAP_CONF_RFC:
2397 if (olen == sizeof(rfc))
2398 memcpy(&rfc, (void *)val, olen);
/* Once STATE2_DEVICE is set the mode is locked; a differing mode in
 * the response is a protocol violation. */
2400 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2401 rfc.mode != pi->mode)
2402 return -ECONNREFUSED;
2404 pi->mode = rfc.mode;
2407 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2408 sizeof(rfc), (unsigned long) &rfc);
2413 if (*result == L2CAP_CONF_SUCCESS) {
/* Success: adopt the negotiated ERTM/streaming parameters. */
2415 case L2CAP_MODE_ERTM:
2416 pi->remote_tx_win = rfc.txwin_size;
2417 pi->retrans_timeout = rfc.retrans_timeout;
2418 pi->monitor_timeout = rfc.monitor_timeout;
2419 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2421 case L2CAP_MODE_STREAMING:
2422 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2427 req->dcid = cpu_to_le16(pi->dcid);
2428 req->flags = cpu_to_le16(0x0000);
/* Build a minimal Configuration Response (header only, no options) with
 * the given result and flags. */
2433 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2435 struct l2cap_conf_rsp *rsp = data;
2436 void *ptr = rsp->data;
2438 BT_DBG("sk %p", sk);
2440 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2441 rsp->result = cpu_to_le16(result);
2442 rsp->flags = cpu_to_le16(flags);
/* Handle an incoming Command Reject.  If it answers our outstanding
 * information request (matched by ident), treat the feature-mask exchange
 * as finished and start any channels waiting on it. */
2447 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2449 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored here. */
2451 if (rej->reason != 0x0000)
2454 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2455 cmd->ident == conn->info_ident) {
2456 del_timer(&conn->info_timer);
2458 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2459 conn->info_ident = 0;
2461 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener for the PSM,
 * enforce link security (except SDP), allocate and register a child
 * socket, then reply with success / pending / rejection.  If the reply is
 * pending with no info, kick off the feature-mask information exchange. */
2467 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2469 struct l2cap_chan_list *list = &conn->chan_list;
2470 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2471 struct l2cap_conn_rsp rsp;
2472 struct sock *sk, *parent;
2473 int result, status = L2CAP_CS_NO_INFO;
2475 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2476 __le16 psm = req->psm;
2478 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2480 /* Check if we have socket listening on psm */
2481 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2483 result = L2CAP_CR_BAD_PSM;
2487 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2488 if (psm != cpu_to_le16(0x0001) &&
2489 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: HCI "authentication failure" disconnect reason. */
2490 conn->disc_reason = 0x05;
2491 result = L2CAP_CR_SEC_BLOCK;
2495 result = L2CAP_CR_NO_MEM;
2497 /* Check for backlog size */
2498 if (sk_acceptq_is_full(parent)) {
2499 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2503 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2507 write_lock_bh(&list->lock);
2509 /* Check if we already have channel with that dcid */
2510 if (__l2cap_get_chan_by_dcid(list, scid)) {
2511 write_unlock_bh(&list->lock);
2512 sock_set_flag(sk, SOCK_ZAPPED);
2513 l2cap_sock_kill(sk);
2517 hci_conn_hold(conn->hcon);
/* Initialize the child from the listener and bind it to this link. */
2519 l2cap_sock_init(sk, parent);
2520 bacpy(&bt_sk(sk)->src, conn->src);
2521 bacpy(&bt_sk(sk)->dst, conn->dst);
2522 l2cap_pi(sk)->psm = psm;
2523 l2cap_pi(sk)->dcid = scid;
2525 __l2cap_chan_add(conn, sk, parent);
2526 dcid = l2cap_pi(sk)->scid;
2528 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2530 l2cap_pi(sk)->ident = cmd->ident;
/* Decide the response: success, or pending on authorization (deferred
 * setup), authentication, or the still-running info exchange. */
2532 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2533 if (l2cap_check_security(sk)) {
2534 if (bt_sk(sk)->defer_setup) {
2535 sk->sk_state = BT_CONNECT2;
2536 result = L2CAP_CR_PEND;
2537 status = L2CAP_CS_AUTHOR_PEND;
2538 parent->sk_data_ready(parent, 0);
2540 sk->sk_state = BT_CONFIG;
2541 result = L2CAP_CR_SUCCESS;
2542 status = L2CAP_CS_NO_INFO;
2545 sk->sk_state = BT_CONNECT2;
2546 result = L2CAP_CR_PEND;
2547 status = L2CAP_CS_AUTHEN_PEND;
2550 sk->sk_state = BT_CONNECT2;
2551 result = L2CAP_CR_PEND;
2552 status = L2CAP_CS_NO_INFO;
2555 write_unlock_bh(&list->lock);
2558 bh_unlock_sock(parent);
2561 rsp.scid = cpu_to_le16(scid);
2562 rsp.dcid = cpu_to_le16(dcid);
2563 rsp.result = cpu_to_le16(result);
2564 rsp.status = cpu_to_le16(status);
2565 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info: start the feature-mask info transaction now. */
2567 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2568 struct l2cap_info_req info;
2569 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2571 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2572 conn->info_ident = l2cap_get_ident(conn);
2574 mod_timer(&conn->info_timer, jiffies +
2575 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2577 l2cap_send_cmd(conn, conn->info_ident,
2578 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response to our connect request.  On success store
 * the remote CID and immediately send our first Configuration Request;
 * on pending just mark the channel; otherwise tear the channel down. */
2584 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2586 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2587 u16 scid, dcid, result, status;
2591 scid = __le16_to_cpu(rsp->scid);
2592 dcid = __le16_to_cpu(rsp->dcid);
2593 result = __le16_to_cpu(rsp->result);
2594 status = __le16_to_cpu(rsp->status);
2596 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid may be 0 in an early response; fall back to matching by ident. */
2599 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2603 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2609 case L2CAP_CR_SUCCESS:
2610 sk->sk_state = BT_CONFIG;
2611 l2cap_pi(sk)->ident = 0;
2612 l2cap_pi(sk)->dcid = dcid;
2613 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2615 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2617 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2618 l2cap_build_conf_req(sk, req), req);
2619 l2cap_pi(sk)->num_conf_req++;
2623 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Any other result refuses the connection. */
2627 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configuration Request.  Fragmented requests (continuation
 * flag set) are accumulated in pi->conf_req; a complete request is parsed,
 * answered, and — once both directions are configured — the channel is
 * brought up (ERTM state and timers initialized). */
2635 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2637 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2643 dcid = __le16_to_cpu(req->dcid);
2644 flags = __le16_to_cpu(req->flags);
2646 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2648 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2652 if (sk->sk_state == BT_DISCONN)
2655 /* Reject if config buffer is too small. */
2656 len = cmd_len - sizeof(*req);
2657 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2658 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2659 l2cap_build_conf_rsp(sk, rsp,
2660 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment's options. */
2665 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2666 l2cap_pi(sk)->conf_len += len;
/* Continuation flag: more fragments follow, ack with an empty rsp. */
2668 if (flags & 0x0001) {
2669 /* Incomplete config. Send empty response. */
2670 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2671 l2cap_build_conf_rsp(sk, rsp,
2672 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2676 /* Complete config. */
2677 len = l2cap_parse_conf_req(sk, rsp);
2679 l2cap_send_disconn_req(conn, sk);
2683 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2684 l2cap_pi(sk)->num_conf_rsp++;
2686 /* Reset config buffer. */
2687 l2cap_pi(sk)->conf_len = 0;
2689 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: finalize FCS choice and go live. */
2692 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2693 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2694 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2695 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2697 sk->sk_state = BT_CONNECTED;
2698 l2cap_pi(sk)->next_tx_seq = 0;
2699 l2cap_pi(sk)->expected_ack_seq = 0;
2700 l2cap_pi(sk)->unacked_frames = 0;
2702 setup_timer(&l2cap_pi(sk)->retrans_timer,
2703 l2cap_retrans_timeout, (unsigned long) sk);
2704 setup_timer(&l2cap_pi(sk)->monitor_timer,
2705 l2cap_monitor_timeout, (unsigned long) sk);
2707 __skb_queue_head_init(TX_QUEUE(sk));
2708 l2cap_chan_ready(sk);
/* If we haven't sent our own config request yet, do it now. */
2712 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2714 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2715 l2cap_build_conf_req(sk, buf), buf);
2716 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configuration Response.  On UNACCEPT, re-negotiate by parsing
 * their counter-proposal and sending a fresh request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); otherwise disconnect.  When the remote's
 * input side is done and ours is too, bring the channel up. */
2724 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2726 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2727 u16 scid, flags, result;
2730 scid = __le16_to_cpu(rsp->scid);
2731 flags = __le16_to_cpu(rsp->flags);
2732 result = __le16_to_cpu(rsp->result);
2734 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2735 scid, flags, result);
2737 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2742 case L2CAP_CONF_SUCCESS:
2745 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation only while under the round limit. */
2746 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2747 int len = cmd->len - sizeof(*rsp);
2750 /* throw out any old stored conf requests */
2751 result = L2CAP_CONF_SUCCESS;
2752 len = l2cap_parse_conf_rsp(sk, rsp->data,
2755 l2cap_send_disconn_req(conn, sk);
2759 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2760 L2CAP_CONF_REQ, len, req);
2761 l2cap_pi(sk)->num_conf_req++;
2762 if (result != L2CAP_CONF_SUCCESS)
/* Negotiation failed: force disconnect with a 5-second guard timer. */
2768 sk->sk_state = BT_DISCONN;
2769 sk->sk_err = ECONNRESET;
2770 l2cap_sock_set_timer(sk, HZ * 5);
2771 l2cap_send_disconn_req(conn, sk);
2778 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2780 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2781 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV)
2782 || l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2783 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2785 sk->sk_state = BT_CONNECTED;
2786 l2cap_pi(sk)->expected_tx_seq = 0;
2787 l2cap_pi(sk)->num_to_ack = 0;
2788 __skb_queue_head_init(TX_QUEUE(sk));
2789 l2cap_chan_ready(sk);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, flush the TX queue, stop ERTM timers, and remove + kill the
 * channel. */
2797 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2799 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2800 struct l2cap_disconn_rsp rsp;
2804 scid = __le16_to_cpu(req->scid);
2805 dcid = __le16_to_cpu(req->dcid);
2807 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* Their dcid is our scid: look the channel up by our local CID. */
2809 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2813 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2814 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2815 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2817 sk->sk_shutdown = SHUTDOWN_MASK;
2819 skb_queue_purge(TX_QUEUE(sk));
2820 del_timer(&l2cap_pi(sk)->retrans_timer);
2821 del_timer(&l2cap_pi(sk)->monitor_timer);
2823 l2cap_chan_del(sk, ECONNRESET);
2826 l2cap_sock_kill(sk);
/* Handle a Disconnection Response to our disconnect request: flush the
 * TX queue, stop ERTM timers, and remove + kill the channel (no error —
 * this is the expected completion of a local close). */
2830 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2832 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2836 scid = __le16_to_cpu(rsp->scid);
2837 dcid = __le16_to_cpu(rsp->dcid);
2839 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2841 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2845 skb_queue_purge(TX_QUEUE(sk));
2846 del_timer(&l2cap_pi(sk)->retrans_timer);
2847 del_timer(&l2cap_pi(sk)->monitor_timer);
2849 l2cap_chan_del(sk, 0);
2852 l2cap_sock_kill(sk);
/* Handle an Information Request.  Answers feature-mask and fixed-channel
 * queries; any other type gets a NOTSUPP response. */
2856 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2858 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2861 type = __le16_to_cpu(req->type);
2863 BT_DBG("type 0x%4.4x", type);
2865 if (type == L2CAP_IT_FEAT_MASK) {
2867 u32 feat_mask = l2cap_feat_mask;
2868 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2869 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2870 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* ERTM/streaming bits presumably advertised only when enable_ertm is
 * set — the condition line is not visible here; confirm in full source. */
2872 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2874 put_unaligned(cpu_to_le32(feat_mask), (__le32 *) rsp->data);
2875 l2cap_send_cmd(conn, cmd->ident,
2876 L2CAP_INFO_RSP, sizeof(buf), buf);
2877 } else if (type == L2CAP_IT_FIXED_CHAN) {
2879 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2880 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2881 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap follows the 4-byte rsp header. */
2882 memcpy(buf + 4, l2cap_fixed_chan, 8);
2883 l2cap_send_cmd(conn, cmd->ident,
2884 L2CAP_INFO_RSP, sizeof(buf), buf);
2886 struct l2cap_info_rsp rsp;
2887 rsp.type = cpu_to_le16(type);
2888 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2889 l2cap_send_cmd(conn, cmd->ident,
2890 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response.  A feature-mask answer may chain into
 * a fixed-channel query; once the exchange is complete, mark it done and
 * start any channels that were waiting on it. */
2896 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2898 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2901 type = __le16_to_cpu(rsp->type);
2902 result = __le16_to_cpu(rsp->result);
2904 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2906 del_timer(&conn->info_timer);
2908 if (type == L2CAP_IT_FEAT_MASK) {
2909 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Remote supports fixed channels: ask for its channel bitmap next. */
2911 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2912 struct l2cap_info_req req;
2913 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2915 conn->info_ident = l2cap_get_ident(conn);
2917 l2cap_send_cmd(conn, conn->info_ident,
2918 L2CAP_INFO_REQ, sizeof(req), &req);
2920 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2921 conn->info_ident = 0;
2923 l2cap_conn_start(conn);
2925 } else if (type == L2CAP_IT_FIXED_CHAN) {
2926 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2927 conn->info_ident = 0;
2929 l2cap_conn_start(conn);
/* Process all commands in a signalling-channel (CID 0x0001) frame.
 * The frame is first mirrored to raw sockets, then each command is
 * validated (length, non-zero ident) and dispatched; a handler error
 * produces a Command Reject back to the peer. */
2935 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2937 u8 *data = skb->data;
2939 struct l2cap_cmd_hdr cmd;
2942 l2cap_raw_recv(conn, skb);
/* A single frame may carry several concatenated commands. */
2944 while (len >= L2CAP_CMD_HDR_SIZE) {
2946 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2947 data += L2CAP_CMD_HDR_SIZE;
2948 len -= L2CAP_CMD_HDR_SIZE;
2950 cmd_len = le16_to_cpu(cmd.len);
2952 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Ident 0 is reserved; a length past the buffer means corruption. */
2954 if (cmd_len > len || !cmd.ident) {
2955 BT_DBG("corrupted command");
2960 case L2CAP_COMMAND_REJ:
2961 l2cap_command_rej(conn, &cmd, data);
2964 case L2CAP_CONN_REQ:
2965 err = l2cap_connect_req(conn, &cmd, data);
2968 case L2CAP_CONN_RSP:
2969 err = l2cap_connect_rsp(conn, &cmd, data);
2972 case L2CAP_CONF_REQ:
2973 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2976 case L2CAP_CONF_RSP:
2977 err = l2cap_config_rsp(conn, &cmd, data);
2980 case L2CAP_DISCONN_REQ:
2981 err = l2cap_disconnect_req(conn, &cmd, data);
2984 case L2CAP_DISCONN_RSP:
2985 err = l2cap_disconnect_rsp(conn, &cmd, data);
2988 case L2CAP_ECHO_REQ:
/* Echo simply reflects the payload back. */
2989 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2992 case L2CAP_ECHO_RSP:
2995 case L2CAP_INFO_REQ:
2996 err = l2cap_information_req(conn, &cmd, data);
2999 case L2CAP_INFO_RSP:
3000 err = l2cap_information_rsp(conn, &cmd, data);
3004 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3010 struct l2cap_cmd_rej rej;
3011 BT_DBG("error %d", err);
3013 /* FIXME: Map err to a valid reason */
3014 rej.reason = cpu_to_le16(0);
3015 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the 2-byte CRC16 FCS at the tail of a received
 * ERTM/streaming frame.  The CRC covers the L2CAP header (hdr_size bytes
 * before skb->data: basic header + 2-byte control field) plus the
 * payload.  No-op when FCS is disabled on this channel. */
3025 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3027 u16 our_fcs, rcv_fcs;
3028 int hdr_size = L2CAP_HDR_SIZE + 2;
3030 if (pi->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off first; it then sits just past the new skb->len. */
3031 skb_trim(skb, skb->len - 2);
3032 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3033 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3035 if (our_fcs != rcv_fcs)
/* Reassemble incoming SAR-segmented SDUs.  Unsegmented frames are queued
 * directly; START allocates pi->sdu sized from the embedded SDU-length
 * field, CONTINUE appends with length checking, END completes and queues
 * the reassembled SDU.  L2CAP_CONN_SAR_SDU tracks an in-progress SDU. */
3041 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3043 struct l2cap_pinfo *pi = l2cap_pi(sk);
3044 struct sk_buff *_skb;
3047 switch (control & L2CAP_CTRL_SAR) {
3048 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while reassembling is a protocol violation. */
3049 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3054 err = sock_queue_rcv_skb(sk, skb);
3060 case L2CAP_SDU_START:
3061 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* START frame payload begins with the total SDU length (LE16). */
3066 pi->sdu_len = get_unaligned_le16(skb->data);
3069 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3075 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3077 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3078 pi->partial_sdu_len = skb->len;
3082 case L2CAP_SDU_CONTINUE:
3083 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3086 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3088 pi->partial_sdu_len += skb->len;
/* Running total must never exceed the announced SDU length. */
3089 if (pi->partial_sdu_len > pi->sdu_len)
3097 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3100 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3102 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3103 pi->partial_sdu_len += skb->len;
/* Deliver only if the reassembled size matches exactly. */
3105 if (pi->partial_sdu_len == pi->sdu_len) {
3106 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3107 err = sock_queue_rcv_skb(sk, _skb);
/* Handle a received ERTM I-frame.  In-sequence frames are fed to SAR
 * reassembly and periodically acknowledged with RR (every
 * L2CAP_DEFAULT_NUM_TO_ACK frames); an out-of-sequence frame triggers a
 * REJ S-frame once per recovery episode (CONN_UNDER_REJ). */
3121 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3123 struct l2cap_pinfo *pi = l2cap_pi(sk);
3124 u8 tx_seq = __get_txseq(rx_control);
3128 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3130 if (tx_seq == pi->expected_tx_seq) {
/* Expected frame received: REJ recovery (if any) is over. */
3131 if (pi->conn_state & L2CAP_CONN_UNDER_REJ)
3132 pi->conn_state &= ~L2CAP_CONN_UNDER_REJ;
3134 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* ERTM sequence numbers are modulo 64. */
3138 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3139 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3140 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3141 tx_control |= L2CAP_SUPER_RCV_READY;
3142 tx_control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3146 /* Unexpected txSeq. Send a REJ S-frame */
3148 if (!(pi->conn_state & L2CAP_CONN_UNDER_REJ)) {
3149 tx_control |= L2CAP_SUPER_REJECT;
3150 tx_control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3151 pi->conn_state |= L2CAP_CONN_UNDER_REJ;
3159 return l2cap_send_sframe(pi, tx_control);
/* Handle a received ERTM S-frame (supervisory).  RR with P-bit is
 * answered with RR+F; RR with F-bit ends the WAIT_F monitor state;
 * plain RR acks frames and resumes transmission.  REJ rewinds the
 * transmit window to the rejected sequence and retransmits. */
3162 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3164 struct l2cap_pinfo *pi = l2cap_pi(sk);
3166 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3168 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3169 case L2CAP_SUPER_RCV_READY:
3170 if (rx_control & L2CAP_CTRL_POLL) {
/* Poll: answer immediately with RR carrying the Final bit. */
3171 u16 control = L2CAP_CTRL_FINAL;
3172 control |= L2CAP_SUPER_RCV_READY;
3173 l2cap_send_sframe(l2cap_pi(sk), control);
3174 } else if (rx_control & L2CAP_CTRL_FINAL) {
3175 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
/* Final bit answers our poll: stop the monitor timer. */
3178 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3179 del_timer(&pi->monitor_timer);
3181 if (pi->unacked_frames > 0)
3182 __mod_retrans_timer();
/* Plain RR: acknowledge up to reqseq and keep sending. */
3184 pi->expected_ack_seq = __get_reqseq(rx_control);
3185 l2cap_drop_acked_frames(sk);
3186 if (pi->unacked_frames > 0)
3187 __mod_retrans_timer();
3188 l2cap_ertm_send(sk);
3192 case L2CAP_SUPER_REJECT:
3193 pi->expected_ack_seq = __get_reqseq(rx_control);
3194 l2cap_drop_acked_frames(sk);
/* Rewind: retransmit from the first unacked frame. */
3196 sk->sk_send_head = TX_QUEUE(sk)->next;
3197 pi->next_tx_seq = pi->expected_ack_seq;
3199 l2cap_ertm_send(sk);
/* RNR and SREJ are not handled in this version. */
3203 case L2CAP_SUPER_RCV_NOT_READY:
3204 case L2CAP_SUPER_SELECT_REJECT:
3211 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3214 struct l2cap_pinfo *pi;
3219 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3221 BT_DBG("unknown cid 0x%4.4x", cid);
3227 BT_DBG("sk %p, len %d", sk, skb->len);
3229 if (sk->sk_state != BT_CONNECTED)
3233 case L2CAP_MODE_BASIC:
3234 /* If socket recv buffers overflows we drop data here
3235 * which is *bad* because L2CAP has to be reliable.
3236 * But we don't have any other choice. L2CAP doesn't
3237 * provide flow control mechanism. */
3239 if (pi->imtu < skb->len)
3242 if (!sock_queue_rcv_skb(sk, skb))
3246 case L2CAP_MODE_ERTM:
3247 control = get_unaligned_le16(skb->data);
3251 if (__is_sar_start(control))
3254 if (pi->fcs == L2CAP_FCS_CRC16)
3258 * We can just drop the corrupted I-frame here.
3259 * Receiver will miss it and start proper recovery
3260 * procedures and ask retransmission.
3262 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3265 if (l2cap_check_fcs(pi, skb))
3268 if (__is_iframe(control))
3269 err = l2cap_data_channel_iframe(sk, control, skb);
3271 err = l2cap_data_channel_sframe(sk, control, skb);
3277 case L2CAP_MODE_STREAMING:
3278 control = get_unaligned_le16(skb->data);
3282 if (__is_sar_start(control))
3285 if (pi->fcs == L2CAP_FCS_CRC16)
3288 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3291 if (l2cap_check_fcs(pi, skb))
3294 tx_seq = __get_txseq(control);
3296 if (pi->expected_tx_seq == tx_seq)
3297 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3299 pi->expected_tx_seq = tx_seq + 1;
3301 err = l2cap_sar_reassembly_sdu(sk, skb, control);
3306 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
3320 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3324 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3328 BT_DBG("sk %p, len %d", sk, skb->len);
3330 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3333 if (l2cap_pi(sk)->imtu < skb->len)
3336 if (!sock_queue_rcv_skb(sk, skb))
3348 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3350 struct l2cap_hdr *lh = (void *) skb->data;
3354 skb_pull(skb, L2CAP_HDR_SIZE);
3355 cid = __le16_to_cpu(lh->cid);
3356 len = __le16_to_cpu(lh->len);
3358 if (len != skb->len) {
3363 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3366 case L2CAP_CID_SIGNALING:
3367 l2cap_sig_channel(conn, skb);
3370 case L2CAP_CID_CONN_LESS:
3371 psm = get_unaligned((__le16 *) skb->data);
3373 l2cap_conless_channel(conn, psm, skb);
3377 l2cap_data_channel(conn, cid, skb);
3382 /* ---- L2CAP interface with lower layer (HCI) ---- */
3384 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3386 int exact = 0, lm1 = 0, lm2 = 0;
3387 register struct sock *sk;
3388 struct hlist_node *node;
3390 if (type != ACL_LINK)
3393 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3395 /* Find listening sockets and check their link_mode */
3396 read_lock(&l2cap_sk_list.lock);
3397 sk_for_each(sk, node, &l2cap_sk_list.head) {
3398 if (sk->sk_state != BT_LISTEN)
3401 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3402 lm1 |= HCI_LM_ACCEPT;
3403 if (l2cap_pi(sk)->role_switch)
3404 lm1 |= HCI_LM_MASTER;
3406 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3407 lm2 |= HCI_LM_ACCEPT;
3408 if (l2cap_pi(sk)->role_switch)
3409 lm2 |= HCI_LM_MASTER;
3412 read_unlock(&l2cap_sk_list.lock);
3414 return exact ? lm1 : lm2;
3417 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3419 struct l2cap_conn *conn;
3421 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3423 if (hcon->type != ACL_LINK)
3427 conn = l2cap_conn_add(hcon, status);
3429 l2cap_conn_ready(conn);
3431 l2cap_conn_del(hcon, bt_err(status));
3436 static int l2cap_disconn_ind(struct hci_conn *hcon)
3438 struct l2cap_conn *conn = hcon->l2cap_data;
3440 BT_DBG("hcon %p", hcon);
3442 if (hcon->type != ACL_LINK || !conn)
3445 return conn->disc_reason;
3448 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3450 BT_DBG("hcon %p reason %d", hcon, reason);
3452 if (hcon->type != ACL_LINK)
3455 l2cap_conn_del(hcon, bt_err(reason));
3460 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3462 if (sk->sk_type != SOCK_SEQPACKET)
3465 if (encrypt == 0x00) {
3466 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3467 l2cap_sock_clear_timer(sk);
3468 l2cap_sock_set_timer(sk, HZ * 5);
3469 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3470 __l2cap_sock_close(sk, ECONNREFUSED);
3472 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3473 l2cap_sock_clear_timer(sk);
3477 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3479 struct l2cap_chan_list *l;
3480 struct l2cap_conn *conn = hcon->l2cap_data;
3486 l = &conn->chan_list;
3488 BT_DBG("conn %p", conn);
3490 read_lock(&l->lock);
3492 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
3495 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3500 if (!status && (sk->sk_state == BT_CONNECTED ||
3501 sk->sk_state == BT_CONFIG)) {
3502 l2cap_check_encryption(sk, encrypt);
3507 if (sk->sk_state == BT_CONNECT) {
3509 struct l2cap_conn_req req;
3510 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3511 req.psm = l2cap_pi(sk)->psm;
3513 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3515 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3516 L2CAP_CONN_REQ, sizeof(req), &req);
3518 l2cap_sock_clear_timer(sk);
3519 l2cap_sock_set_timer(sk, HZ / 10);
3521 } else if (sk->sk_state == BT_CONNECT2) {
3522 struct l2cap_conn_rsp rsp;
3526 sk->sk_state = BT_CONFIG;
3527 result = L2CAP_CR_SUCCESS;
3529 sk->sk_state = BT_DISCONN;
3530 l2cap_sock_set_timer(sk, HZ / 10);
3531 result = L2CAP_CR_SEC_BLOCK;
3534 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3535 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3536 rsp.result = cpu_to_le16(result);
3537 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3538 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3539 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3545 read_unlock(&l->lock);
3550 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3552 struct l2cap_conn *conn = hcon->l2cap_data;
3554 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3557 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3559 if (flags & ACL_START) {
3560 struct l2cap_hdr *hdr;
3564 BT_ERR("Unexpected start frame (len %d)", skb->len);
3565 kfree_skb(conn->rx_skb);
3566 conn->rx_skb = NULL;
3568 l2cap_conn_unreliable(conn, ECOMM);
3572 BT_ERR("Frame is too short (len %d)", skb->len);
3573 l2cap_conn_unreliable(conn, ECOMM);
3577 hdr = (struct l2cap_hdr *) skb->data;
3578 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3580 if (len == skb->len) {
3581 /* Complete frame received */
3582 l2cap_recv_frame(conn, skb);
3586 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3588 if (skb->len > len) {
3589 BT_ERR("Frame is too long (len %d, expected len %d)",
3591 l2cap_conn_unreliable(conn, ECOMM);
3595 /* Allocate skb for the complete frame (with header) */
3596 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3600 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3602 conn->rx_len = len - skb->len;
3604 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
3606 if (!conn->rx_len) {
3607 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3608 l2cap_conn_unreliable(conn, ECOMM);
3612 if (skb->len > conn->rx_len) {
3613 BT_ERR("Fragment is too long (len %d, expected %d)",
3614 skb->len, conn->rx_len);
3615 kfree_skb(conn->rx_skb);
3616 conn->rx_skb = NULL;
3618 l2cap_conn_unreliable(conn, ECOMM);
3622 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3624 conn->rx_len -= skb->len;
3626 if (!conn->rx_len) {
3627 /* Complete frame received */
3628 l2cap_recv_frame(conn, conn->rx_skb);
3629 conn->rx_skb = NULL;
3638 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3641 struct hlist_node *node;
3644 read_lock_bh(&l2cap_sk_list.lock);
3646 sk_for_each(sk, node, &l2cap_sk_list.head) {
3647 struct l2cap_pinfo *pi = l2cap_pi(sk);
3649 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3650 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3651 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3652 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3655 read_unlock_bh(&l2cap_sk_list.lock);
3660 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
3662 static const struct proto_ops l2cap_sock_ops = {
3663 .family = PF_BLUETOOTH,
3664 .owner = THIS_MODULE,
3665 .release = l2cap_sock_release,
3666 .bind = l2cap_sock_bind,
3667 .connect = l2cap_sock_connect,
3668 .listen = l2cap_sock_listen,
3669 .accept = l2cap_sock_accept,
3670 .getname = l2cap_sock_getname,
3671 .sendmsg = l2cap_sock_sendmsg,
3672 .recvmsg = l2cap_sock_recvmsg,
3673 .poll = bt_sock_poll,
3674 .ioctl = bt_sock_ioctl,
3675 .mmap = sock_no_mmap,
3676 .socketpair = sock_no_socketpair,
3677 .shutdown = l2cap_sock_shutdown,
3678 .setsockopt = l2cap_sock_setsockopt,
3679 .getsockopt = l2cap_sock_getsockopt
3682 static struct net_proto_family l2cap_sock_family_ops = {
3683 .family = PF_BLUETOOTH,
3684 .owner = THIS_MODULE,
3685 .create = l2cap_sock_create,
3688 static struct hci_proto l2cap_hci_proto = {
3690 .id = HCI_PROTO_L2CAP,
3691 .connect_ind = l2cap_connect_ind,
3692 .connect_cfm = l2cap_connect_cfm,
3693 .disconn_ind = l2cap_disconn_ind,
3694 .disconn_cfm = l2cap_disconn_cfm,
3695 .security_cfm = l2cap_security_cfm,
3696 .recv_acldata = l2cap_recv_acldata
3699 static int __init l2cap_init(void)
3703 err = proto_register(&l2cap_proto, 0);
3707 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
3709 BT_ERR("L2CAP socket registration failed");
3713 err = hci_register_proto(&l2cap_hci_proto);
3715 BT_ERR("L2CAP protocol registration failed");
3716 bt_sock_unregister(BTPROTO_L2CAP);
3720 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
3721 BT_ERR("Failed to create L2CAP info file");
3723 BT_INFO("L2CAP ver %s", VERSION);
3724 BT_INFO("L2CAP socket layer initialized");
3729 proto_unregister(&l2cap_proto);
3733 static void __exit l2cap_exit(void)
3735 class_remove_file(bt_class, &class_attr_l2cap);
3737 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
3738 BT_ERR("L2CAP socket unregistration failed");
3740 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3741 BT_ERR("L2CAP protocol unregistration failed");
3743 proto_unregister(&l2cap_proto);
3746 void l2cap_load(void)
3748 /* Dummy function to trigger automatic L2CAP module loading by
3749 * other modules that use L2CAP sockets but don't use any other
3750 * symbols from it. */
3753 EXPORT_SYMBOL(l2cap_load);
3755 module_init(l2cap_init);
3756 module_exit(l2cap_exit);
3758 module_param(enable_ertm, bool, 0644);
3759 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
3761 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
3762 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
3763 MODULE_VERSION(VERSION);
3764 MODULE_LICENSE("GPL");
3765 MODULE_ALIAS("bt-proto-0");