2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
44 #include <linux/crc16.h>
47 #include <asm/system.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52 #include <net/bluetooth/l2cap.h>
54 #define VERSION "2.14"
/* ERTM (Enhanced Retransmission Mode) support is off by default; module
 * parameter declaration is presumably elsewhere in the file. */
56 static int enable_ertm = 0;
/* Local feature mask and fixed-channel bitmap advertised in the
 * Information Response (0x02 = fixed channel 1, L2CAP signalling). */
58 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
59 static u8 l2cap_fixed_chan[8] = { 0x02, };
61 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
63 static struct bt_sock_list l2cap_sk_list = {
64 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for the socket teardown path and the signalling
 * command builder used by l2cap_send_cmd(). */
67 static void __l2cap_sock_close(struct sock *sk, int reason);
68 static void l2cap_sock_close(struct sock *sk);
69 static void l2cap_sock_kill(struct sock *sk);
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
74 /* ---- L2CAP timers ---- */
/* sk_timer callback: map the socket state to an errno and close the
 * channel with it.  NOTE(review): this extract is missing lines (braces,
 * the `reason` declaration and its default value) from the original. */
75 static void l2cap_sock_timeout(unsigned long arg)
77 struct sock *sk = (struct sock *) arg;
80 BT_DBG("sock %p state %d", sk, sk->sk_state);
84 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
85 reason = ECONNREFUSED;
86 else if (sk->sk_state == BT_CONNECT &&
87 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
88 reason = ECONNREFUSED;
92 __l2cap_sock_close(sk, reason);
/* (Re)arm sk_timer to fire `timeout` jiffies from now. */
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer, if any. */
106 static void l2cap_sock_clear_timer(struct sock *sk)
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
112 /* ---- L2CAP channels ---- */
/* ---- Channel lookup helpers ----
 * The __ variants walk conn->chan_list and require the caller to hold
 * the list lock; they return the matching sock or (presumably) NULL. */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
133 /* Find channel with given SCID.
134 * Returns locked socket */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
139 s = __l2cap_get_chan_by_scid(l, cid);
142 read_unlock(&l->lock);
/* Match a channel by the signalling-command identifier it is waiting on. */
146 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
149 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
150 if (l2cap_pi(s)->ident == ident)
156 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
160 s = __l2cap_get_chan_by_ident(l, ident);
163 read_unlock(&l->lock);
/* Pick the first free source CID in the dynamic range
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
167 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
169 u16 cid = L2CAP_CID_DYN_START;
171 for (; cid < L2CAP_CID_DYN_END; cid++) {
172 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the connection's doubly linked channel list.
 * Caller must hold the list lock (the l2cap_chan_unlink counterpart
 * takes it itself). */
179 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
184 l2cap_pi(l->head)->prev_c = sk;
186 l2cap_pi(sk)->next_c = l->head;
187 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the channel list, fixing up neighbours (and, in the
 * missing lines, presumably l->head when sk was the head). */
191 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
193 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
195 write_lock_bh(&l->lock);
200 l2cap_pi(next)->prev_c = prev;
202 l2cap_pi(prev)->next_c = next;
203 write_unlock_bh(&l->lock);
/* Attach sk to an L2CAP connection: assign source/destination CIDs
 * according to socket type, link it into conn->chan_list, and (for
 * incoming channels) queue it on the listening parent's accept queue. */
208 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
210 struct l2cap_chan_list *l = &conn->chan_list;
212 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
213 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: HCI "remote user terminated connection" — default disconnect
 * reason until something more specific happens. */
215 conn->disc_reason = 0x13;
217 l2cap_pi(sk)->conn = conn;
219 if (sk->sk_type == SOCK_SEQPACKET) {
220 /* Alloc CID for connection-oriented socket */
221 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
222 } else if (sk->sk_type == SOCK_DGRAM) {
223 /* Connectionless socket */
224 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
228 /* Raw socket can send/recv signalling messages only */
229 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
231 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 __l2cap_chan_link(l, sk);
237 bt_accept_enqueue(parent, sk);
/* Detach sk from its connection and mark it closed/zapped.
241 * Must be called on the locked socket. */
242 static void l2cap_chan_del(struct sock *sk, int err)
244 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
245 struct sock *parent = bt_sk(sk)->parent;
247 l2cap_sock_clear_timer(sk);
249 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
252 /* Unlink from channel list */
253 l2cap_chan_unlink(&conn->chan_list, sk);
254 l2cap_pi(sk)->conn = NULL;
/* Drop the reference taken when the channel was attached to the ACL. */
255 hci_conn_put(conn->hcon);
258 sk->sk_state = BT_CLOSED;
259 sock_set_flag(sk, SOCK_ZAPPED);
/* If still on a parent's accept queue, unlink and wake the listener. */
265 bt_accept_unlink(sk);
266 parent->sk_data_ready(parent, 0);
268 sk->sk_state_change(sk);
271 /* Service level security */
/* Service level security: derive the HCI authentication requirement from
 * the channel's PSM and security level, then ask the HCI layer to
 * enforce it on the underlying ACL link.
 * PSM 0x0001 (SDP) never bonds; SDP channels at BT_SECURITY_LOW are
 * promoted to BT_SECURITY_SDP. */
272 static inline int l2cap_check_security(struct sock *sk)
274 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
277 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
278 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
279 auth_type = HCI_AT_NO_BONDING_MITM;
281 auth_type = HCI_AT_NO_BONDING;
283 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
284 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
286 switch (l2cap_pi(sk)->sec_level) {
287 case BT_SECURITY_HIGH:
288 auth_type = HCI_AT_GENERAL_BONDING_MITM;
290 case BT_SECURITY_MEDIUM:
291 auth_type = HCI_AT_GENERAL_BONDING;
294 auth_type = HCI_AT_NO_BONDING;
299 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection;
 * serialized with conn->lock. */
303 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
313 spin_lock_bh(&conn->lock);
/* Wrap back to the start of the kernel range after 128. */
315 if (++conn->tx_ident > 128)
320 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and push it out on the ACL link. */
325 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
327 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
329 BT_DBG("code 0x%2.2x", code);
334 return hci_send_acl(conn->hcon, skb, 0);
/* Build and send an ERTM S-frame: L2CAP header + 16-bit control field,
 * plus an FCS trailer when CRC16 is negotiated.  NOTE(review): the line
 * growing `hlen` by 2 for the FCS case is missing from this extract. */
337 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
340 struct l2cap_hdr *lh;
341 struct l2cap_conn *conn = pi->conn;
342 int count, hlen = L2CAP_HDR_SIZE + 2;
344 if (pi->fcs == L2CAP_FCS_CRC16)
347 BT_DBG("pi %p, control 0x%2.2x", pi, control);
349 count = min_t(unsigned int, conn->mtu, hlen);
350 control |= L2CAP_CTRL_FRAME_TYPE;
352 skb = bt_skb_alloc(count, GFP_ATOMIC);
356 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
357 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
358 lh->cid = cpu_to_le16(pi->dcid);
359 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control (everything before the 2 FCS bytes). */
361 if (pi->fcs == L2CAP_FCS_CRC16) {
362 u16 fcs = crc16(0, (u8 *)lh, count - 2);
363 put_unaligned_le16(fcs, skb_put(skb, 2));
366 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR (receiver ready) or RNR (receiver not ready) depending on
 * whether we are locally busy, stamping in our current ReqSeq. */
369 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
371 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
372 control |= L2CAP_SUPER_RCV_NOT_READY;
374 control |= L2CAP_SUPER_RCV_READY;
376 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
378 return l2cap_send_sframe(pi, control);
/* Kick off channel establishment: if the remote feature mask is already
 * known, send a Connection Request (once security allows); otherwise
 * first issue an Information Request for the feature mask and start the
 * info timer. */
381 static void l2cap_do_start(struct sock *sk)
383 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
385 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight — wait for it to finish. */
386 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
389 if (l2cap_check_security(sk)) {
390 struct l2cap_conn_req req;
391 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
392 req.psm = l2cap_pi(sk)->psm;
394 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
396 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
397 L2CAP_CONN_REQ, sizeof(req), &req);
400 struct l2cap_info_req req;
401 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
403 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
404 conn->info_ident = l2cap_get_ident(conn);
406 mod_timer(&conn->info_timer, jiffies +
407 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
409 l2cap_send_cmd(conn, conn->info_ident,
410 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send an L2CAP Disconnection Request for this channel's CID pair. */
414 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
416 struct l2cap_disconn_req req;
418 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
419 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
420 l2cap_send_cmd(conn, l2cap_get_ident(conn),
421 L2CAP_DISCONN_REQ, sizeof(req), &req);
424 /* ---- L2CAP connections ---- */
/* ---- L2CAP connections ----
 * Walk every channel on the connection and advance its state machine:
 * BT_CONNECT channels get a Connection Request (security permitting);
 * BT_CONNECT2 channels get a pending/success/authentication-pending
 * Connection Response.  Runs under the chan_list read lock. */
425 static void l2cap_conn_start(struct l2cap_conn *conn)
427 struct l2cap_chan_list *l = &conn->chan_list;
430 BT_DBG("conn %p", conn);
434 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented (SEQPACKET) channels negotiate. */
437 if (sk->sk_type != SOCK_SEQPACKET) {
442 if (sk->sk_state == BT_CONNECT) {
443 if (l2cap_check_security(sk)) {
444 struct l2cap_conn_req req;
445 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
446 req.psm = l2cap_pi(sk)->psm;
448 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
450 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
451 L2CAP_CONN_REQ, sizeof(req), &req);
453 } else if (sk->sk_state == BT_CONNECT2) {
454 struct l2cap_conn_rsp rsp;
/* Note the swap: our scid is the peer's dcid and vice versa. */
455 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
456 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
458 if (l2cap_check_security(sk)) {
459 if (bt_sk(sk)->defer_setup) {
460 struct sock *parent = bt_sk(sk)->parent;
461 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
462 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
463 parent->sk_data_ready(parent, 0);
466 sk->sk_state = BT_CONFIG;
467 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
468 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
471 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
472 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
475 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
476 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
482 read_unlock(&l->lock);
/* ACL link came up: non-SEQPACKET channels become connected right away;
 * SEQPACKET channels in BT_CONNECT start L2CAP-level setup. */
485 static void l2cap_conn_ready(struct l2cap_conn *conn)
487 struct l2cap_chan_list *l = &conn->chan_list;
490 BT_DBG("conn %p", conn);
494 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
497 if (sk->sk_type != SOCK_SEQPACKET) {
498 l2cap_sock_clear_timer(sk);
499 sk->sk_state = BT_CONNECTED;
500 sk->sk_state_change(sk);
501 } else if (sk->sk_state == BT_CONNECT)
507 read_unlock(&l->lock);
510 /* Notify sockets that we cannot guaranty reliability anymore */
511 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
513 struct l2cap_chan_list *l = &conn->chan_list;
516 BT_DBG("conn %p", conn);
520 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only channels that demanded reliability get the error report. */
521 if (l2cap_pi(sk)->force_reliable)
525 read_unlock(&l->lock);
/* Info timer expired without a response: give up on the feature-mask
 * exchange and let pending channels proceed anyway. */
528 static void l2cap_info_timeout(unsigned long arg)
530 struct l2cap_conn *conn = (void *) arg;
532 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
533 conn->info_ident = 0;
535 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link and initialize its locks, MTU, addresses and info timer. */
538 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
540 struct l2cap_conn *conn = hcon->l2cap_data;
545 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
549 hcon->l2cap_data = conn;
552 BT_DBG("hcon %p conn %p", hcon, conn);
554 conn->mtu = hcon->hdev->acl_mtu;
555 conn->src = &hcon->hdev->bdaddr;
556 conn->dst = &hcon->dst;
560 spin_lock_init(&conn->lock);
561 rwlock_init(&conn->chan_list.lock);
563 setup_timer(&conn->info_timer, l2cap_info_timeout,
564 (unsigned long) conn);
/* 0x13: HCI "remote user terminated connection" default. */
566 conn->disc_reason = 0x13;
/* Tear down an L2CAP connection: free any partial reassembly skb, close
 * every channel with `err`, stop the info timer and detach from hcon. */
571 static void l2cap_conn_del(struct hci_conn *hcon, int err)
573 struct l2cap_conn *conn = hcon->l2cap_data;
579 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
581 kfree_skb(conn->rx_skb);
584 while ((sk = conn->chan_list.head)) {
586 l2cap_chan_del(sk, err);
591 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
592 del_timer_sync(&conn->info_timer);
594 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
598 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
600 struct l2cap_chan_list *l = &conn->chan_list;
601 write_lock_bh(&l->lock);
602 __l2cap_chan_add(conn, sk, parent);
603 write_unlock_bh(&l->lock);
606 /* ---- Socket interface ---- */
/* ---- Socket interface ----
 * Exact-match lookup: socket bound to this PSM and source address.
 * Caller must hold l2cap_sk_list.lock. */
607 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
610 struct hlist_node *node;
611 sk_for_each(sk, node, &l2cap_sk_list.head)
612 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
619 /* Find socket with psm and source bdaddr.
620 * Returns closest match.
622 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
624 struct sock *sk = NULL, *sk1 = NULL;
625 struct hlist_node *node;
627 sk_for_each(sk, node, &l2cap_sk_list.head) {
628 if (state && sk->sk_state != state)
631 if (l2cap_pi(sk)->psm == psm) {
/* Exact source-address match wins outright... */
633 if (!bacmp(&bt_sk(sk)->src, src))
/* ...otherwise remember a BDADDR_ANY wildcard as fallback. */
637 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop exited early on an exact match. */
641 return node ? sk : sk1;
644 /* Find socket with given address (psm, src).
645 * Returns locked socket */
646 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
649 read_lock(&l2cap_sk_list.lock);
650 s = __l2cap_get_sock_by_psm(state, psm, src);
653 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any queued receive/transmit skbs. */
657 static void l2cap_sock_destruct(struct sock *sk)
661 skb_queue_purge(&sk->sk_receive_queue);
662 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
665 static void l2cap_sock_cleanup_listen(struct sock *parent)
669 BT_DBG("parent %p", parent);
671 /* Close not yet accepted channels */
672 while ((sk = bt_accept_dequeue(parent, NULL)))
673 l2cap_sock_close(sk);
675 parent->sk_state = BT_CLOSED;
676 sock_set_flag(parent, SOCK_ZAPPED);
679 /* Kill socket (only if zapped and orphan)
680 * Must be called on unlocked socket.
682 static void l2cap_sock_kill(struct sock *sk)
684 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
687 BT_DBG("sk %p state %d", sk, sk->sk_state);
689 /* Kill poor orphan */
690 bt_sock_unlink(&l2cap_sk_list, sk);
691 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners clean their accept queue; connected
 * SEQPACKET channels send a Disconnection Request and wait in
 * BT_DISCONN; half-open incoming channels (BT_CONNECT2 with a pending
 * response) reject with SEC_BLOCK or BAD_PSM; everything else is torn
 * down directly.  Caller holds the socket lock. */
695 static void __l2cap_sock_close(struct sock *sk, int reason)
697 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
699 switch (sk->sk_state) {
701 l2cap_sock_cleanup_listen(sk);
706 if (sk->sk_type == SOCK_SEQPACKET) {
707 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
709 sk->sk_state = BT_DISCONN;
710 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
711 l2cap_send_disconn_req(conn, sk);
713 l2cap_chan_del(sk, reason);
717 if (sk->sk_type == SOCK_SEQPACKET) {
718 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
719 struct l2cap_conn_rsp rsp;
/* Deferred-setup channels were never authorized -> security block;
 * otherwise report an unknown PSM. */
722 if (bt_sk(sk)->defer_setup)
723 result = L2CAP_CR_SEC_BLOCK;
725 result = L2CAP_CR_BAD_PSM;
727 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
728 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
729 rsp.result = cpu_to_le16(result);
730 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
731 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
732 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
734 l2cap_chan_del(sk, reason);
739 l2cap_chan_del(sk, reason);
743 sock_set_flag(sk, SOCK_ZAPPED);
748 /* Must be called on unlocked socket. */
749 static void l2cap_sock_close(struct sock *sk)
751 l2cap_sock_clear_timer(sk);
753 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a new L2CAP socket: inherit type, deferred setup, MTUs,
 * mode, FCS and security settings from an accepting parent, or apply
 * protocol defaults for a fresh socket; then set default config options
 * and initialize the ERTM transmit/SREJ queues. */
758 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
760 struct l2cap_pinfo *pi = l2cap_pi(sk);
765 sk->sk_type = parent->sk_type;
766 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
768 pi->imtu = l2cap_pi(parent)->imtu;
769 pi->omtu = l2cap_pi(parent)->omtu;
770 pi->mode = l2cap_pi(parent)->mode;
771 pi->fcs = l2cap_pi(parent)->fcs;
772 pi->sec_level = l2cap_pi(parent)->sec_level;
773 pi->role_switch = l2cap_pi(parent)->role_switch;
774 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: protocol defaults. */
776 pi->imtu = L2CAP_DEFAULT_MTU;
778 pi->mode = L2CAP_MODE_BASIC;
779 pi->fcs = L2CAP_FCS_CRC16;
780 pi->sec_level = BT_SECURITY_LOW;
782 pi->force_reliable = 0;
785 /* Default config options */
787 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
788 skb_queue_head_init(TX_QUEUE(sk));
789 skb_queue_head_init(SREJ_QUEUE(sk));
790 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Proto descriptor: sizes sk_alloc's per-socket area for l2cap_pinfo. */
793 static struct proto l2cap_proto = {
795 .owner = THIS_MODULE,
796 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally set up a new L2CAP sock, link it into the
 * global socket list and arm its timer callback. */
799 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
803 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
807 sock_init_data(sock, sk);
808 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
810 sk->sk_destruct = l2cap_sock_destruct;
811 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
813 sock_reset_flag(sk, SOCK_ZAPPED);
815 sk->sk_protocol = proto;
816 sk->sk_state = BT_OPEN;
818 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
820 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type (SEQPACKET, DGRAM or
 * privileged RAW), install the proto_ops and allocate the sock. */
824 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
829 BT_DBG("sock %p", sock);
831 sock->state = SS_UNCONNECTED;
833 if (sock->type != SOCK_SEQPACKET &&
834 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
835 return -ESOCKTNOSUPPORT;
837 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
840 sock->ops = &l2cap_sock_ops;
842 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
846 l2cap_sock_init(sk, NULL);
/* bind(2) backend: copy in the (possibly short) sockaddr_l2, reject
 * non-BT_OPEN sockets, enforce CAP_NET_BIND_SERVICE for privileged PSMs
 * (< 0x1001) and refuse duplicate psm/src bindings. */
850 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
852 struct sock *sk = sock->sk;
853 struct sockaddr_l2 la;
858 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy only what the caller supplied; the rest stays zeroed. */
861 memset(&la, 0, sizeof(la));
862 len = min_t(unsigned int, sizeof(la), alen);
863 memcpy(&la, addr, len);
870 if (sk->sk_state != BT_OPEN) {
875 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
876 !capable(CAP_NET_BIND_SERVICE)) {
881 write_lock_bh(&l2cap_sk_list.lock);
883 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
886 /* Save source address */
887 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
888 l2cap_pi(sk)->psm = la.l2_psm;
889 l2cap_pi(sk)->sport = la.l2_psm;
890 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) start at the SDP security level. */
892 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
893 __le16_to_cpu(la.l2_psm) == 0x0003)
894 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
897 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to the destination, pick the HCI authentication type
 * from socket type / PSM / security level, create the ACL link and
 * attach this channel to it.  Returns 0 or a negative errno. */
904 static int l2cap_do_connect(struct sock *sk)
906 bdaddr_t *src = &bt_sk(sk)->src;
907 bdaddr_t *dst = &bt_sk(sk)->dst;
908 struct l2cap_conn *conn;
909 struct hci_conn *hcon;
910 struct hci_dev *hdev;
914 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
917 hdev = hci_get_route(dst, src);
919 return -EHOSTUNREACH;
921 hci_dev_lock_bh(hdev);
/* RAW sockets (dedicated bonding), SDP PSM (no bonding) and normal
 * channels (general bonding) map security levels differently. */
925 if (sk->sk_type == SOCK_RAW) {
926 switch (l2cap_pi(sk)->sec_level) {
927 case BT_SECURITY_HIGH:
928 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
930 case BT_SECURITY_MEDIUM:
931 auth_type = HCI_AT_DEDICATED_BONDING;
934 auth_type = HCI_AT_NO_BONDING;
937 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
938 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
939 auth_type = HCI_AT_NO_BONDING_MITM;
941 auth_type = HCI_AT_NO_BONDING;
943 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
944 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
946 switch (l2cap_pi(sk)->sec_level) {
947 case BT_SECURITY_HIGH:
948 auth_type = HCI_AT_GENERAL_BONDING_MITM;
950 case BT_SECURITY_MEDIUM:
951 auth_type = HCI_AT_GENERAL_BONDING;
954 auth_type = HCI_AT_NO_BONDING;
959 hcon = hci_connect(hdev, ACL_LINK, dst,
960 l2cap_pi(sk)->sec_level, auth_type);
964 conn = l2cap_conn_add(hcon, 0);
972 /* Update source addr of the socket */
973 bacpy(src, conn->src);
975 l2cap_chan_add(conn, sk, NULL);
977 sk->sk_state = BT_CONNECT;
978 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* ACL may already be up (reused link): short-circuit for
 * non-SEQPACKET sockets, which need no L2CAP-level handshake. */
980 if (hcon->state == BT_CONNECTED) {
981 if (sk->sk_type != SOCK_SEQPACKET) {
982 l2cap_sock_clear_timer(sk);
983 sk->sk_state = BT_CONNECTED;
989 hci_dev_unlock_bh(hdev);
/* connect(2) backend: validate the address and channel mode (ERTM /
 * streaming only when enable_ertm is set — the check is in lines missing
 * from this extract), record the destination, start the connection and
 * optionally block until BT_CONNECTED. */
994 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
996 struct sock *sk = sock->sk;
997 struct sockaddr_l2 la;
1000 BT_DBG("sk %p", sk);
1002 if (!addr || addr->sa_family != AF_BLUETOOTH)
1005 memset(&la, 0, sizeof(la));
1006 len = min_t(unsigned int, sizeof(la), alen);
1007 memcpy(&la, addr, len);
/* Connection-oriented sockets must name a PSM. */
1014 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1019 switch (l2cap_pi(sk)->mode) {
1020 case L2CAP_MODE_BASIC:
1022 case L2CAP_MODE_ERTM:
1023 case L2CAP_MODE_STREAMING:
1032 switch (sk->sk_state) {
1036 /* Already connecting */
1040 /* Already connected */
1053 /* Set destination address and psm */
1054 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1055 l2cap_pi(sk)->psm = la.l2_psm;
1057 err = l2cap_do_connect(sk);
1062 err = bt_sock_wait_state(sk, BT_CONNECTED,
1063 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2) backend: SEQPACKET + BT_BOUND only; auto-assigns a free
 * odd PSM in [0x1001, 0x1100) when the socket was bound without one. */
1069 static int l2cap_sock_listen(struct socket *sock, int backlog)
1071 struct sock *sk = sock->sk;
1074 BT_DBG("sk %p backlog %d", sk, backlog);
1078 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1083 switch (l2cap_pi(sk)->mode) {
1084 case L2CAP_MODE_BASIC:
1086 case L2CAP_MODE_ERTM:
1087 case L2CAP_MODE_STREAMING:
1096 if (!l2cap_pi(sk)->psm) {
1097 bdaddr_t *src = &bt_sk(sk)->src;
1102 write_lock_bh(&l2cap_sk_list.lock);
1104 for (psm = 0x1001; psm < 0x1100; psm += 2)
1105 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1106 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1107 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1112 write_unlock_bh(&l2cap_sk_list.lock);
1118 sk->sk_max_ack_backlog = backlog;
1119 sk->sk_ack_backlog = 0;
1120 sk->sk_state = BT_LISTEN;
/* accept(2) backend: classic wake-one sleep loop — drop the socket lock
 * while waiting, re-take it and recheck BT_LISTEN after every wakeup. */
1127 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1129 DECLARE_WAITQUEUE(wait, current);
1130 struct sock *sk = sock->sk, *nsk;
1134 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1136 if (sk->sk_state != BT_LISTEN) {
1141 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1143 BT_DBG("sk %p timeo %ld", sk, timeo);
1145 /* Wait for an incoming connection. (wake-one). */
1146 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1147 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1148 set_current_state(TASK_INTERRUPTIBLE);
1155 timeo = schedule_timeout(timeo);
1156 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1158 if (sk->sk_state != BT_LISTEN) {
1163 if (signal_pending(current)) {
1164 err = sock_intr_errno(timeo);
1168 set_current_state(TASK_RUNNING);
1169 remove_wait_queue(sk->sk_sleep, &wait);
1174 newsock->state = SS_CONNECTED;
1176 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername backend: fill in PSM, bdaddr and CID for the
 * peer (dst/dcid) or the local end (sport/src/scid). */
1183 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1185 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1186 struct sock *sk = sock->sk;
1188 BT_DBG("sock %p, sk %p", sock, sk);
1190 addr->sa_family = AF_BLUETOOTH;
1191 *len = sizeof(struct sockaddr_l2);
1194 la->l2_psm = l2cap_pi(sk)->psm;
1195 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1196 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1198 la->l2_psm = l2cap_pi(sk)->sport;
1199 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1200 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: while waiting for the peer's F-bit response, poll
 * again up to remote_max_tx times, then give up and disconnect. */
1206 static void l2cap_monitor_timeout(unsigned long arg)
1208 struct sock *sk = (void *) arg;
1212 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1213 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1217 l2cap_pi(sk)->retry_count++;
1218 __mod_monitor_timer();
1220 control = L2CAP_CTRL_POLL;
1221 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: no ack arrived in time — enter the WAIT_F
 * state and poll the peer with an RR/RNR carrying the P bit. */
1225 static void l2cap_retrans_timeout(unsigned long arg)
1227 struct sock *sk = (void *) arg;
1231 l2cap_pi(sk)->retry_count = 1;
1232 __mod_monitor_timer();
1234 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1236 control = L2CAP_CTRL_POLL;
1237 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Free transmitted frames up to (not including) expected_ack_seq; stop
 * the retransmission timer once nothing is outstanding. */
1241 static void l2cap_drop_acked_frames(struct sock *sk)
1243 struct sk_buff *skb;
1245 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1246 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1249 skb = skb_dequeue(TX_QUEUE(sk));
1252 l2cap_pi(sk)->unacked_frames--;
1255 if (!l2cap_pi(sk)->unacked_frames)
1256 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one skb to the HCI layer on this channel's ACL link. */
1261 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1263 struct l2cap_pinfo *pi = l2cap_pi(sk);
1266 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1268 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: clone each queued frame, stamp TxSeq (mod 64)
 * into the control field, recompute the CRC16 FCS over the clone, send
 * it and drop the original — streaming mode keeps no retransmit copy. */
1275 static int l2cap_streaming_send(struct sock *sk)
1277 struct sk_buff *skb, *tx_skb;
1278 struct l2cap_pinfo *pi = l2cap_pi(sk);
1282 while ((skb = sk->sk_send_head)) {
1283 tx_skb = skb_clone(skb, GFP_ATOMIC);
1285 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1286 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1287 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS occupies the last 2 bytes and covers everything before it. */
1289 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1290 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1291 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1294 err = l2cap_do_send(sk, tx_skb);
1296 l2cap_send_disconn_req(pi->conn, sk);
1300 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1302 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1303 sk->sk_send_head = NULL;
1305 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1307 skb = skb_dequeue(TX_QUEUE(sk));
/* ERTM retransmission of a single I-frame identified by tx_seq: locate
 * it in TX_QUEUE, enforce remote_max_tx, refresh ReqSeq/TxSeq in the
 * control field, recompute the FCS and resend. */
1313 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1315 struct l2cap_pinfo *pi = l2cap_pi(sk);
1316 struct sk_buff *skb, *tx_skb;
1320 skb = skb_peek(TX_QUEUE(sk));
1322 if (bt_cb(skb)->tx_seq != tx_seq) {
1323 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1325 skb = skb_queue_next(TX_QUEUE(sk), skb);
1329 if (pi->remote_max_tx &&
1330 bt_cb(skb)->retries == pi->remote_max_tx) {
1331 l2cap_send_disconn_req(pi->conn, sk);
1335 tx_skb = skb_clone(skb, GFP_ATOMIC);
1336 bt_cb(skb)->retries++;
1337 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1338 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1339 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1340 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1342 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1343 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1344 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1347 err = l2cap_do_send(sk, tx_skb);
1349 l2cap_send_disconn_req(pi->conn, sk);
1357 static int l2cap_ertm_send(struct sock *sk)
1359 struct sk_buff *skb, *tx_skb;
1360 struct l2cap_pinfo *pi = l2cap_pi(sk);
1364 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1367 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1368 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1369 tx_skb = skb_clone(skb, GFP_ATOMIC);
1371 if (pi->remote_max_tx &&
1372 bt_cb(skb)->retries == pi->remote_max_tx) {
1373 l2cap_send_disconn_req(pi->conn, sk);
1377 bt_cb(skb)->retries++;
1379 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1380 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1381 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1382 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1385 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1386 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1387 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1390 err = l2cap_do_send(sk, tx_skb);
1392 l2cap_send_disconn_req(pi->conn, sk);
1395 __mod_retrans_timer();
1397 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1398 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1400 pi->unacked_frames++;
1402 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1403 sk->sk_send_head = NULL;
1405 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Copy `len` bytes of user data into skb: `count` bytes into the main
 * buffer, the remainder into a chain of fragment skbs sized by the
 * connection MTU.  Returns total length copied or a negative errno. */
1411 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1413 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1414 struct sk_buff **frag;
1417 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1424 /* Continuation fragments (no L2CAP header) */
1425 frag = &skb_shinfo(skb)->frag_list;
1427 count = min_t(unsigned int, conn->mtu, len);
1429 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1432 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1438 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM +
 * payload pulled from the user iovec. */
1444 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1446 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1447 struct sk_buff *skb;
1448 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1449 struct l2cap_hdr *lh;
1451 BT_DBG("sk %p len %d", sk, (int)len);
1453 count = min_t(unsigned int, (conn->mtu - hlen), len);
1454 skb = bt_skb_send_alloc(sk, count + hlen,
1455 msg->msg_flags & MSG_DONTWAIT, &err);
1457 return ERR_PTR(-ENOMEM);
1459 /* Create L2CAP header */
1460 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1461 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1462 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1463 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1465 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1466 if (unlikely(err < 0)) {
1468 return ERR_PTR(err);
/* Build a basic-mode B-frame: plain L2CAP header + payload. */
1473 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1475 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1476 struct sk_buff *skb;
1477 int err, count, hlen = L2CAP_HDR_SIZE;
1478 struct l2cap_hdr *lh;
1480 BT_DBG("sk %p len %d", sk, (int)len);
1482 count = min_t(unsigned int, (conn->mtu - hlen), len);
1483 skb = bt_skb_send_alloc(sk, count + hlen,
1484 msg->msg_flags & MSG_DONTWAIT, &err);
1486 return ERR_PTR(-ENOMEM);
1488 /* Create L2CAP header */
1489 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1490 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1491 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1493 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1494 if (unlikely(err < 0)) {
1496 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: header + control field, an optional
 * 2-byte SDU-length field (when `sdulen` is set, i.e. SAR start frame),
 * payload, and a zeroed 2-byte FCS placeholder filled in at send time. */
1501 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1503 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1504 struct sk_buff *skb;
1505 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1506 struct l2cap_hdr *lh;
1508 BT_DBG("sk %p len %d", sk, (int)len);
1513 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1516 count = min_t(unsigned int, (conn->mtu - hlen), len);
1517 skb = bt_skb_send_alloc(sk, count + hlen,
1518 msg->msg_flags & MSG_DONTWAIT, &err);
1520 return ERR_PTR(-ENOMEM);
1522 /* Create L2CAP header */
1523 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1524 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1525 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1526 put_unaligned_le16(control, skb_put(skb, 2));
1528 put_unaligned_le16(sdulen, skb_put(skb, 2));
1530 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1531 if (unlikely(err < 0)) {
1533 return ERR_PTR(err);
/* FCS placeholder — actual value is computed when the frame is sent. */
1536 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1537 put_unaligned_le16(0, skb_put(skb, 2));
1539 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than max_pdu_size into a START frame (carrying
 * the total SDU length), CONTINUE frames, and an END frame; build them
 * on a temporary queue, then splice onto TX_QUEUE atomically so a
 * partial SDU is never exposed to the transmit path. */
1543 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1545 struct l2cap_pinfo *pi = l2cap_pi(sk);
1546 struct sk_buff *skb;
1547 struct sk_buff_head sar_queue;
1551 __skb_queue_head_init(&sar_queue);
1552 control = L2CAP_SDU_START;
/* START frame carries the full SDU length as `sdulen`. */
1553 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1555 return PTR_ERR(skb);
1557 __skb_queue_tail(&sar_queue, skb);
1558 len -= pi->max_pdu_size;
1559 size +=pi->max_pdu_size;
1565 if (len > pi->max_pdu_size) {
1566 control |= L2CAP_SDU_CONTINUE;
1567 buflen = pi->max_pdu_size;
1569 control |= L2CAP_SDU_END;
1573 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop everything built so far — all-or-nothing. */
1575 skb_queue_purge(&sar_queue);
1576 return PTR_ERR(skb);
1579 __skb_queue_tail(&sar_queue, skb);
1584 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1585 if (sk->sk_send_head == NULL)
1586 sk->sk_send_head = sar_queue.next;
/* sendmsg() entry point for L2CAP sockets.
 * Dispatches on socket type and channel mode:
 *   SOCK_DGRAM            -> connectionless PDU, sent immediately
 *   L2CAP_MODE_BASIC      -> single basic PDU, sent immediately
 *   ERTM / STREAMING      -> I-frame(s) queued on the TX queue, then the
 *                            mode-specific transmit routine is kicked.
 * MSG_OOB is not supported.
 */
1591 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1593 struct sock *sk = sock->sk;
1594 struct l2cap_pinfo *pi = l2cap_pi(sk);
1595 struct sk_buff *skb;
1599 BT_DBG("sock %p, sk %p", sock, sk);
1601 err = sock_error(sk);
1605 if (msg->msg_flags & MSG_OOB)
1608 /* Check outgoing MTU */
1609 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC &&
/* Only a fully connected channel may transmit */
1615 if (sk->sk_state != BT_CONNECTED) {
1620 /* Connectionless channel */
1621 if (sk->sk_type == SOCK_DGRAM) {
1622 skb = l2cap_create_connless_pdu(sk, msg, len);
1623 err = l2cap_do_send(sk, skb);
1628 case L2CAP_MODE_BASIC:
1629 /* Create a basic PDU */
1630 skb = l2cap_create_basic_pdu(sk, msg, len);
1636 err = l2cap_do_send(sk, skb);
1641 case L2CAP_MODE_ERTM:
1642 case L2CAP_MODE_STREAMING:
1643 /* Entire SDU fits into one PDU */
1644 if (len <= pi->max_pdu_size) {
1645 control = L2CAP_SDU_UNSEGMENTED;
1646 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1651 __skb_queue_tail(TX_QUEUE(sk), skb);
1652 if (sk->sk_send_head == NULL)
1653 sk->sk_send_head = skb;
1655 /* Segment SDU into multiples PDUs */
1656 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Streaming mode pushes everything out; ERTM respects the TX window */
1661 if (pi->mode == L2CAP_MODE_STREAMING)
1662 err = l2cap_streaming_send(sk);
1664 err = l2cap_ertm_send(sk);
1671 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point. With deferred setup, the first read on a
 * BT_CONNECT2 socket is the accept trigger: it sends the pending
 * connect response and moves the channel into configuration.
 * Actual data delivery is handled by the generic bt_sock_recvmsg().
 */
1680 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1682 struct sock *sk = sock->sk;
1686 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1687 struct l2cap_conn_rsp rsp;
1689 sk->sk_state = BT_CONFIG;
/* Our dcid is the remote's source CID and vice versa */
1691 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1692 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1693 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1694 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1695 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1696 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1704 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * Options are pre-filled with the current values so a short copy from
 * userspace leaves the untouched fields at their existing settings.
 */
1707 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1709 struct sock *sk = sock->sk;
1710 struct l2cap_options opts;
1714 BT_DBG("sk %p", sk);
/* Seed with current values; partial user structs keep the rest */
1720 opts.imtu = l2cap_pi(sk)->imtu;
1721 opts.omtu = l2cap_pi(sk)->omtu;
1722 opts.flush_to = l2cap_pi(sk)->flush_to;
1723 opts.mode = l2cap_pi(sk)->mode;
1724 opts.fcs = l2cap_pi(sk)->fcs;
1726 len = min_t(unsigned int, sizeof(opts), optlen);
1727 if (copy_from_user((char *) &opts, optval, len)) {
/* NOTE(review): flush_to is read back from userspace but not stored
 * here -- confirm against the full file whether that is intentional */
1732 l2cap_pi(sk)->imtu = opts.imtu;
1733 l2cap_pi(sk)->omtu = opts.omtu;
1734 l2cap_pi(sk)->mode = opts.mode;
1735 l2cap_pi(sk)->fcs = opts.fcs;
1739 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode flags onto the newer sec_level scale; later
 * assignments override earlier ones, so SECURE wins over ENCRYPT/AUTH */
1744 if (opt & L2CAP_LM_AUTH)
1745 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1746 if (opt & L2CAP_LM_ENCRYPT)
1747 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1748 if (opt & L2CAP_LM_SECURE)
1749 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1751 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1752 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() dispatcher: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here.
 */
1764 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1766 struct sock *sk = sock->sk;
1767 struct bt_security sec;
1771 BT_DBG("sk %p", sk);
1773 if (level == SOL_L2CAP)
1774 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1776 if (level != SOL_BLUETOOTH)
1777 return -ENOPROTOOPT;
/* BT_SECURITY only applies to connection-oriented and raw sockets */
1783 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1788 sec.level = BT_SECURITY_LOW;
1790 len = min_t(unsigned int, sizeof(sec), optlen);
1791 if (copy_from_user((char *) &sec, optval, len)) {
1796 if (sec.level < BT_SECURITY_LOW ||
1797 sec.level > BT_SECURITY_HIGH) {
1802 l2cap_pi(sk)->sec_level = sec.level;
1805 case BT_DEFER_SETUP:
/* Deferred setup may only be toggled before the channel is active */
1806 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1811 if (get_user(opt, (u32 __user *) optval)) {
1816 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO). Copies at most the user-supplied length.
 */
1828 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1830 struct sock *sk = sock->sk;
1831 struct l2cap_options opts;
1832 struct l2cap_conninfo cinfo;
1836 BT_DBG("sk %p", sk);
1838 if (get_user(len, optlen))
1845 opts.imtu = l2cap_pi(sk)->imtu;
1846 opts.omtu = l2cap_pi(sk)->omtu;
1847 opts.flush_to = l2cap_pi(sk)->flush_to;
1848 opts.mode = l2cap_pi(sk)->mode;
1849 opts.fcs = l2cap_pi(sk)->fcs;
1851 len = min_t(unsigned int, len, sizeof(opts));
1852 if (copy_to_user(optval, (char *) &opts, len))
/* Translate sec_level back into the legacy link-mode bit mask */
1858 switch (l2cap_pi(sk)->sec_level) {
1859 case BT_SECURITY_LOW:
1860 opt = L2CAP_LM_AUTH;
1862 case BT_SECURITY_MEDIUM:
1863 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1865 case BT_SECURITY_HIGH:
1866 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1874 if (l2cap_pi(sk)->role_switch)
1875 opt |= L2CAP_LM_MASTER;
1877 if (l2cap_pi(sk)->force_reliable)
1878 opt |= L2CAP_LM_RELIABLE;
1880 if (put_user(opt, (u32 __user *) optval))
1884 case L2CAP_CONNINFO:
/* Connection info is valid once connected, or while a deferred-setup
 * connect is pending in BT_CONNECT2 */
1885 if (sk->sk_state != BT_CONNECTED &&
1886 !(sk->sk_state == BT_CONNECT2 &&
1887 bt_sk(sk)->defer_setup)) {
1892 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1893 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1895 len = min_t(unsigned int, len, sizeof(cinfo));
1896 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() dispatcher: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH reports BT_SECURITY and BT_DEFER_SETUP.
 */
1910 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1912 struct sock *sk = sock->sk;
1913 struct bt_security sec;
1916 BT_DBG("sk %p", sk);
1918 if (level == SOL_L2CAP)
1919 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1921 if (level != SOL_BLUETOOTH)
1922 return -ENOPROTOOPT;
1924 if (get_user(len, optlen))
/* BT_SECURITY only applies to connection-oriented and raw sockets */
1931 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1936 sec.level = l2cap_pi(sk)->sec_level;
1938 len = min_t(unsigned int, len, sizeof(sec));
1939 if (copy_to_user(optval, (char *) &sec, len))
1944 case BT_DEFER_SETUP:
1945 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1950 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() for L2CAP sockets: closes the channel (sending a
 * disconnect request where applicable) and, if SO_LINGER is set,
 * waits up to sk_lingertime for the channel to reach BT_CLOSED.
 * The 'how' argument is effectively ignored; shutdown is always full.
 */
1964 static int l2cap_sock_shutdown(struct socket *sock, int how)
1966 struct sock *sk = sock->sk;
1969 BT_DBG("sock %p, sk %p", sock, sk);
1975 if (!sk->sk_shutdown) {
1976 sk->sk_shutdown = SHUTDOWN_MASK;
1977 l2cap_sock_clear_timer(sk);
1978 __l2cap_sock_close(sk, 0);
1980 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1981 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() (close) for L2CAP sockets: shut the channel down, then
 * kill the sock once it is zapped and unowned.
 */
1988 static int l2cap_sock_release(struct socket *sock)
1990 struct sock *sk = sock->sk;
1993 BT_DBG("sock %p, sk %p", sock, sk);
1998 err = l2cap_sock_shutdown(sock, 2);
2001 l2cap_sock_kill(sk);
/* Mark a channel as fully configured and connected, and wake whoever
 * is waiting on it: the connect()er for outgoing channels, or the
 * listening parent's accept() for incoming channels.
 */
2005 static void l2cap_chan_ready(struct sock *sk)
2007 struct sock *parent = bt_sk(sk)->parent;
2009 BT_DBG("sk %p, parent %p", sk, parent);
/* Configuration is finished; drop conf state and the setup timer */
2011 l2cap_pi(sk)->conf_state = 0;
2012 l2cap_sock_clear_timer(sk);
2015 /* Outgoing channel.
2016 * Wake up socket sleeping on connect.
2018 sk->sk_state = BT_CONNECTED;
2019 sk->sk_state_change(sk);
2021 /* Incoming channel.
2022 * Wake up socket sleeping on accept.
2024 parent->sk_data_ready(parent, 0);
2028 /* Copy frame to all raw sockets on that connection */
/* Clones the skb for every SOCK_RAW channel on the connection (except
 * the socket the frame originated from) and queues it for receive.
 * Runs under the channel list read lock; clones use GFP_ATOMIC.
 */
2029 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2031 struct l2cap_chan_list *l = &conn->chan_list;
2032 struct sk_buff *nskb;
2035 BT_DBG("conn %p", conn);
2037 read_lock(&l->lock);
2038 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2039 if (sk->sk_type != SOCK_RAW)
2042 /* Don't send frame to the socket it came from */
2045 nskb = skb_clone(skb, GFP_ATOMIC);
/* Drop the clone if the receive queue rejects it */
2049 if (sock_queue_rcv_skb(sk, nskb))
2052 read_unlock(&l->lock);
2055 /* ---- L2CAP signalling commands ---- */
/* Build an skb carrying one L2CAP signalling command on the signalling
 * CID. If the command does not fit in a single HCI fragment of size
 * conn->mtu, the remainder is chained as fragments on frag_list
 * (continuation fragments carry no L2CAP header).
 * Returns NULL on allocation failure.
 */
2056 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2057 u8 code, u8 ident, u16 dlen, void *data)
2059 struct sk_buff *skb, **frag;
2060 struct l2cap_cmd_hdr *cmd;
2061 struct l2cap_hdr *lh;
2064 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2065 conn, code, ident, dlen);
2067 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2068 count = min_t(unsigned int, conn->mtu, len);
2070 skb = bt_skb_alloc(count, GFP_ATOMIC);
/* Basic L2CAP header addressed to the signalling channel */
2074 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2075 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2076 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2078 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2081 cmd->len = cpu_to_le16(dlen);
/* Payload portion that fits in the first fragment */
2084 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2085 memcpy(skb_put(skb, count), data, count);
2091 /* Continuation fragments (no L2CAP header) */
2092 frag = &skb_shinfo(skb)->frag_list;
2094 count = min_t(unsigned int, conn->mtu, len);
2096 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2100 memcpy(skb_put(*frag, count), data, count);
2105 frag = &(*frag)->next;
/* Parse one configuration option at *ptr, returning its type, payload
 * length and value (1/2/4-byte values are decoded from little-endian;
 * larger values are returned as a pointer cast into *val).
 * Returns the total number of bytes consumed.
 */
2115 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2117 struct l2cap_conf_opt *opt = *ptr;
2120 len = L2CAP_CONF_OPT_SIZE + opt->len;
2128 *val = *((u8 *) opt->val);
2132 *val = __le16_to_cpu(*((__le16 *) opt->val));
2136 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer instead of a value */
2140 *val = (unsigned long) opt->val;
2144 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option to the buffer at *ptr, encoding
 * 1/2/4-byte values as little-endian and copying larger values raw
 * (val is then a pointer). Advances *ptr past the written option.
 */
2148 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2150 struct l2cap_conf_opt *opt = *ptr;
2152 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2159 *((u8 *) opt->val) = val;
2163 *((__le16 *) opt->val) = cpu_to_le16(val);
2167 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Arbitrary-length value: val carries a pointer to the data */
2171 memcpy(opt->val, (void *) val, len);
2175 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Reset per-channel ERTM state: sequence/ack counters, the
 * retransmission and monitor timers, and the SREJ reassembly queue.
 * Called when an ERTM channel becomes connected.
 */
2178 static inline void l2cap_ertm_init(struct sock *sk)
2180 l2cap_pi(sk)->expected_ack_seq = 0;
2181 l2cap_pi(sk)->unacked_frames = 0;
2182 l2cap_pi(sk)->buffer_seq = 0;
2183 l2cap_pi(sk)->num_to_ack = 0;
2185 setup_timer(&l2cap_pi(sk)->retrans_timer,
2186 l2cap_retrans_timeout, (unsigned long) sk);
2187 setup_timer(&l2cap_pi(sk)->monitor_timer,
2188 l2cap_monitor_timeout, (unsigned long) sk);
2190 __skb_queue_head_init(SREJ_QUEUE(sk));
/* Return non-zero when 'mode' (ERTM or Streaming) is supported by both
 * the local feature mask (extended with ERTM/Streaming when the module
 * enables them) and the remote feature mask.
 */
2193 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2195 u32 local_feat_mask = l2cap_feat_mask;
2197 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2200 case L2CAP_MODE_ERTM:
2201 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2202 case L2CAP_MODE_STREAMING:
2203 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode to offer: keep the requested ERTM/Streaming
 * mode when the remote supports it, otherwise fall back to basic mode.
 */
2209 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2212 case L2CAP_MODE_STREAMING:
2213 case L2CAP_MODE_ERTM:
2214 if (l2cap_mode_supported(mode, remote_feat_mask))
2218 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into 'data'.
 * On the first request, lock in the channel mode (falling back via
 * l2cap_select_mode() when ERTM/Streaming is not mutually supported).
 * Adds MTU, RFC and FCS options as appropriate for the chosen mode.
 * Returns the total request length.
 */
2222 static int l2cap_build_conf_req(struct sock *sk, void *data)
2224 struct l2cap_pinfo *pi = l2cap_pi(sk);
2225 struct l2cap_conf_req *req = data;
2226 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2227 void *ptr = req->data;
2229 BT_DBG("sk %p", sk);
/* Mode is only (re)negotiated on the very first config exchange */
2231 if (pi->num_conf_req || pi->num_conf_rsp)
2235 case L2CAP_MODE_STREAMING:
2236 case L2CAP_MODE_ERTM:
2237 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2238 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2239 l2cap_send_disconn_req(pi->conn, sk);
2242 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2248 case L2CAP_MODE_BASIC:
/* Only send an MTU option when it differs from the default */
2249 if (pi->imtu != L2CAP_DEFAULT_MTU)
2250 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2253 case L2CAP_MODE_ERTM:
2254 rfc.mode = L2CAP_MODE_ERTM;
2255 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2256 rfc.max_transmit = L2CAP_DEFAULT_MAX_TX;
/* Timeouts are dictated by the receiver; we propose zero */
2257 rfc.retrans_timeout = 0;
2258 rfc.monitor_timeout = 0;
2259 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2261 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2262 sizeof(rfc), (unsigned long) &rfc);
2264 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Opt out of CRC16 when either side does not want it */
2267 if (pi->fcs == L2CAP_FCS_NONE ||
2268 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2269 pi->fcs = L2CAP_FCS_NONE;
2270 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2274 case L2CAP_MODE_STREAMING:
2275 rfc.mode = L2CAP_MODE_STREAMING;
2277 rfc.max_transmit = 0;
2278 rfc.retrans_timeout = 0;
2279 rfc.monitor_timeout = 0;
2280 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2282 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2283 sizeof(rfc), (unsigned long) &rfc);
2285 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2288 if (pi->fcs == L2CAP_FCS_NONE ||
2289 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2290 pi->fcs = L2CAP_FCS_NONE;
2291 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2296 /* FIXME: Need actual value of the flush timeout */
2297 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2298 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2300 req->dcid = cpu_to_le16(pi->dcid);
2301 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated configuration request (pi->conf_req /
 * pi->conf_len) and build our response into 'data'.
 * Walks the option list, validates MTU and RFC/mode, echoes corrected
 * options back with L2CAP_CONF_UNACCEPT where needed, and flags unknown
 * non-hint options with L2CAP_CONF_UNKNOWN.
 * Returns the response length, or -ECONNREFUSED when the requested mode
 * is unacceptable.
 */
2306 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2308 struct l2cap_pinfo *pi = l2cap_pi(sk);
2309 struct l2cap_conf_rsp *rsp = data;
2310 void *ptr = rsp->data;
2311 void *req = pi->conf_req;
2312 int len = pi->conf_len;
2313 int type, hint, olen;
2315 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2316 u16 mtu = L2CAP_DEFAULT_MTU;
2317 u16 result = L2CAP_CONF_SUCCESS;
2319 BT_DBG("sk %p", sk);
2321 while (len >= L2CAP_CONF_OPT_SIZE) {
2322 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options (high bit set) may be silently ignored */
2324 hint = type & L2CAP_CONF_HINT;
2325 type &= L2CAP_CONF_MASK;
2328 case L2CAP_CONF_MTU:
2332 case L2CAP_CONF_FLUSH_TO:
2336 case L2CAP_CONF_QOS:
2339 case L2CAP_CONF_RFC:
2340 if (olen == sizeof(rfc))
2341 memcpy(&rfc, (void *) val, olen);
2344 case L2CAP_CONF_FCS:
2345 if (val == L2CAP_FCS_NONE)
2346 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: echo its type back as UNKNOWN */
2354 result = L2CAP_CONF_UNKNOWN;
2355 *((u8 *) ptr++) = type;
/* Mode is only settled during the first config exchange */
2360 if (pi->num_conf_rsp || pi->num_conf_req)
2364 case L2CAP_MODE_STREAMING:
2365 case L2CAP_MODE_ERTM:
2366 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2367 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2368 return -ECONNREFUSED;
2371 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Peer asked for a different mode: reject once, refuse the second time */
2376 if (pi->mode != rfc.mode) {
2377 result = L2CAP_CONF_UNACCEPT;
2378 rfc.mode = pi->mode;
2380 if (pi->num_conf_rsp == 1)
2381 return -ECONNREFUSED;
2383 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2384 sizeof(rfc), (unsigned long) &rfc);
2388 if (result == L2CAP_CONF_SUCCESS) {
2389 /* Configure output options and let the other side know
2390 * which ones we don't like. */
2392 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2393 result = L2CAP_CONF_UNACCEPT;
2396 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2398 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2401 case L2CAP_MODE_BASIC:
2402 pi->fcs = L2CAP_FCS_NONE;
2403 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2406 case L2CAP_MODE_ERTM:
/* Adopt the sender's window/retry limits; we set the timeouts */
2407 pi->remote_tx_win = rfc.txwin_size;
2408 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): max_pdu_size is stored without le16_to_cpu here but
 * converted elsewhere (see l2cap_parse_conf_rsp) -- verify on BE */
2409 pi->max_pdu_size = rfc.max_pdu_size;
2411 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2412 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2414 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2416 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2417 sizeof(rfc), (unsigned long) &rfc);
2421 case L2CAP_MODE_STREAMING:
2422 pi->remote_tx_win = rfc.txwin_size;
2423 pi->max_pdu_size = rfc.max_pdu_size;
2425 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2427 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2428 sizeof(rfc), (unsigned long) &rfc);
/* Any other mode: reject and advertise the mode we insist on */
2433 result = L2CAP_CONF_UNACCEPT;
2435 memset(&rfc, 0, sizeof(rfc));
2436 rfc.mode = pi->mode;
2439 if (result == L2CAP_CONF_SUCCESS)
2440 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2442 rsp->scid = cpu_to_le16(pi->dcid);
2443 rsp->result = cpu_to_le16(result);
2444 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration response 'rsp' (len bytes) and build a
 * new request into 'data' acknowledging the adjusted options.
 * Updates *result, and on success records the negotiated ERTM/Streaming
 * parameters. Returns the new request length, or -ECONNREFUSED when the
 * peer tries to change a mode we already locked in.
 */
2449 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2451 struct l2cap_pinfo *pi = l2cap_pi(sk);
2452 struct l2cap_conf_req *req = data;
2453 void *ptr = req->data;
2456 struct l2cap_conf_rfc rfc;
2458 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2460 while (len >= L2CAP_CONF_OPT_SIZE) {
2461 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2464 case L2CAP_CONF_MTU:
/* Clamp an unacceptably small MTU up to the minimum */
2465 if (val < L2CAP_DEFAULT_MIN_MTU) {
2466 *result = L2CAP_CONF_UNACCEPT;
2467 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2470 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2473 case L2CAP_CONF_FLUSH_TO:
2475 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2479 case L2CAP_CONF_RFC:
2480 if (olen == sizeof(rfc))
2481 memcpy(&rfc, (void *)val, olen);
/* Once the state-2 device locked the mode, it may not change */
2483 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2484 rfc.mode != pi->mode)
2485 return -ECONNREFUSED;
2487 pi->mode = rfc.mode;
2490 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2491 sizeof(rfc), (unsigned long) &rfc);
2496 if (*result == L2CAP_CONF_SUCCESS) {
2498 case L2CAP_MODE_ERTM:
/* Adopt the peer's negotiated ERTM parameters */
2499 pi->remote_tx_win = rfc.txwin_size;
2500 pi->retrans_timeout = rfc.retrans_timeout;
2501 pi->monitor_timeout = rfc.monitor_timeout;
2502 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2504 case L2CAP_MODE_STREAMING:
2505 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2510 req->dcid = cpu_to_le16(pi->dcid);
2511 req->flags = cpu_to_le16(0x0000);
/* Build a minimal (option-less) configuration response with the given
 * result and flags. Returns the response length.
 */
2516 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2518 struct l2cap_conf_rsp *rsp = data;
2520 void *ptr = rsp->data;
2521 BT_DBG("sk %p", sk);
2523 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2524 rsp->result = cpu_to_le16(result);
2525 rsp->flags = cpu_to_le16(flags);
/* Handle an incoming Command Reject. If it answers our outstanding
 * information request, treat the peer as feature-mask-incapable: stop
 * the info timer, mark the feature exchange done, and start any
 * channels that were waiting on it.
 */
2530 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2532 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (reason 0x0000) is handled here */
2534 if (rej->reason != 0x0000)
2537 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2538 cmd->ident == conn->info_ident) {
2539 del_timer(&conn->info_timer);
2541 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2542 conn->info_ident = 0;
2544 l2cap_conn_start(conn);
/* Handle an incoming Connection Request.
 * Finds a listening socket for the PSM, enforces link security (except
 * for SDP, PSM 0x0001), allocates and initializes a child socket, adds
 * it to the channel list, and replies with success / pending / refusal.
 * When the reply is pending with no info, kicks off the feature-mask
 * information exchange.
 */
2550 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2552 struct l2cap_chan_list *list = &conn->chan_list;
2553 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2554 struct l2cap_conn_rsp rsp;
2555 struct sock *sk, *parent;
2556 int result, status = L2CAP_CS_NO_INFO;
2558 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2559 __le16 psm = req->psm;
2561 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2563 /* Check if we have socket listening on psm */
2564 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2566 result = L2CAP_CR_BAD_PSM;
2570 /* Check if the ACL is secure enough (if not SDP) */
2571 if (psm != cpu_to_le16(0x0001) &&
2572 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = authentication failure disconnect reason */
2573 conn->disc_reason = 0x05;
2574 result = L2CAP_CR_SEC_BLOCK;
2578 result = L2CAP_CR_NO_MEM;
2580 /* Check for backlog size */
2581 if (sk_acceptq_is_full(parent)) {
2582 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2586 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2590 write_lock_bh(&list->lock);
2592 /* Check if we already have channel with that dcid */
2593 if (__l2cap_get_chan_by_dcid(list, scid)) {
2594 write_unlock_bh(&list->lock);
2595 sock_set_flag(sk, SOCK_ZAPPED);
2596 l2cap_sock_kill(sk);
2600 hci_conn_hold(conn->hcon);
/* Initialize the child from the listening parent */
2602 l2cap_sock_init(sk, parent);
2603 bacpy(&bt_sk(sk)->src, conn->src);
2604 bacpy(&bt_sk(sk)->dst, conn->dst);
2605 l2cap_pi(sk)->psm = psm;
2606 l2cap_pi(sk)->dcid = scid;
2608 __l2cap_chan_add(conn, sk, parent);
/* Our local scid becomes the dcid we report back to the peer */
2609 dcid = l2cap_pi(sk)->scid;
2611 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2613 l2cap_pi(sk)->ident = cmd->ident;
2615 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2616 if (l2cap_check_security(sk)) {
/* Security OK: either defer to userspace or proceed to config */
2617 if (bt_sk(sk)->defer_setup) {
2618 sk->sk_state = BT_CONNECT2;
2619 result = L2CAP_CR_PEND;
2620 status = L2CAP_CS_AUTHOR_PEND;
2621 parent->sk_data_ready(parent, 0);
2623 sk->sk_state = BT_CONFIG;
2624 result = L2CAP_CR_SUCCESS;
2625 status = L2CAP_CS_NO_INFO;
/* Security still being established: answer "pending" */
2628 sk->sk_state = BT_CONNECT2;
2629 result = L2CAP_CR_PEND;
2630 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not finished yet: answer "pending, no info" */
2633 sk->sk_state = BT_CONNECT2;
2634 result = L2CAP_CR_PEND;
2635 status = L2CAP_CS_NO_INFO;
2638 write_unlock_bh(&list->lock);
2641 bh_unlock_sock(parent);
2644 rsp.scid = cpu_to_le16(scid);
2645 rsp.dcid = cpu_to_le16(dcid);
2646 rsp.result = cpu_to_le16(result);
2647 rsp.status = cpu_to_le16(status);
2648 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2650 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2651 struct l2cap_info_req info;
2652 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2654 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2655 conn->info_ident = l2cap_get_ident(conn);
2657 mod_timer(&conn->info_timer, jiffies +
2658 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2660 l2cap_send_cmd(conn, conn->info_ident,
2661 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response to our earlier Connection Request.
 * On success, record the peer's dcid, move to BT_CONFIG and send the
 * first configuration request; on "pending" just note the state; on
 * refusal tear the channel down with ECONNREFUSED.
 */
2667 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2669 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2670 u16 scid, dcid, result, status;
2674 scid = __le16_to_cpu(rsp->scid);
2675 dcid = __le16_to_cpu(rsp->dcid);
2676 result = __le16_to_cpu(rsp->result);
2677 status = __le16_to_cpu(rsp->status);
2679 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* A pending response may omit scid; fall back to lookup by ident */
2682 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2686 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2692 case L2CAP_CR_SUCCESS:
2693 sk->sk_state = BT_CONFIG;
2694 l2cap_pi(sk)->ident = 0;
2695 l2cap_pi(sk)->dcid = dcid;
2696 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2698 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2700 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2701 l2cap_build_conf_req(sk, req), req);
2702 l2cap_pi(sk)->num_conf_req++;
/* Pending: remember that the connect is still in flight */
2706 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Any other result is a refusal */
2710 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming Configuration Request.
 * Accumulates option data across continuation packets (flags bit 0)
 * into pi->conf_req, rejecting requests that overflow the buffer.
 * Once complete, parses the options, sends our response, and if both
 * directions are configured brings the channel up (initializing ERTM
 * state when needed). Sends our own config request if not yet sent.
 */
2718 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2720 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2726 dcid = __le16_to_cpu(req->dcid);
2727 flags = __le16_to_cpu(req->flags);
2729 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2731 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Ignore config on a channel already being torn down */
2735 if (sk->sk_state == BT_DISCONN)
2738 /* Reject if config buffer is too small. */
2739 len = cmd_len - sizeof(*req);
2740 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2741 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2742 l2cap_build_conf_rsp(sk, rsp,
2743 L2CAP_CONF_REJECT, flags), rsp);
/* Store this fragment's options for later parsing */
2748 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2749 l2cap_pi(sk)->conf_len += len;
2751 if (flags & 0x0001) {
2752 /* Incomplete config. Send empty response. */
2753 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2754 l2cap_build_conf_rsp(sk, rsp,
2755 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2759 /* Complete config. */
2760 len = l2cap_parse_conf_req(sk, rsp);
2762 l2cap_send_disconn_req(conn, sk);
2766 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2767 l2cap_pi(sk)->num_conf_rsp++;
2769 /* Reset config buffer. */
2770 l2cap_pi(sk)->conf_len = 0;
2772 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2775 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* Both directions done: settle FCS and bring the channel up */
2776 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2777 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2778 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2780 sk->sk_state = BT_CONNECTED;
2782 l2cap_pi(sk)->next_tx_seq = 0;
2783 l2cap_pi(sk)->expected_tx_seq = 0;
2784 __skb_queue_head_init(TX_QUEUE(sk));
2785 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2786 l2cap_ertm_init(sk);
2788 l2cap_chan_ready(sk);
2792 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2794 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2795 l2cap_build_conf_req(sk, buf), buf);
2796 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configuration Response to our request.
 * SUCCESS marks our input direction configured; UNACCEPT triggers a
 * bounded renegotiation (up to L2CAP_CONF_MAX_CONF_RSP attempts);
 * anything else tears the channel down. When both directions complete,
 * the channel is brought up (with ERTM init when applicable).
 */
2804 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2806 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2807 u16 scid, flags, result;
2810 scid = __le16_to_cpu(rsp->scid);
2811 flags = __le16_to_cpu(rsp->flags);
2812 result = __le16_to_cpu(rsp->result);
2814 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2815 scid, flags, result);
2817 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2822 case L2CAP_CONF_SUCCESS:
2825 case L2CAP_CONF_UNACCEPT:
2826 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* NOTE(review): cmd->len is __le16 but used without le16_to_cpu --
 * works on little-endian hosts only; verify on big-endian */
2827 int len = cmd->len - sizeof(*rsp);
2830 /* throw out any old stored conf requests */
2831 result = L2CAP_CONF_SUCCESS;
2832 len = l2cap_parse_conf_rsp(sk, rsp->data,
2835 l2cap_send_disconn_req(conn, sk);
2839 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2840 L2CAP_CONF_REQ, len, req);
2841 l2cap_pi(sk)->num_conf_req++;
2842 if (result != L2CAP_CONF_SUCCESS)
/* Rejected or renegotiation exhausted: disconnect the channel */
2848 sk->sk_state = BT_DISCONN;
2849 sk->sk_err = ECONNRESET;
2850 l2cap_sock_set_timer(sk, HZ * 5);
2851 l2cap_send_disconn_req(conn, sk);
2858 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2860 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
/* Both directions done: settle FCS and bring the channel up */
2861 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2862 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2863 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2865 sk->sk_state = BT_CONNECTED;
2866 l2cap_pi(sk)->next_tx_seq = 0;
2867 l2cap_pi(sk)->expected_tx_seq = 0;
2868 __skb_queue_head_init(TX_QUEUE(sk));
2869 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2870 l2cap_ertm_init(sk);
2872 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge it, purge the
 * TX (and, for ERTM, SREJ) queues, stop the ERTM timers, remove the
 * channel with ECONNRESET and kill the socket.
 */
2880 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2882 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2883 struct l2cap_disconn_rsp rsp;
2887 scid = __le16_to_cpu(req->scid);
2888 dcid = __le16_to_cpu(req->dcid);
2890 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our local scid */
2892 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2896 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2897 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2898 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2900 sk->sk_shutdown = SHUTDOWN_MASK;
2902 skb_queue_purge(TX_QUEUE(sk));
2904 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2905 skb_queue_purge(SREJ_QUEUE(sk));
2906 del_timer(&l2cap_pi(sk)->retrans_timer);
2907 del_timer(&l2cap_pi(sk)->monitor_timer);
2910 l2cap_chan_del(sk, ECONNRESET);
2913 l2cap_sock_kill(sk);
/* Handle a Disconnection Response to our request: purge queues, stop
 * ERTM timers, remove the channel (no error) and kill the socket.
 */
2917 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2919 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2923 scid = __le16_to_cpu(rsp->scid);
2924 dcid = __le16_to_cpu(rsp->dcid);
2926 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2928 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2932 skb_queue_purge(TX_QUEUE(sk));
2934 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2935 skb_queue_purge(SREJ_QUEUE(sk));
2936 del_timer(&l2cap_pi(sk)->retrans_timer);
2937 del_timer(&l2cap_pi(sk)->monitor_timer);
/* err == 0: this is a clean, locally initiated disconnect */
2940 l2cap_chan_del(sk, 0);
2943 l2cap_sock_kill(sk);
/* Handle an incoming Information Request.
 * Answers FEAT_MASK with our feature mask (extended with ERTM/Streaming
 * when enabled), FIXED_CHAN with the fixed channel map, and anything
 * else with NOTSUPP.
 */
2947 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2949 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2952 type = __le16_to_cpu(req->type);
2954 BT_DBG("type 0x%4.4x", type);
2956 if (type == L2CAP_IT_FEAT_MASK) {
2958 u32 feat_mask = l2cap_feat_mask;
2959 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2960 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2961 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2963 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2965 put_unaligned_le32(feat_mask, rsp->data);
2966 l2cap_send_cmd(conn, cmd->ident,
2967 L2CAP_INFO_RSP, sizeof(buf), buf);
2968 } else if (type == L2CAP_IT_FIXED_CHAN) {
2970 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2971 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2972 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed channel bitmap follows the 4-byte info_rsp header */
2973 memcpy(buf + 4, l2cap_fixed_chan, 8);
2974 l2cap_send_cmd(conn, cmd->ident,
2975 L2CAP_INFO_RSP, sizeof(buf), buf);
2977 struct l2cap_info_rsp rsp;
2978 rsp.type = cpu_to_le16(type);
2979 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2980 l2cap_send_cmd(conn, cmd->ident,
2981 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response to our request.
 * After FEAT_MASK, chain a FIXED_CHAN query when the peer advertises
 * fixed channel support; otherwise (and after FIXED_CHAN) mark the
 * exchange complete and start any channels waiting on it.
 */
2987 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2989 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2992 type = __le16_to_cpu(rsp->type);
2993 result = __le16_to_cpu(rsp->result);
2995 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2997 del_timer(&conn->info_timer);
2999 if (type == L2CAP_IT_FEAT_MASK) {
3000 conn->feat_mask = get_unaligned_le32(rsp->data);
3002 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3003 struct l2cap_info_req req;
3004 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3006 conn->info_ident = l2cap_get_ident(conn);
3008 l2cap_send_cmd(conn, conn->info_ident,
3009 L2CAP_INFO_REQ, sizeof(req), &req);
3011 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3012 conn->info_ident = 0;
3014 l2cap_conn_start(conn);
3016 } else if (type == L2CAP_IT_FIXED_CHAN) {
3017 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3018 conn->info_ident = 0;
3020 l2cap_conn_start(conn);
/* Demultiplex the signalling channel: copy the raw frame to raw
 * sockets, then walk the command headers in the skb, validating each
 * (length within the frame, non-zero ident) and dispatching to the
 * per-command handlers. A handler error produces a Command Reject.
 */
3026 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3028 u8 *data = skb->data;
3030 struct l2cap_cmd_hdr cmd;
3033 l2cap_raw_recv(conn, skb);
3035 while (len >= L2CAP_CMD_HDR_SIZE) {
3037 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3038 data += L2CAP_CMD_HDR_SIZE;
3039 len -= L2CAP_CMD_HDR_SIZE;
3041 cmd_len = le16_to_cpu(cmd.len);
3043 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject truncated commands and the reserved ident 0 */
3045 if (cmd_len > len || !cmd.ident) {
3046 BT_DBG("corrupted command");
3051 case L2CAP_COMMAND_REJ:
3052 l2cap_command_rej(conn, &cmd, data);
3055 case L2CAP_CONN_REQ:
3056 err = l2cap_connect_req(conn, &cmd, data);
3059 case L2CAP_CONN_RSP:
3060 err = l2cap_connect_rsp(conn, &cmd, data);
3063 case L2CAP_CONF_REQ:
3064 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3067 case L2CAP_CONF_RSP:
3068 err = l2cap_config_rsp(conn, &cmd, data);
3071 case L2CAP_DISCONN_REQ:
3072 err = l2cap_disconnect_req(conn, &cmd, data);
3075 case L2CAP_DISCONN_RSP:
3076 err = l2cap_disconnect_rsp(conn, &cmd, data);
3079 case L2CAP_ECHO_REQ:
/* Echo simply mirrors the request payload back */
3080 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3083 case L2CAP_ECHO_RSP:
3086 case L2CAP_INFO_REQ:
3087 err = l2cap_information_req(conn, &cmd, data);
3090 case L2CAP_INFO_RSP:
3091 err = l2cap_information_rsp(conn, &cmd, data);
3095 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3101 struct l2cap_cmd_rej rej;
3102 BT_DBG("error %d", err);
3104 /* FIXME: Map err to a valid reason */
3105 rej.reason = cpu_to_le16(0);
3106 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received ERTM/Streaming frame.
 * Trims the 2 FCS bytes off skb->len first; the bytes remain readable
 * at skb->data + skb->len for comparison. The CRC covers the L2CAP
 * header and control field (hdr_size bytes before skb->data) plus the
 * trimmed payload. No-op when FCS is not CRC16.
 */
3116 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3118 u16 our_fcs, rcv_fcs;
3119 int hdr_size = L2CAP_HDR_SIZE + 2;
3121 if (pi->fcs == L2CAP_FCS_CRC16) {
3122 skb_trim(skb, skb->len - 2);
3123 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3124 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3126 if (our_fcs != rcv_fcs)
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq so reassembly can drain it in order.
 * Tags the skb control block with its tx_seq and SAR bits first.
 */
3132 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3134 struct sk_buff *next_skb;
3136 bt_cb(skb)->tx_seq = tx_seq;
3137 bt_cb(skb)->sar = sar;
3139 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: just append */
3141 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Walk forward until we find a frame with a larger tx_seq */
3146 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3147 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3151 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3154 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Largest tx_seq so far: append at the tail */
3156 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble received I-frames into SDUs according to their SAR bits.
 * UNSEGMENTED frames are delivered directly; START allocates pi->sdu
 * (sized from the 16-bit SDU-length prefix) and begins accumulation;
 * CONTINUE appends with overflow checking; END appends, and when the
 * accumulated length matches the announced SDU length, delivers a
 * clone of the reassembled SDU to the socket's receive queue.
 */
3159 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3161 struct l2cap_pinfo *pi = l2cap_pi(sk);
3162 struct sk_buff *_skb;
3165 switch (control & L2CAP_CTRL_SAR) {
3166 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while mid-SDU is a protocol violation */
3167 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3172 err = sock_queue_rcv_skb(sk, skb);
3178 case L2CAP_SDU_START:
/* A new START while a reassembly is in progress is invalid */
3179 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3184 pi->sdu_len = get_unaligned_le16(skb->data);
3187 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3193 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3195 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3196 pi->partial_sdu_len = skb->len;
3200 case L2CAP_SDU_CONTINUE:
3201 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3204 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3206 pi->partial_sdu_len += skb->len;
/* Peer sent more data than the announced SDU length */
3207 if (pi->partial_sdu_len > pi->sdu_len)
3215 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3218 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3220 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3221 pi->partial_sdu_len += skb->len;
3223 if (pi->partial_sdu_len == pi->sdu_len) {
3224 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3225 err = sock_queue_rcv_skb(sk, _skb);
3239 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3241 struct sk_buff *skb;
3244 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3245 if (bt_cb(skb)->tx_seq != tx_seq)
3248 skb = skb_dequeue(SREJ_QUEUE(sk));
3249 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3250 l2cap_sar_reassembly_sdu(sk, skb, control);
3251 l2cap_pi(sk)->buffer_seq_srej =
3252 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3257 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3259 struct l2cap_pinfo *pi = l2cap_pi(sk);
3260 struct srej_list *l, *tmp;
3263 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3264 if (l->tx_seq == tx_seq) {
3269 control = L2CAP_SUPER_SELECT_REJECT;
3270 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3271 l2cap_send_sframe(pi, control);
3273 list_add_tail(&l->list, SREJ_LIST(sk));
3277 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3279 struct l2cap_pinfo *pi = l2cap_pi(sk);
3280 struct srej_list *new;
3283 while (tx_seq != pi->expected_tx_seq) {
3284 control = L2CAP_SUPER_SELECT_REJECT;
3285 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3286 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3287 control |= L2CAP_CTRL_POLL;
3288 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3290 l2cap_send_sframe(pi, control);
3292 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3293 new->tx_seq = pi->expected_tx_seq++;
3294 list_add_tail(&new->list, SREJ_LIST(sk));
3296 pi->expected_tx_seq++;
/* ERTM receive path for I-frames (numbered information frames).
 * Acks outstanding transmissions, delivers in-sequence frames through
 * SAR reassembly, and drives the SREJ (selective reject) recovery
 * machinery for out-of-sequence frames.
 * NOTE(review): this listing appears to have elided lines (braces,
 * gotos, else-arms); comments annotate only the visible logic. */
3299 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3301 struct l2cap_pinfo *pi = l2cap_pi(sk);
3302 u8 tx_seq = __get_txseq(rx_control);
3303 u8 req_seq = __get_reqseq(rx_control);
3305 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3308 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* The piggybacked ReqSeq acknowledges our transmitted frames. */
3310 pi->expected_ack_seq = req_seq;
3311 l2cap_drop_acked_frames(sk);
/* In-sequence frame: jump to the normal delivery path below. */
3313 if (tx_seq == pi->expected_tx_seq)
/* Out of sequence while SREJ recovery is already running: */
3316 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3317 struct srej_list *first;
3319 first = list_first_entry(SREJ_LIST(sk),
3320 struct srej_list, list);
/* The oldest missing frame arrived: queue it, flush any gap
 * that closed, and retire its SREJ-list record. */
3321 if (tx_seq == first->tx_seq) {
3322 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3323 l2cap_check_srej_gap(sk, tx_seq);
3325 list_del(&first->list);
/* All requested frames received: resynchronize buffer_seq and
 * leave SREJ recovery. */
3328 if (list_empty(SREJ_LIST(sk))) {
3329 pi->buffer_seq = pi->buffer_seq_srej;
3330 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
/* Not the oldest missing frame: hold it, then either re-ask for
 * a frame we already SREJed or SREJ the newly discovered gap. */
3333 struct srej_list *l;
3334 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3336 list_for_each_entry(l, SREJ_LIST(sk), list) {
3337 if (l->tx_seq == tx_seq) {
3338 l2cap_resend_srejframe(sk, tx_seq);
3342 l2cap_send_srejframe(sk, tx_seq);
/* First out-of-sequence frame: enter SREJ recovery, snapshot the
 * delivery sequence, park the frame and request the missing ones
 * (first SREJ carries the poll bit). */
3345 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3347 INIT_LIST_HEAD(SREJ_LIST(sk));
3348 pi->buffer_seq_srej = pi->buffer_seq;
3350 __skb_queue_head_init(SREJ_QUEUE(sk));
3351 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3353 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3355 l2cap_send_srejframe(sk, tx_seq);
/* --- expected (in-sequence) delivery path --- */
3360 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* During SREJ recovery even in-sequence frames are parked so the
 * SDU stream is rebuilt in order from the hold queue. */
3362 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3363 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* F bit set: either clears a pending REJ, or restarts
 * transmission from the acknowledged point. */
3367 if (rx_control & L2CAP_CTRL_FINAL) {
3368 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3369 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3371 sk->sk_send_head = TX_QUEUE(sk)->next;
3372 pi->next_tx_seq = pi->expected_ack_seq;
3373 l2cap_ertm_send(sk);
3377 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3379 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Every NUM_TO_ACK frames, send an RR S-frame to ack progress. */
3383 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3384 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3385 tx_control |= L2CAP_SUPER_RCV_READY;
3386 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3387 l2cap_send_sframe(pi, tx_control);
/* ERTM receive path for S-frames (supervisory frames: RR, REJ, SREJ,
 * RNR).  Updates ack state and drives retransmission per the P/F bits.
 * NOTE(review): this listing appears to have elided lines (breaks,
 * else-arms); comments annotate only the visible logic. */
3392 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3394 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* For S-frames the sequence of interest is the ReqSeq field. */
3395 u8 tx_seq = __get_reqseq(rx_control);
3397 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3399 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3400 case L2CAP_SUPER_RCV_READY:
/* RR with P bit: peer polls us — answer with RR + F bit
 * carrying our current buffer_seq. */
3401 if (rx_control & L2CAP_CTRL_POLL) {
3402 u16 control = L2CAP_CTRL_FINAL;
3403 control |= L2CAP_SUPER_RCV_READY |
3404 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3405 l2cap_send_sframe(l2cap_pi(sk), control);
3406 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* RR with F bit: response to our poll — ack frames, either
 * clear a pending REJ or rewind and retransmit, and stop the
 * monitor timer that was waiting for this F bit. */
3408 } else if (rx_control & L2CAP_CTRL_FINAL) {
3409 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3410 pi->expected_ack_seq = tx_seq;
3411 l2cap_drop_acked_frames(sk);
3413 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3414 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3416 sk->sk_send_head = TX_QUEUE(sk)->next;
3417 pi->next_tx_seq = pi->expected_ack_seq;
3418 l2cap_ertm_send(sk);
3421 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
3424 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3425 del_timer(&pi->monitor_timer);
3427 if (pi->unacked_frames > 0)
3428 __mod_retrans_timer();
/* Plain RR: simple acknowledgement; keep sending. */
3430 pi->expected_ack_seq = tx_seq;
3431 l2cap_drop_acked_frames(sk);
3433 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3434 (pi->unacked_frames > 0))
3435 __mod_retrans_timer();
3437 l2cap_ertm_send(sk);
3438 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* REJ: peer rejects from ReqSeq onward — ack up to that point
 * and retransmit everything from it. */
3442 case L2CAP_SUPER_REJECT:
3443 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3445 pi->expected_ack_seq = __get_reqseq(rx_control);
3446 l2cap_drop_acked_frames(sk);
3448 if (rx_control & L2CAP_CTRL_FINAL) {
3449 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3450 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3452 sk->sk_send_head = TX_QUEUE(sk)->next;
3453 pi->next_tx_seq = pi->expected_ack_seq;
3454 l2cap_ertm_send(sk);
3457 sk->sk_send_head = TX_QUEUE(sk)->next;
3458 pi->next_tx_seq = pi->expected_ack_seq;
3459 l2cap_ertm_send(sk);
/* While we owe the peer an F bit, remember the REJ so its
 * eventual F-bit response is not acted on twice. */
3461 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3462 pi->srej_save_reqseq = tx_seq;
3463 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* SREJ: peer selectively rejects exactly one frame. */
3469 case L2CAP_SUPER_SELECT_REJECT:
3470 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3472 if (rx_control & L2CAP_CTRL_POLL) {
3473 l2cap_retransmit_frame(sk, tx_seq);
3474 pi->expected_ack_seq = tx_seq;
3475 l2cap_drop_acked_frames(sk);
3476 l2cap_ertm_send(sk);
3477 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3478 pi->srej_save_reqseq = tx_seq;
3479 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* F-bit SREJ: skip the retransmit if this is the SREJ we
 * already acted on while polling. */
3481 } else if (rx_control & L2CAP_CTRL_FINAL) {
3482 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3483 pi->srej_save_reqseq == tx_seq)
3484 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3486 l2cap_retransmit_frame(sk, tx_seq);
3489 l2cap_retransmit_frame(sk, tx_seq);
3490 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3491 pi->srej_save_reqseq = tx_seq;
3492 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* RNR: peer cannot receive — mark busy, ack what it did take,
 * and stop the retransmission timer. */
3497 case L2CAP_SUPER_RCV_NOT_READY:
3498 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3499 pi->expected_ack_seq = tx_seq;
3500 l2cap_drop_acked_frames(sk);
3502 del_timer(&l2cap_pi(sk)->retrans_timer);
3503 if (rx_control & L2CAP_CTRL_POLL) {
3504 u16 control = L2CAP_CTRL_FINAL;
3505 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Dispatch an incoming data frame on a connection-oriented channel
 * (looked up by source CID) according to the channel mode.
 * NOTE(review): this listing appears to have elided lines (gotos,
 * drop/done labels); comments annotate only the visible logic. */
3513 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3516 struct l2cap_pinfo *pi;
3521 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3523 BT_DBG("unknown cid 0x%4.4x", cid);
3529 BT_DBG("sk %p, len %d", sk, skb->len);
3531 if (sk->sk_state != BT_CONNECTED)
3535 case L2CAP_MODE_BASIC:
3536 /* If socket recv buffers overflows we drop data here
3537 * which is *bad* because L2CAP has to be reliable.
3538 * But we don't have any other choice. L2CAP doesn't
3539 * provide flow control mechanism. */
/* Frames larger than the negotiated incoming MTU are dropped. */
3541 if (pi->imtu < skb->len)
3544 if (!sock_queue_rcv_skb(sk, skb))
3548 case L2CAP_MODE_ERTM:
/* First two payload bytes are the ERTM control field. */
3549 control = get_unaligned_le16(skb->data)
3553 if (__is_sar_start(control))
3556 if (pi->fcs == L2CAP_FCS_CRC16)
3560 * We can just drop the corrupted I-frame here.
3561 * Receiver will miss it and start proper recovery
3562 * procedures and ask retransmission.
/* Oversized or checksum-corrupted frames are silently dropped;
 * ERTM recovery will request a retransmission. */
3564 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3567 if (l2cap_check_fcs(pi, skb))
3570 if (__is_iframe(control))
3571 err = l2cap_data_channel_iframe(sk, control, skb);
3573 err = l2cap_data_channel_sframe(sk, control, skb);
3579 case L2CAP_MODE_STREAMING:
3580 control = get_unaligned_le16(skb->data);
3584 if (__is_sar_start(control))
3587 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode carries no S-frames; any are invalid. */
3590 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3593 if (l2cap_check_fcs(pi, skb))
3596 tx_seq = __get_txseq(control);
/* Streaming mode never retransmits: on a gap, just resync the
 * expected sequence number past the received frame. */
3598 if (pi->expected_tx_seq == tx_seq)
3599 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3601 pi->expected_tx_seq = tx_seq + 1;
3603 err = l2cap_sar_reassembly_sdu(sk, skb, control);
3608 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
3622 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3626 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3630 BT_DBG("sk %p, len %d", sk, skb->len);
3632 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3635 if (l2cap_pi(sk)->imtu < skb->len)
3638 if (!sock_queue_rcv_skb(sk, skb))
3650 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3652 struct l2cap_hdr *lh = (void *) skb->data;
3656 skb_pull(skb, L2CAP_HDR_SIZE);
3657 cid = __le16_to_cpu(lh->cid);
3658 len = __le16_to_cpu(lh->len);
3660 if (len != skb->len) {
3665 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3668 case L2CAP_CID_SIGNALING:
3669 l2cap_sig_channel(conn, skb);
3672 case L2CAP_CID_CONN_LESS:
3673 psm = get_unaligned_le16(skb->data);
3675 l2cap_conless_channel(conn, psm, skb);
3679 l2cap_data_channel(conn, cid, skb);
3684 /* ---- L2CAP interface with lower layer (HCI) ---- */
3686 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3688 int exact = 0, lm1 = 0, lm2 = 0;
3689 register struct sock *sk;
3690 struct hlist_node *node;
3692 if (type != ACL_LINK)
3695 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3697 /* Find listening sockets and check their link_mode */
3698 read_lock(&l2cap_sk_list.lock);
3699 sk_for_each(sk, node, &l2cap_sk_list.head) {
3700 if (sk->sk_state != BT_LISTEN)
3703 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3704 lm1 |= HCI_LM_ACCEPT;
3705 if (l2cap_pi(sk)->role_switch)
3706 lm1 |= HCI_LM_MASTER;
3708 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3709 lm2 |= HCI_LM_ACCEPT;
3710 if (l2cap_pi(sk)->role_switch)
3711 lm2 |= HCI_LM_MASTER;
3714 read_unlock(&l2cap_sk_list.lock);
3716 return exact ? lm1 : lm2;
3719 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3721 struct l2cap_conn *conn;
3723 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3725 if (hcon->type != ACL_LINK)
3729 conn = l2cap_conn_add(hcon, status);
3731 l2cap_conn_ready(conn);
3733 l2cap_conn_del(hcon, bt_err(status));
3738 static int l2cap_disconn_ind(struct hci_conn *hcon)
3740 struct l2cap_conn *conn = hcon->l2cap_data;
3742 BT_DBG("hcon %p", hcon);
3744 if (hcon->type != ACL_LINK || !conn)
3747 return conn->disc_reason;
3750 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3752 BT_DBG("hcon %p reason %d", hcon, reason);
3754 if (hcon->type != ACL_LINK)
3757 l2cap_conn_del(hcon, bt_err(reason));
3762 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3764 if (sk->sk_type != SOCK_SEQPACKET)
3767 if (encrypt == 0x00) {
3768 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3769 l2cap_sock_clear_timer(sk);
3770 l2cap_sock_set_timer(sk, HZ * 5);
3771 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3772 __l2cap_sock_close(sk, ECONNREFUSED);
3774 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3775 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption completed for hcon — walk
 * every channel on the connection and advance its state machine
 * (issue the pending Connect Request, or answer a held Connect
 * Response) according to the security outcome.
 * NOTE(review): this listing appears to have elided lines (locking,
 * continue/else arms); comments annotate only the visible logic. */
3779 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3781 struct l2cap_chan_list *l;
3782 struct l2cap_conn *conn = hcon->l2cap_data;
3788 l = &conn->chan_list;
3790 BT_DBG("conn %p", conn);
3792 read_lock(&l->lock);
3794 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channels still waiting on connect completion are skipped. */
3797 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
/* Established channels only need their encryption policy
 * re-checked. */
3802 if (!status && (sk->sk_state == BT_CONNECTED ||
3803 sk->sk_state == BT_CONFIG)) {
3804 l2cap_check_encryption(sk, encrypt);
/* Security done while we were the initiator: now send the
 * deferred L2CAP Connect Request. */
3809 if (sk->sk_state == BT_CONNECT) {
3811 struct l2cap_conn_req req;
3812 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3813 req.psm = l2cap_pi(sk)->psm;
3815 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3817 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3818 L2CAP_CONN_REQ, sizeof(req), &req);
3820 l2cap_sock_clear_timer(sk);
3821 l2cap_sock_set_timer(sk, HZ / 10);
/* Security done while we were the acceptor: answer the held
 * Connect Request — success proceeds to CONFIG, failure sends
 * a security block and schedules disconnect. */
3823 } else if (sk->sk_state == BT_CONNECT2) {
3824 struct l2cap_conn_rsp rsp;
3828 sk->sk_state = BT_CONFIG;
3829 result = L2CAP_CR_SUCCESS;
3831 sk->sk_state = BT_DISCONN;
3832 l2cap_sock_set_timer(sk, HZ / 10);
3833 result = L2CAP_CR_SEC_BLOCK;
3836 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3837 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3838 rsp.result = cpu_to_le16(result);
3839 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3840 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3841 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3847 read_unlock(&l->lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * ACL_START fragments carry the L2CAP header and open a new rx_skb;
 * continuation fragments are appended until rx_len reaches zero, then
 * the complete frame is dispatched via l2cap_recv_frame().
 * NOTE(review): this listing appears to have elided lines (gotos and
 * drop/free paths); comments annotate only the visible logic. */
3852 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3854 struct l2cap_conn *conn = hcon->l2cap_data;
3856 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3859 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3861 if (flags & ACL_START) {
3862 struct l2cap_hdr *hdr;
/* A new start while a reassembly is pending means we lost a
 * fragment: discard the stale partial frame. */
3866 BT_ERR("Unexpected start frame (len %d)", skb->len);
3867 kfree_skb(conn->rx_skb);
3868 conn->rx_skb = NULL;
3870 l2cap_conn_unreliable(conn, ECOMM);
/* A start fragment must at least hold the L2CAP header. */
3874 BT_ERR("Frame is too short (len %d)", skb->len);
3875 l2cap_conn_unreliable(conn, ECOMM);
3879 hdr = (struct l2cap_hdr *) skb->data;
3880 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3882 if (len == skb->len) {
3883 /* Complete frame received */
3884 l2cap_recv_frame(conn, skb);
3888 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3890 if (skb->len > len) {
3891 BT_ERR("Frame is too long (len %d, expected len %d)",
3893 l2cap_conn_unreliable(conn, ECOMM);
3897 /* Allocate skb for the complete frame (with header) */
3898 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3902 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still outstanding. */
3904 conn->rx_len = len - skb->len;
3906 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress: protocol error. */
3908 if (!conn->rx_len) {
3909 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3910 l2cap_conn_unreliable(conn, ECOMM);
3914 if (skb->len > conn->rx_len) {
3915 BT_ERR("Fragment is too long (len %d, expected %d)",
3916 skb->len, conn->rx_len);
3917 kfree_skb(conn->rx_skb);
3918 conn->rx_skb = NULL;
3920 l2cap_conn_unreliable(conn, ECOMM);
3924 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3926 conn->rx_len -= skb->len;
3928 if (!conn->rx_len) {
3929 /* Complete frame received */
3930 l2cap_recv_frame(conn, conn->rx_skb);
3931 conn->rx_skb = NULL;
3940 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3943 struct hlist_node *node;
3946 read_lock_bh(&l2cap_sk_list.lock);
3948 sk_for_each(sk, node, &l2cap_sk_list.head) {
3949 struct l2cap_pinfo *pi = l2cap_pi(sk);
3951 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3952 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3953 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3954 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3957 read_unlock_bh(&l2cap_sk_list.lock);
3962 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/* Socket operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets; generic
 * bt_sock_* and sock_no_* helpers fill the slots L2CAP does not
 * implement itself. */
3964 static const struct proto_ops l2cap_sock_ops = {
3965 .family = PF_BLUETOOTH,
3966 .owner = THIS_MODULE,
3967 .release = l2cap_sock_release,
3968 .bind = l2cap_sock_bind,
3969 .connect = l2cap_sock_connect,
3970 .listen = l2cap_sock_listen,
3971 .accept = l2cap_sock_accept,
3972 .getname = l2cap_sock_getname,
3973 .sendmsg = l2cap_sock_sendmsg,
3974 .recvmsg = l2cap_sock_recvmsg,
3975 .poll = bt_sock_poll,
3976 .ioctl = bt_sock_ioctl,
3977 .mmap = sock_no_mmap,
3978 .socketpair = sock_no_socketpair,
3979 .shutdown = l2cap_sock_shutdown,
3980 .setsockopt = l2cap_sock_setsockopt,
3981 .getsockopt = l2cap_sock_getsockopt
/* Registered with the Bluetooth socket layer so socket(PF_BLUETOOTH,
 * ..., BTPROTO_L2CAP) creates an L2CAP socket. */
3984 static const struct net_proto_family l2cap_sock_family_ops = {
3985 .family = PF_BLUETOOTH,
3986 .owner = THIS_MODULE,
3987 .create = l2cap_sock_create,
/* Hooks registered with the HCI core: connection/disconnection
 * indications and confirmations, security results, and inbound ACL
 * data delivery. */
3990 static struct hci_proto l2cap_hci_proto = {
3992 .id = HCI_PROTO_L2CAP,
3993 .connect_ind = l2cap_connect_ind,
3994 .connect_cfm = l2cap_connect_cfm,
3995 .disconn_ind = l2cap_disconn_ind,
3996 .disconn_cfm = l2cap_disconn_cfm,
3997 .security_cfm = l2cap_security_cfm,
3998 .recv_acldata = l2cap_recv_acldata
4001 static int __init l2cap_init(void)
4005 err = proto_register(&l2cap_proto, 0);
4009 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4011 BT_ERR("L2CAP socket registration failed");
4015 err = hci_register_proto(&l2cap_hci_proto);
4017 BT_ERR("L2CAP protocol registration failed");
4018 bt_sock_unregister(BTPROTO_L2CAP);
4022 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
4023 BT_ERR("Failed to create L2CAP info file");
4025 BT_INFO("L2CAP ver %s", VERSION);
4026 BT_INFO("L2CAP socket layer initialized");
4031 proto_unregister(&l2cap_proto);
4035 static void __exit l2cap_exit(void)
4037 class_remove_file(bt_class, &class_attr_l2cap);
4039 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4040 BT_ERR("L2CAP socket unregistration failed");
4042 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4043 BT_ERR("L2CAP protocol unregistration failed");
4045 proto_unregister(&l2cap_proto);
/* Dummy function to trigger automatic L2CAP module loading by
 * other modules that use L2CAP sockets but don't use any other
 * symbols from it. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
4057 module_init(l2cap_init);
4058 module_exit(l2cap_exit);
4060 module_param(enable_ertm, bool, 0644);
4061 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4063 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4064 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4065 MODULE_VERSION(VERSION);
4066 MODULE_LICENSE("GPL");
4067 MODULE_ALIAS("bt-proto-0");