2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* File-scope state and forward declarations: the socket-ops vtable
 * (defined at the bottom of the file), the global list of all L2CAP
 * sockets, and functions referenced before their definitions. */
64 static const struct proto_ops l2cap_sock_ops;
66 static struct bt_sock_list l2cap_sk_list = {
67 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
70 static void __l2cap_sock_close(struct sock *sk, int reason);
71 static void l2cap_sock_close(struct sock *sk);
72 static void l2cap_sock_kill(struct sock *sk);
74 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
75 u8 code, u8 ident, u16 dlen, void *data);
77 /* ---- L2CAP timers ---- */
/* sk_timer callback: the per-socket timeout expired.  Picks an errno
 * based on the socket state and tears the connection down through
 * __l2cap_sock_close().  Runs in timer (softirq) context; the default
 * reason assignment and socket locking are elided from this excerpt. */
78 static void l2cap_sock_timeout(unsigned long arg)
80 struct sock *sk = (struct sock *) arg;
83 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* Established or configuring channels, and outgoing connects past the
 * SDP security stage, report "connection refused" to the caller. */
87 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
88 reason = ECONNREFUSED;
89 else if (sk->sk_state == BT_CONNECT &&
90 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
91 reason = ECONNREFUSED;
95 __l2cap_sock_close(sk, reason);
/* Arm (or re-arm) the socket timer to fire @timeout jiffies from now. */
103 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
105 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
106 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timeout (drops the timer's socket reference). */
109 static void l2cap_sock_clear_timer(struct sock *sk)
111 BT_DBG("sock %p state %d", sk, sk->sk_state);
112 sk_stop_timer(sk, &sk->sk_timer);
115 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list for the channel whose destination
 * CID matches @cid.  Caller must hold the chan_list lock. */
116 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
119 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
120 if (l2cap_pi(s)->dcid == cid)
/* Lockless lookup of the channel whose source CID matches @cid.
 * Caller must hold the chan_list lock. */
126 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
129 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
130 if (l2cap_pi(s)->scid == cid)
136 /* Find channel with given SCID.
137 * Returns locked socket */
/* Locked wrapper around __l2cap_get_chan_by_scid(): takes the list read
 * lock for the lookup (the bh_lock_sock on the hit is elided here). */
138 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
142 s = __l2cap_get_chan_by_scid(l, cid);
145 read_unlock(&l->lock);
/* Lockless lookup of the channel with a pending signalling exchange
 * identified by @ident.  Caller must hold the chan_list lock. */
149 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
152 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
153 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(); returns the
 * matching socket locked (bh_lock_sock call elided in this excerpt). */
159 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
163 s = __l2cap_get_chan_by_ident(l, ident);
166 read_unlock(&l->lock);
/* Allocate the first free dynamic source CID on this connection by
 * scanning [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END).  Caller must hold
 * the chan_list lock so the chosen CID cannot be raced. */
170 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
172 u16 cid = L2CAP_CID_DYN_START;
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push @sk onto the head of the connection's doubly linked channel
 * list.  Caller must hold the chan_list write lock. */
182 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
187 l2cap_pi(l->head)->prev_c = sk;
189 l2cap_pi(sk)->next_c = l->head;
190 l2cap_pi(sk)->prev_c = NULL;
/* Remove @sk from the connection's channel list, fixing up both
 * neighbours' links under the chan_list write lock (the NULL checks on
 * next/prev and the head update are elided in this excerpt). */
194 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
196 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
198 write_lock_bh(&l->lock);
203 l2cap_pi(next)->prev_c = prev;
205 l2cap_pi(prev)->next_c = next;
206 write_unlock_bh(&l->lock);
/* Attach socket @sk to connection @conn: assign source/destination CIDs
 * according to the socket type, link it into the channel list, and (for
 * incoming channels) queue it on the listening @parent's accept queue.
 * Caller must hold the chan_list write lock (see l2cap_chan_add()). */
211 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
213 struct l2cap_chan_list *l = &conn->chan_list;
215 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
216 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: HCI "remote user terminated connection" as the default
 * disconnect reason while a channel exists. */
218 conn->disc_reason = 0x13;
220 l2cap_pi(sk)->conn = conn;
222 if (sk->sk_type == SOCK_SEQPACKET) {
223 /* Alloc CID for connection-oriented socket */
224 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
225 } else if (sk->sk_type == SOCK_DGRAM) {
226 /* Connectionless socket */
227 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
228 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
231 /* Raw socket can send/recv signalling messages only */
232 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
233 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
237 __l2cap_chan_link(l, sk);
240 bt_accept_enqueue(parent, sk);
/* Delete channel @sk with error @err: stop its timer, unlink it from
 * the connection, drop the hci_conn reference, mark the socket CLOSED
 * and ZAPPED, and wake whoever is waiting (the accepting parent if the
 * channel was never accepted, otherwise the socket itself). */
244 * Must be called on the locked socket. */
245 static void l2cap_chan_del(struct sock *sk, int err)
247 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
248 struct sock *parent = bt_sk(sk)->parent;
250 l2cap_sock_clear_timer(sk);
252 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
255 /* Unlink from channel list */
256 l2cap_chan_unlink(&conn->chan_list, sk);
257 l2cap_pi(sk)->conn = NULL;
258 hci_conn_put(conn->hcon);
261 sk->sk_state = BT_CLOSED;
262 sock_set_flag(sk, SOCK_ZAPPED);
/* Not yet accepted: detach from the parent's accept queue and notify
 * the listener; otherwise just signal the state change. */
268 bt_accept_unlink(sk);
269 parent->sk_data_ready(parent, 0);
271 sk->sk_state_change(sk);
274 /* Service level security */
/* Map the channel's PSM and security level to an HCI authentication
 * requirement and ask the HCI layer to enforce it.  PSM 0x0001 (SDP)
 * never pairs: it gets a no-bonding requirement and its security level
 * is pinned to BT_SECURITY_SDP.  Returns the hci_conn_security()
 * result (nonzero when security is already satisfied). */
276 static inline int l2cap_check_security(struct sock *sk)
278 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
281 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
282 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
283 auth_type = HCI_AT_NO_BONDING_MITM;
285 auth_type = HCI_AT_NO_BONDING;
287 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
288 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
/* Non-SDP PSMs use general bonding, MITM-protected for HIGH
 * (break statements between cases are elided in this excerpt). */
290 switch (l2cap_pi(sk)->sec_level) {
291 case BT_SECURITY_HIGH:
292 auth_type = HCI_AT_GENERAL_BONDING_MITM;
294 case BT_SECURITY_MEDIUM:
295 auth_type = HCI_AT_GENERAL_BONDING;
298 auth_type = HCI_AT_NO_BONDING;
303 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for @conn.
 * Identifiers wrap within the kernel-owned range under conn->lock so
 * concurrent allocations never hand out the same value. */
306 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
310 /* Get next available identifier.
311 * 1 - 128 are used by kernel.
312 * 129 - 199 are reserved.
313 * 200 - 254 are used by utilities like l2ping, etc.
316 spin_lock_bh(&conn->lock);
/* Wrap back to the start of the kernel range past 128 (the reset
 * assignment is elided in this excerpt). */
318 if (++conn->tx_ident > 128)
323 spin_unlock_bh(&conn->lock);
/* Build a signalling command (@code/@ident with @len bytes of @data)
 * and transmit it on the connection's ACL link.  The NULL check on the
 * built skb is elided in this excerpt. */
328 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
330 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
332 BT_DBG("code 0x%2.2x", code);
337 return hci_send_acl(conn->hcon, skb, 0);
/* Build and send an ERTM S-frame carrying @control on channel @pi.
 * Layout: basic L2CAP header + 16-bit control field, plus a trailing
 * CRC16 FCS when the channel negotiated L2CAP_FCS_CRC16. */
340 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
343 struct l2cap_hdr *lh;
344 struct l2cap_conn *conn = pi->conn;
345 int count, hlen = L2CAP_HDR_SIZE + 2;
/* FCS adds two more bytes to the frame (hlen += 2 is elided). */
347 if (pi->fcs == L2CAP_FCS_CRC16)
350 BT_DBG("pi %p, control 0x%2.2x", pi, control);
352 count = min_t(unsigned int, conn->mtu, hlen);
353 control |= L2CAP_CTRL_FRAME_TYPE;
355 skb = bt_skb_alloc(count, GFP_ATOMIC);
359 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
360 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
361 lh->cid = cpu_to_le16(pi->dcid);
362 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the 2-byte FCS field itself. */
364 if (pi->fcs == L2CAP_FCS_CRC16) {
365 u16 fcs = crc16(0, (u8 *)lh, count - 2);
366 put_unaligned_le16(fcs, skb_put(skb, 2));
369 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send a Receiver Ready or Receiver Not Ready supervisory frame,
 * depending on whether the local side is busy, acknowledging frames up
 * to buffer_seq (the "else" between the two branches is elided). */
372 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
374 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
375 control |= L2CAP_SUPER_RCV_NOT_READY;
377 control |= L2CAP_SUPER_RCV_READY;
379 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
381 return l2cap_send_sframe(pi, control);
/* Kick off channel establishment on an existing connection.  If the
 * peer's feature mask is already known (or being fetched), send a
 * Connection Request once security allows; otherwise first issue an
 * Information Request for the feature mask and arm the info timer. */
384 static void l2cap_do_start(struct sock *sk)
386 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
388 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight: wait for its completion. */
389 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
392 if (l2cap_check_security(sk)) {
393 struct l2cap_conn_req req;
394 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
395 req.psm = l2cap_pi(sk)->psm;
397 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
399 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
400 L2CAP_CONN_REQ, sizeof(req), &req);
403 struct l2cap_info_req req;
404 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
406 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
407 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the Information Response. */
409 mod_timer(&conn->info_timer, jiffies +
410 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
412 l2cap_send_cmd(conn, conn->info_ident,
413 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a Disconnection Request for @sk's channel (dcid/scid pair) on
 * @conn, using a freshly allocated command identifier. */
417 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
419 struct l2cap_disconn_req req;
421 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
422 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
423 l2cap_send_cmd(conn, l2cap_get_ident(conn),
424 L2CAP_DISCONN_REQ, sizeof(req), &req);
427 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its signalling: channels in
 * BT_CONNECT send a Connection Request once security passes; channels
 * in BT_CONNECT2 (incoming, awaiting our response) send a Connection
 * Response whose result/status reflect security and defer_setup state.
 * Called once the connection is usable (per-socket bh locks elided). */
428 static void l2cap_conn_start(struct l2cap_conn *conn)
430 struct l2cap_chan_list *l = &conn->chan_list;
433 BT_DBG("conn %p", conn);
437 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented channels take part in this dance. */
440 if (sk->sk_type != SOCK_SEQPACKET) {
445 if (sk->sk_state == BT_CONNECT) {
446 if (l2cap_check_security(sk)) {
447 struct l2cap_conn_req req;
448 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
449 req.psm = l2cap_pi(sk)->psm;
451 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
453 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
454 L2CAP_CONN_REQ, sizeof(req), &req);
456 } else if (sk->sk_state == BT_CONNECT2) {
457 struct l2cap_conn_rsp rsp;
458 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
459 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
461 if (l2cap_check_security(sk)) {
/* defer_setup keeps the response pending until userspace
 * accepts; wake the listener so it can decide. */
462 if (bt_sk(sk)->defer_setup) {
463 struct sock *parent = bt_sk(sk)->parent;
464 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
465 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
466 parent->sk_data_ready(parent, 0);
469 sk->sk_state = BT_CONFIG;
470 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
471 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: report pending/authentication. */
474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
478 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
479 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
485 read_unlock(&l->lock);
/* The ACL link came up: mark raw/connectionless sockets connected
 * immediately, and start the L2CAP handshake for channels waiting in
 * BT_CONNECT (the l2cap_do_start() call is elided in this excerpt). */
488 static void l2cap_conn_ready(struct l2cap_conn *conn)
490 struct l2cap_chan_list *l = &conn->chan_list;
493 BT_DBG("conn %p", conn);
497 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
500 if (sk->sk_type != SOCK_SEQPACKET) {
501 l2cap_sock_clear_timer(sk);
502 sk->sk_state = BT_CONNECTED;
503 sk->sk_state_change(sk);
504 } else if (sk->sk_state == BT_CONNECT)
510 read_unlock(&l->lock);
513 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate a baseband reliability failure (@err) to every channel
 * that asked for force_reliable semantics (the sk_err assignment and
 * error-report callback are elided in this excerpt). */
514 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
516 struct l2cap_chan_list *l = &conn->chan_list;
519 BT_DBG("conn %p", conn);
523 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
524 if (l2cap_pi(sk)->force_reliable)
528 read_unlock(&l->lock);
/* Info-request timer expired without an Information Response: give up
 * on the feature exchange, mark it done, and let pending channels
 * proceed without the peer's feature mask. */
531 static void l2cap_info_timeout(unsigned long arg)
533 struct l2cap_conn *conn = (void *) arg;
535 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
536 conn->info_ident = 0;
538 l2cap_conn_start(conn);
/* Get or create the l2cap_conn attached to ACL link @hcon.  Allocates
 * and initializes connection state (locks, channel list, info timer)
 * on first use; returns the existing object otherwise (early-return
 * and allocation-failure paths are elided in this excerpt). */
541 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
543 struct l2cap_conn *conn = hcon->l2cap_data;
548 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
552 hcon->l2cap_data = conn;
555 BT_DBG("hcon %p conn %p", hcon, conn);
/* L2CAP PDUs are fragmented to the controller's ACL MTU. */
557 conn->mtu = hcon->hdev->acl_mtu;
558 conn->src = &hcon->hdev->bdaddr;
559 conn->dst = &hcon->dst;
563 spin_lock_init(&conn->lock);
564 rwlock_init(&conn->chan_list.lock);
566 setup_timer(&conn->info_timer, l2cap_info_timeout,
567 (unsigned long) conn);
/* Default disconnect reason: remote user terminated connection. */
569 conn->disc_reason = 0x13;
/* Tear down @hcon's L2CAP state: free any partial reassembly skb,
 * close every remaining channel with @err, cancel the info timer if it
 * was armed, and free the connection object (per-socket locking, the
 * sock_kill calls and the final kfree are elided in this excerpt). */
574 static void l2cap_conn_del(struct hci_conn *hcon, int err)
576 struct l2cap_conn *conn = hcon->l2cap_data;
582 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
584 kfree_skb(conn->rx_skb);
587 while ((sk = conn->chan_list.head)) {
589 l2cap_chan_del(sk, err);
594 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
595 del_timer_sync(&conn->info_timer);
597 hcon->l2cap_data = NULL;
/* Locked wrapper: add @sk to @conn's channel list under the chan_list
 * write lock.  See __l2cap_chan_add() for the actual setup. */
601 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
603 struct l2cap_chan_list *l = &conn->chan_list;
604 write_lock_bh(&l->lock);
605 __l2cap_chan_add(conn, sk, parent);
606 write_unlock_bh(&l->lock);
609 /* ---- Socket interface ---- */
/* Exact-match lookup: find the socket bound to source PSM @psm and
 * source address @src.  Caller must hold l2cap_sk_list.lock. */
610 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
613 struct hlist_node *node;
614 sk_for_each(sk, node, &l2cap_sk_list.head)
615 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
622 /* Find socket with psm and source bdaddr.
623 * Returns closest match.
/* Prefer a socket bound to the exact source address; fall back to one
 * bound to BDADDR_ANY (kept in sk1).  @state filters by socket state
 * when nonzero.  Caller must hold l2cap_sk_list.lock. */
625 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
627 struct sock *sk = NULL, *sk1 = NULL;
628 struct hlist_node *node;
630 sk_for_each(sk, node, &l2cap_sk_list.head) {
631 if (state && sk->sk_state != state)
634 if (l2cap_pi(sk)->psm == psm) {
636 if (!bacmp(&bt_sk(sk)->src, src))
640 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node is non-NULL only when the loop broke on an exact match. */
644 return node ? sk : sk1;
647 /* Find socket with given address (psm, src).
648 * Returns locked socket */
/* Locked wrapper around __l2cap_get_sock_by_psm(); the bh_lock_sock on
 * the found socket is elided in this excerpt. */
649 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
652 read_lock(&l2cap_sk_list.lock);
653 s = __l2cap_get_sock_by_psm(state, psm, src);
656 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: release any skbs still queued on the socket
 * when its last reference is dropped. */
660 static void l2cap_sock_destruct(struct sock *sk)
664 skb_queue_purge(&sk->sk_receive_queue);
665 skb_queue_purge(&sk->sk_write_queue);
/* Shut down a listening socket: close every connection still sitting
 * on the accept queue, then mark the parent closed and zapped. */
668 static void l2cap_sock_cleanup_listen(struct sock *parent)
672 BT_DBG("parent %p", parent);
674 /* Close not yet accepted channels */
675 while ((sk = bt_accept_dequeue(parent, NULL)))
676 l2cap_sock_close(sk);
678 parent->sk_state = BT_CLOSED;
679 sock_set_flag(parent, SOCK_ZAPPED);
682 /* Kill socket (only if zapped and orphan)
683 * Must be called on unlocked socket.
/* Final teardown: only proceeds for sockets that are both zapped and
 * orphaned (no struct socket attached); unlinks from the global list
 * and drops the list's reference (sock_put is elided here). */
685 static void l2cap_sock_kill(struct sock *sk)
687 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
690 BT_DBG("sk %p state %d", sk, sk->sk_state);
692 /* Kill poor orphan */
693 bt_sock_unlink(&l2cap_sk_list, sk);
694 sock_set_flag(sk, SOCK_DEAD);
/* State-machine close.  Caller holds the socket lock.  LISTEN sockets
 * drain their accept queue; connected/configuring SEQPACKET channels
 * send a Disconnection Request and wait in BT_DISCONN; half-open
 * incoming channels (BT_CONNECT2) answer the pending Connection
 * Request with a refusal before deleting the channel; everything else
 * is just zapped.  (case labels and fallthroughs elided in excerpt.) */
698 static void __l2cap_sock_close(struct sock *sk, int reason)
700 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
702 switch (sk->sk_state) {
704 l2cap_sock_cleanup_listen(sk);
709 if (sk->sk_type == SOCK_SEQPACKET) {
710 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Orderly disconnect: wait (bounded by sndtimeo) for the
 * peer's Disconnection Response. */
712 sk->sk_state = BT_DISCONN;
713 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
714 l2cap_send_disconn_req(conn, sk);
716 l2cap_chan_del(sk, reason);
720 if (sk->sk_type == SOCK_SEQPACKET) {
721 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
722 struct l2cap_conn_rsp rsp;
/* Closing before userspace accepted a deferred setup counts
 * as a security block; otherwise refuse with "bad PSM". */
725 if (bt_sk(sk)->defer_setup)
726 result = L2CAP_CR_SEC_BLOCK;
728 result = L2CAP_CR_BAD_PSM;
730 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
731 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
732 rsp.result = cpu_to_le16(result);
733 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
734 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
735 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
737 l2cap_chan_del(sk, reason);
742 l2cap_chan_del(sk, reason);
746 sock_set_flag(sk, SOCK_ZAPPED);
751 /* Must be called on unlocked socket. */
/* Public close path: stop the timer, run the state-machine close with
 * ECONNRESET under the socket lock, then reap the socket if it is now
 * a zapped orphan (lock/unlock and sock_kill elided in excerpt). */
752 static void l2cap_sock_close(struct sock *sk)
754 l2cap_sock_clear_timer(sk);
756 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a fresh L2CAP socket.  Children of a listening @parent
 * inherit its type and channel options; standalone sockets get the
 * defaults (the else separating the two branches is elided).  Also
 * sets the default flush timeout and the ERTM queues/lists. */
761 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
763 struct l2cap_pinfo *pi = l2cap_pi(sk);
768 sk->sk_type = parent->sk_type;
769 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
771 pi->imtu = l2cap_pi(parent)->imtu;
772 pi->omtu = l2cap_pi(parent)->omtu;
773 pi->mode = l2cap_pi(parent)->mode;
774 pi->fcs = l2cap_pi(parent)->fcs;
775 pi->sec_level = l2cap_pi(parent)->sec_level;
776 pi->role_switch = l2cap_pi(parent)->role_switch;
777 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: defaults for a brand-new socket. */
779 pi->imtu = L2CAP_DEFAULT_MTU;
781 pi->mode = L2CAP_MODE_BASIC;
782 pi->fcs = L2CAP_FCS_CRC16;
783 pi->sec_level = BT_SECURITY_LOW;
785 pi->force_reliable = 0;
788 /* Default config options */
790 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
791 skb_queue_head_init(TX_QUEUE(sk));
792 skb_queue_head_init(SREJ_QUEUE(sk));
793 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor registered with the socket layer; obj_size lets
 * sk_alloc() reserve room for the l2cap_pinfo tail. */
796 static struct proto l2cap_proto = {
798 .owner = THIS_MODULE,
799 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP struct sock: destructor,
 * send timeout, protocol number, BT_OPEN state, the per-socket timer,
 * and membership in the global socket list.  Returns NULL on
 * allocation failure (that early return is elided in this excerpt). */
802 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
806 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
810 sock_init_data(sock, sk);
811 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
813 sk->sk_destruct = l2cap_sock_destruct;
814 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
816 sock_reset_flag(sk, SOCK_ZAPPED);
818 sk->sk_protocol = proto;
819 sk->sk_state = BT_OPEN;
821 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
823 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend for PF_BLUETOOTH/BTPROTO_L2CAP.  Accepts only
 * SEQPACKET, DGRAM and RAW types; RAW additionally requires
 * CAP_NET_RAW for non-kernel callers. */
827 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
832 BT_DBG("sock %p", sock);
834 sock->state = SS_UNCONNECTED;
836 if (sock->type != SOCK_SEQPACKET &&
837 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
838 return -ESOCKTNOSUPPORT;
840 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
843 sock->ops = &l2cap_sock_ops;
845 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
849 l2cap_sock_init(sk, NULL);
/* bind(2): attach a source bdaddr and PSM to the socket.  PSMs below
 * 0x1001 are privileged (CAP_NET_BIND_SERVICE); duplicate (psm, src)
 * bindings are rejected under the socket-list write lock. */
853 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
855 struct sock *sk = sock->sk;
856 struct sockaddr_l2 la;
861 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy at most sizeof(la); zero-fill guards short sockaddrs. */
864 memset(&la, 0, sizeof(la));
865 len = min_t(unsigned int, sizeof(la), alen);
866 memcpy(&la, addr, len);
873 if (sk->sk_state != BT_OPEN) {
878 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
879 !capable(CAP_NET_BIND_SERVICE)) {
884 write_lock_bh(&l2cap_sk_list.lock);
886 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
889 /* Save source address */
890 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
891 l2cap_pi(sk)->psm = la.l2_psm;
892 l2cap_pi(sk)->sport = la.l2_psm;
893 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) never require pairing. */
895 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
896 __le16_to_cpu(la.l2_psm) == 0x0003)
897 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
900 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing path for @sk: route to a local adapter,
 * derive the HCI authentication requirement from socket type, PSM and
 * security level, create/reuse the ACL link, attach the channel, and —
 * if the link is already up — proceed immediately (for SEQPACKET via
 * l2cap_do_start(), elided here).  Returns 0 or a negative errno. */
907 static int l2cap_do_connect(struct sock *sk)
909 bdaddr_t *src = &bt_sk(sk)->src;
910 bdaddr_t *dst = &bt_sk(sk)->dst;
911 struct l2cap_conn *conn;
912 struct hci_conn *hcon;
913 struct hci_dev *hdev;
917 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
920 hdev = hci_get_route(dst, src);
922 return -EHOSTUNREACH;
924 hci_dev_lock_bh(hdev);
/* Raw sockets (e.g. dedicated-bonding tools) choose bonding type
 * from the security level directly (break statements elided). */
928 if (sk->sk_type == SOCK_RAW) {
929 switch (l2cap_pi(sk)->sec_level) {
930 case BT_SECURITY_HIGH:
931 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
933 case BT_SECURITY_MEDIUM:
934 auth_type = HCI_AT_DEDICATED_BONDING;
937 auth_type = HCI_AT_NO_BONDING;
/* PSM 0x0001 (SDP): no bonding, security pinned to SDP level. */
940 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
941 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
942 auth_type = HCI_AT_NO_BONDING_MITM;
944 auth_type = HCI_AT_NO_BONDING;
946 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
947 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
949 switch (l2cap_pi(sk)->sec_level) {
950 case BT_SECURITY_HIGH:
951 auth_type = HCI_AT_GENERAL_BONDING_MITM;
953 case BT_SECURITY_MEDIUM:
954 auth_type = HCI_AT_GENERAL_BONDING;
957 auth_type = HCI_AT_NO_BONDING;
962 hcon = hci_connect(hdev, ACL_LINK, dst,
963 l2cap_pi(sk)->sec_level, auth_type);
967 conn = l2cap_conn_add(hcon, 0);
975 /* Update source addr of the socket */
976 bacpy(src, conn->src);
978 l2cap_chan_add(conn, sk, NULL);
980 sk->sk_state = BT_CONNECT;
981 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: non-SEQPACKET sockets are connected at once. */
983 if (hcon->state == BT_CONNECTED) {
984 if (sk->sk_type != SOCK_SEQPACKET) {
985 l2cap_sock_clear_timer(sk);
986 sk->sk_state = BT_CONNECTED;
992 hci_dev_unlock_bh(hdev);
/* connect(2): validate the sockaddr, require a PSM for SEQPACKET,
 * allow ERTM/streaming modes only when enabled (checks elided), reject
 * invalid states, store the destination, start the connection and wait
 * for BT_CONNECTED honoring O_NONBLOCK. */
997 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
999 struct sock *sk = sock->sk;
1000 struct sockaddr_l2 la;
1003 BT_DBG("sk %p", sk);
1005 if (!addr || alen < sizeof(addr->sa_family) ||
1006 addr->sa_family != AF_BLUETOOTH)
1009 memset(&la, 0, sizeof(la));
1010 len = min_t(unsigned int, sizeof(la), alen);
1011 memcpy(&la, addr, len);
/* Connection-oriented channels need a PSM to connect to. */
1018 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1023 switch (l2cap_pi(sk)->mode) {
1024 case L2CAP_MODE_BASIC:
1026 case L2CAP_MODE_ERTM:
1027 case L2CAP_MODE_STREAMING:
1036 switch (sk->sk_state) {
1040 /* Already connecting */
1044 /* Already connected */
1057 /* Set destination address and psm */
1058 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1059 l2cap_pi(sk)->psm = la.l2_psm;
1061 err = l2cap_do_connect(sk);
1066 err = bt_sock_wait_state(sk, BT_CONNECTED,
1067 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen; ERTM/streaming
 * modes are gated (checks elided).  A socket bound without a PSM gets
 * one auto-assigned from the odd dynamic range 0x1001-0x10ff under the
 * socket-list write lock. */
1073 static int l2cap_sock_listen(struct socket *sock, int backlog)
1075 struct sock *sk = sock->sk;
1078 BT_DBG("sk %p backlog %d", sk, backlog);
1082 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1087 switch (l2cap_pi(sk)->mode) {
1088 case L2CAP_MODE_BASIC:
1090 case L2CAP_MODE_ERTM:
1091 case L2CAP_MODE_STREAMING:
1100 if (!l2cap_pi(sk)->psm) {
1101 bdaddr_t *src = &bt_sk(sk)->src;
1106 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd, hence the += 2 stride. */
1108 for (psm = 0x1001; psm < 0x1100; psm += 2)
1109 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1110 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1111 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1116 write_unlock_bh(&l2cap_sk_list.lock);
1122 sk->sk_max_ack_backlog = backlog;
1123 sk->sk_ack_backlog = 0;
1124 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) until a connection is
 * available on the accept queue, honoring the receive timeout and
 * pending signals.  The lock is dropped around schedule_timeout() (the
 * release_sock call is elided in this excerpt). */
1131 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1133 DECLARE_WAITQUEUE(wait, current);
1134 struct sock *sk = sock->sk, *nsk;
1138 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1140 if (sk->sk_state != BT_LISTEN) {
1145 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1147 BT_DBG("sk %p timeo %ld", sk, timeo);
1149 /* Wait for an incoming connection. (wake-one). */
1150 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1151 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1152 set_current_state(TASK_INTERRUPTIBLE);
1159 timeo = schedule_timeout(timeo);
1160 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Re-check after reacquiring the lock: the listener may have
 * been closed while we slept. */
1162 if (sk->sk_state != BT_LISTEN) {
1167 if (signal_pending(current)) {
1168 err = sock_intr_errno(timeo);
1172 set_current_state(TASK_RUNNING);
1173 remove_wait_queue(sk_sleep(sk), &wait);
1178 newsock->state = SS_CONNECTED;
1180 BT_DBG("new socket %p", nsk);
/* getsockname(2)/getpeername(2): fill a sockaddr_l2 with either the
 * peer's (psm, dst bdaddr, dcid) or the local (sport, src bdaddr,
 * scid) triple, selected by @peer (the if/else is elided here). */
1187 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1189 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1190 struct sock *sk = sock->sk;
1192 BT_DBG("sock %p, sk %p", sock, sk);
1194 addr->sa_family = AF_BLUETOOTH;
1195 *len = sizeof(struct sockaddr_l2);
1198 la->l2_psm = l2cap_pi(sk)->psm;
1199 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1200 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1202 la->l2_psm = l2cap_pi(sk)->sport;
1203 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1204 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: the peer has not answered our poll.  Give up and
 * disconnect once retry_count reaches the remote MaxTx; otherwise
 * re-arm the monitor and poll again with an RR/RNR (P=1). */
1210 static void l2cap_monitor_timeout(unsigned long arg)
1212 struct sock *sk = (void *) arg;
1216 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1217 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1222 l2cap_pi(sk)->retry_count++;
1223 __mod_monitor_timer();
1225 control = L2CAP_CTRL_POLL;
1226 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: an I-frame went unacknowledged.  Start
 * the monitor sequence (retry_count = 1), enter the WAIT_F state and
 * poll the peer with an RR/RNR (P=1). */
1230 static void l2cap_retrans_timeout(unsigned long arg)
1232 struct sock *sk = (void *) arg;
1236 l2cap_pi(sk)->retry_count = 1;
1237 __mod_monitor_timer();
1239 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1241 control = L2CAP_CTRL_POLL;
1242 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Drop frames from the head of the TX queue that the peer has
 * acknowledged (everything before expected_ack_seq); stop the
 * retransmission timer once nothing is outstanding.  The loop break on
 * reaching expected_ack_seq and the kfree_skb are elided here. */
1246 static void l2cap_drop_acked_frames(struct sock *sk)
1248 struct sk_buff *skb;
1250 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1251 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1254 skb = skb_dequeue(TX_QUEUE(sk));
1257 l2cap_pi(sk)->unacked_frames--;
1260 if (!l2cap_pi(sk)->unacked_frames)
1261 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one fully built L2CAP PDU to the HCI layer for transmission on
 * the channel's ACL link.  Returns hci_send_acl()'s result. */
1266 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1268 struct l2cap_pinfo *pi = l2cap_pi(sk);
1271 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1273 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: drain sk_send_head, sending a clone of each
 * queued I-frame with the current TxSeq stamped into its control field
 * and the FCS recomputed over the final bytes.  TxSeq wraps modulo 64;
 * no retransmission state is kept — sent frames are dequeued and
 * freed (the kfree_skb after dequeue is elided in this excerpt). */
1280 static int l2cap_streaming_send(struct sock *sk)
1282 struct sk_buff *skb, *tx_skb;
1283 struct l2cap_pinfo *pi = l2cap_pi(sk);
1287 while ((skb = sk->sk_send_head)) {
1288 tx_skb = skb_clone(skb, GFP_ATOMIC);
1290 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1291 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1292 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS covers the PDU minus its own trailing two bytes. */
1294 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1295 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1296 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1299 err = l2cap_do_send(sk, tx_skb);
1301 l2cap_send_disconn_req(pi->conn, sk);
1305 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1307 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1308 sk->sk_send_head = NULL;
1310 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1312 skb = skb_dequeue(TX_QUEUE(sk));
/* ERTM single-frame retransmission: locate the queued I-frame with
 * sequence @tx_seq, enforce the remote MaxTx retry limit, refresh its
 * control field (current ReqSeq + the original TxSeq), recompute the
 * FCS and resend a clone (the loop structure around the search is
 * partially elided in this excerpt). */
1318 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1320 struct l2cap_pinfo *pi = l2cap_pi(sk);
1321 struct sk_buff *skb, *tx_skb;
1325 skb = skb_peek(TX_QUEUE(sk));
1327 if (bt_cb(skb)->tx_seq != tx_seq) {
1328 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1330 skb = skb_queue_next(TX_QUEUE(sk), skb);
/* Peer's MaxTx exhausted for this frame: give up on the channel. */
1334 if (pi->remote_max_tx &&
1335 bt_cb(skb)->retries == pi->remote_max_tx) {
1336 l2cap_send_disconn_req(pi->conn, sk);
1340 tx_skb = skb_clone(skb, GFP_ATOMIC);
1341 bt_cb(skb)->retries++;
1342 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1343 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1344 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1345 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1347 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1348 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1349 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1352 err = l2cap_do_send(sk, tx_skb);
1354 l2cap_send_disconn_req(pi->conn, sk);
/* ERTM transmit path: while the TX window has room and the peer is not
 * busy, send a clone of each frame at sk_send_head with fresh
 * ReqSeq/TxSeq, bump its retry count, arm the retransmission timer and
 * advance the window bookkeeping.  Suppressed entirely while waiting
 * for an F-bit response (WAIT_F). */
1362 static int l2cap_ertm_send(struct sock *sk)
1364 struct sk_buff *skb, *tx_skb;
1365 struct l2cap_pinfo *pi = l2cap_pi(sk);
1369 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1372 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1373 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1375 if (pi->remote_max_tx &&
1376 bt_cb(skb)->retries == pi->remote_max_tx) {
1377 l2cap_send_disconn_req(pi->conn, sk);
1381 tx_skb = skb_clone(skb, GFP_ATOMIC);
1383 bt_cb(skb)->retries++;
1385 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1386 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1387 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1388 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed from and stored via skb->data while
 * the length comes from tx_skb->len, unlike the sibling functions
 * which use tx_skb->data throughout.  skb_clone() shares the data
 * buffer, so this presumably still writes the right bytes, but it is
 * inconsistent and fragile — confirm and switch to tx_skb->data as
 * done in l2cap_streaming_send()/l2cap_retransmit_frame(). */
1391 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1392 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1393 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1396 err = l2cap_do_send(sk, tx_skb);
1398 l2cap_send_disconn_req(pi->conn, sk);
1401 __mod_retrans_timer();
1403 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1404 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1406 pi->unacked_frames++;
1408 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1409 sk->sk_send_head = NULL;
1411 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Copy @len bytes of user data from @msg into @skb, spilling anything
 * beyond the first @count bytes into a chain of fragment skbs sized to
 * the connection MTU (the surrounding while-loop, error frees and
 * length bookkeeping are elided in this excerpt). */
1417 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1419 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1420 struct sk_buff **frag;
1423 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1430 /* Continuation fragments (no L2CAP header) */
1431 frag = &skb_shinfo(skb)->frag_list;
1433 count = min_t(unsigned int, conn->mtu, len);
1435 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1438 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1444 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header + 2-byte
 * PSM prefix, then the user payload copied via
 * l2cap_skbuff_fromiovec().  Returns the skb or an ERR_PTR. */
1450 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1452 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1453 struct sk_buff *skb;
1454 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1455 struct l2cap_hdr *lh;
1457 BT_DBG("sk %p len %d", sk, (int)len);
/* First skb carries the header plus as much payload as fits in
 * one ACL MTU; the rest goes into fragment skbs. */
1459 count = min_t(unsigned int, (conn->mtu - hlen), len);
1460 skb = bt_skb_send_alloc(sk, count + hlen,
1461 msg->msg_flags & MSG_DONTWAIT, &err);
1463 return ERR_PTR(-ENOMEM);
1465 /* Create L2CAP header */
1466 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1467 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1468 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1469 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1471 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1472 if (unlikely(err < 0)) {
1474 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload.  Returns the skb or an ERR_PTR. */
1479 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1481 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1482 struct sk_buff *skb;
1483 int err, count, hlen = L2CAP_HDR_SIZE;
1484 struct l2cap_hdr *lh;
1486 BT_DBG("sk %p len %d", sk, (int)len);
1488 count = min_t(unsigned int, (conn->mtu - hlen), len);
1489 skb = bt_skb_send_alloc(sk, count + hlen,
1490 msg->msg_flags & MSG_DONTWAIT, &err);
1492 return ERR_PTR(-ENOMEM);
1494 /* Create L2CAP header */
1495 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1496 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1497 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1499 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1500 if (unlikely(err < 0)) {
1502 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header + 16-bit control
 * field, an optional 2-byte SDU length (only for SAR start frames,
 * gated on @sdulen — the conditional hlen adjustment is elided), the
 * payload, and a zeroed FCS placeholder filled in at transmit time.
 * Returns the skb or an ERR_PTR. */
1507 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1509 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1510 struct sk_buff *skb;
1511 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1512 struct l2cap_hdr *lh;
1514 BT_DBG("sk %p len %d", sk, (int)len);
1519 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1522 count = min_t(unsigned int, (conn->mtu - hlen), len);
1523 skb = bt_skb_send_alloc(sk, count + hlen,
1524 msg->msg_flags & MSG_DONTWAIT, &err);
1526 return ERR_PTR(-ENOMEM);
1528 /* Create L2CAP header */
1529 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1530 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1531 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1532 put_unaligned_le16(control, skb_put(skb, 2));
1534 put_unaligned_le16(sdulen, skb_put(skb, 2));
1536 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1537 if (unlikely(err < 0)) {
1539 return ERR_PTR(err);
/* Reserve the FCS trailer; the real value is written on send. */
1542 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1543 put_unaligned_le16(0, skb_put(skb, 2));
1545 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than max_pdu_size into a START frame (carrying
 * the total SDU length), CONTINUE frames, and a final END frame, built
 * on a private queue and spliced onto the TX queue only on complete
 * success.  Returns the total size queued or a PTR_ERR-derived errno
 * (the while-loop around the continuation frames is elided). */
1549 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1551 struct l2cap_pinfo *pi = l2cap_pi(sk);
1552 struct sk_buff *skb;
1553 struct sk_buff_head sar_queue;
1557 __skb_queue_head_init(&sar_queue);
1558 control = L2CAP_SDU_START;
/* The START frame's sdulen field carries the full SDU length. */
1559 skb = l2cap_create_iframe_pdu(sk, msg, pi->max_pdu_size, control, len);
1561 return PTR_ERR(skb);
1563 __skb_queue_tail(&sar_queue, skb);
1564 len -= pi->max_pdu_size;
1565 size +=pi->max_pdu_size;
1571 if (len > pi->max_pdu_size) {
1572 control |= L2CAP_SDU_CONTINUE;
1573 buflen = pi->max_pdu_size;
1575 control |= L2CAP_SDU_END;
1579 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Any failure discards the whole partially built SDU. */
1581 skb_queue_purge(&sar_queue);
1582 return PTR_ERR(skb);
1585 __skb_queue_tail(&sar_queue, skb);
1590 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1591 if (sk->sk_send_head == NULL)
1592 sk->sk_send_head = sar_queue.next;
/* sendmsg() entry point for L2CAP sockets. Dispatches on socket type
 * and channel mode:
 *  - SOCK_DGRAM: build one connectionless PDU and send immediately;
 *  - BASIC mode: enforce outgoing MTU, build one basic PDU, send;
 *  - ERTM/STREAMING: build a single unsegmented I-frame if the SDU fits
 *    in one PDU, otherwise segment it, then kick the mode's TX engine.
 * Returns bytes sent/queued or a negative errno. MSG_OOB is rejected
 * and the socket must be BT_CONNECTED. */
1597 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1599 struct sock *sk = sock->sk;
1600 struct l2cap_pinfo *pi = l2cap_pi(sk);
1601 struct sk_buff *skb;
1605 BT_DBG("sock %p, sk %p", sock, sk);
/* Report any pending asynchronous socket error first */
1607 err = sock_error(sk);
/* L2CAP has no out-of-band data concept */
1611 if (msg->msg_flags & MSG_OOB)
1616 if (sk->sk_state != BT_CONNECTED) {
1621 /* Connectionless channel */
1622 if (sk->sk_type == SOCK_DGRAM) {
1623 skb = l2cap_create_connless_pdu(sk, msg, len);
1627 err = l2cap_do_send(sk, skb);
1632 case L2CAP_MODE_BASIC:
1633 /* Check outgoing MTU */
1634 if (len > pi->omtu) {
1639 /* Create a basic PDU */
1640 skb = l2cap_create_basic_pdu(sk, msg, len);
1646 err = l2cap_do_send(sk, skb);
1651 case L2CAP_MODE_ERTM:
1652 case L2CAP_MODE_STREAMING:
1653 /* Entire SDU fits into one PDU */
1654 if (len <= pi->max_pdu_size) {
1655 control = L2CAP_SDU_UNSEGMENTED;
1656 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1661 __skb_queue_tail(TX_QUEUE(sk), skb);
/* Track the first unsent frame for the TX engine */
1662 if (sk->sk_send_head == NULL)
1663 sk->sk_send_head = skb;
1665 /* Segment SDU into multiples PDUs */
1666 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Queued frames are actually transmitted by the per-mode engine */
1671 if (pi->mode == L2CAP_MODE_STREAMING)
1672 err = l2cap_streaming_send(sk);
1674 err = l2cap_ertm_send(sk);
1681 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point. With deferred setup enabled, the first read on
 * a BT_CONNECT2 socket is what accepts the pending connection: it sends
 * the success CONN_RSP that was held back and moves the channel to
 * BT_CONFIG. Actual data delivery is delegated to bt_sock_recvmsg(). */
1690 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1692 struct sock *sk = sock->sk;
1696 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1697 struct l2cap_conn_rsp rsp;
1699 sk->sk_state = BT_CONFIG;
/* In the response, our scid answers the remote's dcid and vice versa */
1701 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1702 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1703 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1704 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* 'ident' was stashed from the original CONN_REQ so the response
 * matches the remote's outstanding transaction */
1705 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1706 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Normal path: hand off to the generic Bluetooth socket receive */
1714 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS / L2CAP_LM).
 * L2CAP_OPTIONS: copy caller's struct l2cap_options over current
 * defaults (partial copies allowed — fields not supplied keep their
 * current values). Note flush_to is read into 'opts' but not written
 * back to the socket here. L2CAP_LM: map link-mode flags onto the
 * BT_SECURITY levels plus role-switch/reliable booleans. */
1717 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1719 struct sock *sk = sock->sk;
1720 struct l2cap_options opts;
1724 BT_DBG("sk %p", sk);
/* Seed with current values so a short user buffer only overrides
 * the leading fields */
1730 opts.imtu = l2cap_pi(sk)->imtu;
1731 opts.omtu = l2cap_pi(sk)->omtu;
1732 opts.flush_to = l2cap_pi(sk)->flush_to;
1733 opts.mode = l2cap_pi(sk)->mode;
1734 opts.fcs = l2cap_pi(sk)->fcs;
1736 len = min_t(unsigned int, sizeof(opts), optlen);
1737 if (copy_from_user((char *) &opts, optval, len)) {
1742 l2cap_pi(sk)->imtu = opts.imtu;
1743 l2cap_pi(sk)->omtu = opts.omtu;
1744 l2cap_pi(sk)->mode = opts.mode;
1745 l2cap_pi(sk)->fcs = opts.fcs;
1749 if (get_user(opt, (u32 __user *) optval)) {
/* Highest requested link-mode flag wins (later checks overwrite) */
1754 if (opt & L2CAP_LM_AUTH)
1755 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1756 if (opt & L2CAP_LM_ENCRYPT)
1757 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1758 if (opt & L2CAP_LM_SECURE)
1759 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1761 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1762 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() dispatcher. SOL_L2CAP goes to the legacy handler; only
 * SOL_BLUETOOTH is handled here: BT_SECURITY (validated LOW..HIGH,
 * SEQPACKET/RAW sockets only) and BT_DEFER_SETUP (only meaningful
 * before the channel is live, i.e. BOUND or LISTEN state). */
1774 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1776 struct sock *sk = sock->sk;
1777 struct bt_security sec;
1781 BT_DBG("sk %p", sk);
1783 if (level == SOL_L2CAP)
1784 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1786 if (level != SOL_BLUETOOTH)
1787 return -ENOPROTOOPT;
/* BT_SECURITY applies only to connection-oriented and raw sockets */
1793 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
/* Default if the user buffer is shorter than struct bt_security */
1798 sec.level = BT_SECURITY_LOW;
1800 len = min_t(unsigned int, sizeof(sec), optlen);
1801 if (copy_from_user((char *) &sec, optval, len)) {
1806 if (sec.level < BT_SECURITY_LOW ||
1807 sec.level > BT_SECURITY_HIGH) {
1812 l2cap_pi(sk)->sec_level = sec.level;
1815 case BT_DEFER_SETUP:
/* Deferred setup can only be toggled before connection establishment */
1816 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1821 if (get_user(opt, (u32 __user *) optval)) {
1826 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler.
 * L2CAP_OPTIONS: return current channel options. L2CAP_LM: reverse-map
 * sec_level back to link-mode flags (note the switch cases fall through
 * cumulatively — HIGH implies ENCRYPT implies AUTH). L2CAP_CONNINFO:
 * return HCI handle + remote device class; only valid once connected
 * (or CONNECT2 with deferred setup). */
1838 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1840 struct sock *sk = sock->sk;
1841 struct l2cap_options opts;
1842 struct l2cap_conninfo cinfo;
1846 BT_DBG("sk %p", sk);
1848 if (get_user(len, optlen))
1855 opts.imtu = l2cap_pi(sk)->imtu;
1856 opts.omtu = l2cap_pi(sk)->omtu;
1857 opts.flush_to = l2cap_pi(sk)->flush_to;
1858 opts.mode = l2cap_pi(sk)->mode;
1859 opts.fcs = l2cap_pi(sk)->fcs;
/* Copy at most what the caller asked for */
1861 len = min_t(unsigned int, len, sizeof(opts));
1862 if (copy_to_user(optval, (char *) &opts, len))
1868 switch (l2cap_pi(sk)->sec_level) {
1869 case BT_SECURITY_LOW:
1870 opt = L2CAP_LM_AUTH;
1872 case BT_SECURITY_MEDIUM:
1873 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1875 case BT_SECURITY_HIGH:
1876 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1884 if (l2cap_pi(sk)->role_switch)
1885 opt |= L2CAP_LM_MASTER;
1887 if (l2cap_pi(sk)->force_reliable)
1888 opt |= L2CAP_LM_RELIABLE;
1890 if (put_user(opt, (u32 __user *) optval))
1894 case L2CAP_CONNINFO:
/* Connection info requires an established link (or a deferred-setup
 * connection still pending user acceptance) */
1895 if (sk->sk_state != BT_CONNECTED &&
1896 !(sk->sk_state == BT_CONNECT2 &&
1897 bt_sk(sk)->defer_setup)) {
1902 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1903 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1905 len = min_t(unsigned int, len, sizeof(cinfo));
1906 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() dispatcher, mirror of l2cap_sock_setsockopt():
 * SOL_L2CAP goes to the legacy handler; SOL_BLUETOOTH serves
 * BT_SECURITY (SEQPACKET/RAW only) and BT_DEFER_SETUP (BOUND/LISTEN
 * only). */
1920 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1922 struct sock *sk = sock->sk;
1923 struct bt_security sec;
1926 BT_DBG("sk %p", sk);
1928 if (level == SOL_L2CAP)
1929 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1931 if (level != SOL_BLUETOOTH)
1932 return -ENOPROTOOPT;
1934 if (get_user(len, optlen))
1941 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1946 sec.level = l2cap_pi(sk)->sec_level;
1948 len = min_t(unsigned int, len, sizeof(sec));
1949 if (copy_to_user(optval, (char *) &sec, len))
1954 case BT_DEFER_SETUP:
1955 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1960 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() for L2CAP sockets: idempotently close the channel (full
 * SHUTDOWN_MASK regardless of 'how'), cancel the socket timer, and run
 * the protocol close. With SO_LINGER set, block until the socket
 * reaches BT_CLOSED or the linger time expires. */
1974 static int l2cap_sock_shutdown(struct socket *sock, int how)
1976 struct sock *sk = sock->sk;
1979 BT_DBG("sock %p, sk %p", sock, sk);
/* Only act on the first shutdown; later calls fall through to the
 * (optional) linger wait */
1985 if (!sk->sk_shutdown) {
1986 sk->sk_shutdown = SHUTDOWN_MASK;
1987 l2cap_sock_clear_timer(sk);
1988 __l2cap_sock_close(sk, 0);
1990 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1991 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() (close) for L2CAP sockets: perform a full shutdown, then
 * kill the sock — l2cap_sock_kill() frees it only once it is zapped
 * and has no remaining owner. */
1998 static int l2cap_sock_release(struct socket *sock)
2000 struct sock *sk = sock->sk;
2003 BT_DBG("sock %p, sk %p", sock, sk);
2008 err = l2cap_sock_shutdown(sock, 2);
2011 l2cap_sock_kill(sk);
/* Called when channel configuration completes. Clears config state and
 * the setup timer, then wakes whoever is waiting: for an outgoing
 * channel (no parent) the connect()er sleeping on the state change; for
 * an incoming channel, the listener sleeping in accept(). */
2015 static void l2cap_chan_ready(struct sock *sk)
2017 struct sock *parent = bt_sk(sk)->parent;
2019 BT_DBG("sk %p, parent %p", sk, parent);
2021 l2cap_pi(sk)->conf_state = 0;
2022 l2cap_sock_clear_timer(sk);
2025 /* Outgoing channel.
2026 * Wake up socket sleeping on connect.
2028 sk->sk_state = BT_CONNECTED;
2029 sk->sk_state_change(sk);
2031 /* Incoming channel.
2032 * Wake up socket sleeping on accept.
2034 parent->sk_data_ready(parent, 0);
2038 /* Copy frame to all raw sockets on that connection */
2039 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2041 struct l2cap_chan_list *l = &conn->chan_list;
2042 struct sk_buff *nskb;
2045 BT_DBG("conn %p", conn);
2047 read_lock(&l->lock);
2048 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2049 if (sk->sk_type != SOCK_RAW)
2052 /* Don't send frame to the socket it came from */
2055 nskb = skb_clone(skb, GFP_ATOMIC);
2059 if (sock_queue_rcv_skb(sk, nskb))
2062 read_unlock(&l->lock);
2065 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel PDU: L2CAP header (CID 0x0001) + command
 * header (code/ident/len) + 'dlen' bytes of payload. Payload larger
 * than the connection MTU is spread across frag_list continuation
 * skbs carrying raw payload only. Returns NULL on allocation failure
 * (freeing already-built fragments — freeing lines elided here). */
2066 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2067 u8 code, u8 ident, u16 dlen, void *data)
2069 struct sk_buff *skb, **frag;
2070 struct l2cap_cmd_hdr *cmd;
2071 struct l2cap_hdr *lh;
2074 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2075 conn, code, ident, dlen);
2077 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2078 count = min_t(unsigned int, conn->mtu, len);
2080 skb = bt_skb_alloc(count, GFP_ATOMIC);
2084 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
/* L2CAP length field excludes the basic header itself */
2085 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2086 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2088 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2091 cmd->len = cpu_to_le16(dlen);
/* Remaining room in the first skb after both headers */
2094 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2095 memcpy(skb_put(skb, count), data, count);
2101 /* Continuation fragments (no L2CAP header) */
2102 frag = &skb_shinfo(skb)->frag_list;
2104 count = min_t(unsigned int, conn->mtu, len);
2106 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2110 memcpy(skb_put(*frag, count), data, count);
2115 frag = &(*frag)->next;
/* Decode one configuration option at *ptr into (type, olen, val) and
 * return the number of bytes consumed so the caller can advance/len--.
 * 1/2/4-byte values are read inline (le16/le32 converted to host
 * order); anything else is returned as a pointer into the buffer. */
2125 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2127 struct l2cap_conf_opt *opt = *ptr;
2130 len = L2CAP_CONF_OPT_SIZE + opt->len;
2138 *val = *((u8 *) opt->val);
2142 *val = __le16_to_cpu(*((__le16 *) opt->val));
2146 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer, not a value */
2150 *val = (unsigned long) opt->val;
2154 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * the cursor past it. For len 1/2/4 'val' is the value itself (stored
 * little-endian); otherwise 'val' is a pointer to 'len' bytes to copy
 * (e.g. a struct l2cap_conf_rfc). */
2158 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2160 struct l2cap_conf_opt *opt = *ptr;
2162 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2169 *((u8 *) opt->val) = val;
2173 *((__le16 *) opt->val) = cpu_to_le16(val);
2177 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Other sizes: 'val' is actually a pointer to the option payload */
2181 memcpy(opt->val, (void *) val, len);
2185 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Reset per-channel ERTM state for a freshly configured channel:
 * zero the sequence/ack bookkeeping, arm (but don't start) the
 * retransmission and monitor timers, and init the SREJ reassembly
 * queue. */
2188 static inline void l2cap_ertm_init(struct sock *sk)
2190 l2cap_pi(sk)->expected_ack_seq = 0;
2191 l2cap_pi(sk)->unacked_frames = 0;
2192 l2cap_pi(sk)->buffer_seq = 0;
2193 l2cap_pi(sk)->num_to_ack = 0;
2195 setup_timer(&l2cap_pi(sk)->retrans_timer,
2196 l2cap_retrans_timeout, (unsigned long) sk);
2197 setup_timer(&l2cap_pi(sk)->monitor_timer,
2198 l2cap_monitor_timeout, (unsigned long) sk);
2200 __skb_queue_head_init(SREJ_QUEUE(sk));
/* Return non-zero when 'mode' is usable: it must be advertised both in
 * the remote feature mask ('feat_mask') and in our local mask (which
 * gains ERTM+streaming bits, presumably gated on the enable_ertm module
 * parameter — the gating line is elided in this extract). */
2203 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2205 u32 local_feat_mask = l2cap_feat_mask;
2207 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2210 case L2CAP_MODE_ERTM:
2211 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2212 case L2CAP_MODE_STREAMING:
2213 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode to actually use: keep ERTM/streaming only when
 * both sides support it, otherwise fall back to basic mode. */
2219 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2222 case L2CAP_MODE_STREAMING:
2223 case L2CAP_MODE_ERTM:
2224 if (l2cap_mode_supported(mode, remote_feat_mask))
2228 return L2CAP_MODE_BASIC;
/* Build our outgoing CONFIG_REQ into 'data' and return its length.
 * On the first request, lock in the channel mode (disconnecting if a
 * user-forced ERTM/streaming mode is unsupported, otherwise negotiating
 * down via l2cap_select_mode()). Then emit mode-specific options:
 * MTU for basic mode; an RFC option (+optional FCS-off option when the
 * peer supports FCS and neither side needs it) for ERTM/streaming. */
2232 static int l2cap_build_conf_req(struct sock *sk, void *data)
2234 struct l2cap_pinfo *pi = l2cap_pi(sk);
2235 struct l2cap_conf_req *req = data;
2236 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2237 void *ptr = req->data;
2239 BT_DBG("sk %p", sk);
/* Mode is only (re)negotiated on the very first config exchange */
2241 if (pi->num_conf_req || pi->num_conf_rsp)
2245 case L2CAP_MODE_STREAMING:
2246 case L2CAP_MODE_ERTM:
/* STATE2_DEVICE: mode was explicitly requested, don't fall back */
2247 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2248 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2249 l2cap_send_disconn_req(pi->conn, sk);
2252 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2258 case L2CAP_MODE_BASIC:
/* Only send MTU when it differs from the spec default */
2259 if (pi->imtu != L2CAP_DEFAULT_MTU)
2260 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2263 case L2CAP_MODE_ERTM:
2264 rfc.mode = L2CAP_MODE_ERTM;
2265 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2266 rfc.max_transmit = max_transmit;
/* Timeouts are set by the responder; request sends zeros */
2267 rfc.retrans_timeout = 0;
2268 rfc.monitor_timeout = 0;
2269 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2271 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2272 sizeof(rfc), (unsigned long) &rfc);
2274 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Propose disabling the FCS when we or the peer asked for none */
2277 if (pi->fcs == L2CAP_FCS_NONE ||
2278 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2279 pi->fcs = L2CAP_FCS_NONE;
2280 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2284 case L2CAP_MODE_STREAMING:
2285 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming has no retransmissions: window/tx/timeout fields unused */
2287 rfc.max_transmit = 0;
2288 rfc.retrans_timeout = 0;
2289 rfc.monitor_timeout = 0;
2290 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2292 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2293 sizeof(rfc), (unsigned long) &rfc);
2295 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2298 if (pi->fcs == L2CAP_FCS_NONE ||
2299 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2300 pi->fcs = L2CAP_FCS_NONE;
2301 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2306 /* FIXME: Need actual value of the flush timeout */
2307 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2308 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2310 req->dcid = cpu_to_le16(pi->dcid);
2311 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated CONFIG_REQ (pi->conf_req/conf_len) and
 * build our CONFIG_RSP into 'data'; returns the response length or
 * -ECONNREFUSED when the mode is unacceptable. Unknown non-hint
 * options are echoed back with result UNKNOWN. On success the accepted
 * values (remote tx window, max PDU size, timeouts) are stored in pi
 * and OUTPUT_DONE is set. */
2316 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2318 struct l2cap_pinfo *pi = l2cap_pi(sk);
2319 struct l2cap_conf_rsp *rsp = data;
2320 void *ptr = rsp->data;
2321 void *req = pi->conf_req;
2322 int len = pi->conf_len;
2323 int type, hint, olen;
2325 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2326 u16 mtu = L2CAP_DEFAULT_MTU;
2327 u16 result = L2CAP_CONF_SUCCESS;
2329 BT_DBG("sk %p", sk);
/* First pass: walk every option in the request */
2331 while (len >= L2CAP_CONF_OPT_SIZE) {
2332 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; mandatory ones may not */
2334 hint = type & L2CAP_CONF_HINT;
2335 type &= L2CAP_CONF_MASK;
2338 case L2CAP_CONF_MTU:
2342 case L2CAP_CONF_FLUSH_TO:
2346 case L2CAP_CONF_QOS:
2349 case L2CAP_CONF_RFC:
2350 if (olen == sizeof(rfc))
2351 memcpy(&rfc, (void *) val, olen);
2354 case L2CAP_CONF_FCS:
/* Remember that the peer can live without an FCS */
2355 if (val == L2CAP_FCS_NONE)
2356 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown mandatory option: reject and list its type */
2364 result = L2CAP_CONF_UNKNOWN;
2365 *((u8 *) ptr++) = type;
/* Mode negotiation happens only on the first exchange */
2370 if (pi->num_conf_rsp || pi->num_conf_req)
2374 case L2CAP_MODE_STREAMING:
2375 case L2CAP_MODE_ERTM:
2376 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2377 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2378 return -ECONNREFUSED;
2381 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Peer proposed a different mode: counter-propose ours once, then
 * give up if it still disagrees on the second round */
2386 if (pi->mode != rfc.mode) {
2387 result = L2CAP_CONF_UNACCEPT;
2388 rfc.mode = pi->mode;
2390 if (pi->num_conf_rsp == 1)
2391 return -ECONNREFUSED;
2393 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2394 sizeof(rfc), (unsigned long) &rfc);
2398 if (result == L2CAP_CONF_SUCCESS) {
2399 /* Configure output options and let the other side know
2400 * which ones we don't like. */
/* Spec minimum MTU (48 for classic L2CAP) is non-negotiable */
2402 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2403 result = L2CAP_CONF_UNACCEPT;
2406 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2408 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2411 case L2CAP_MODE_BASIC:
2412 pi->fcs = L2CAP_FCS_NONE;
2413 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2416 case L2CAP_MODE_ERTM:
/* Adopt the peer's TX parameters; we dictate the timeouts */
2417 pi->remote_tx_win = rfc.txwin_size;
2418 pi->remote_max_tx = rfc.max_transmit;
2419 pi->max_pdu_size = rfc.max_pdu_size;
2421 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2422 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2424 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2426 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2427 sizeof(rfc), (unsigned long) &rfc);
2431 case L2CAP_MODE_STREAMING:
2432 pi->remote_tx_win = rfc.txwin_size;
2433 pi->max_pdu_size = rfc.max_pdu_size;
2435 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2438 sizeof(rfc), (unsigned long) &rfc);
2443 result = L2CAP_CONF_UNACCEPT;
2445 memset(&rfc, 0, sizeof(rfc));
2446 rfc.mode = pi->mode;
2449 if (result == L2CAP_CONF_SUCCESS)
2450 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2452 rsp->scid = cpu_to_le16(pi->dcid);
2453 rsp->result = cpu_to_le16(result);
2454 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's CONFIG_RSP ('rsp', 'len') and build our adjusted
 * follow-up CONFIG_REQ into 'data'; returns its length, or
 * -ECONNREFUSED when the peer tries to change a mode we explicitly
 * requested (STATE2_DEVICE). '*result' is updated (e.g. MTU below the
 * spec minimum forces UNACCEPT). On success, accepted ERTM/streaming
 * parameters are committed to pi. */
2459 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2461 struct l2cap_pinfo *pi = l2cap_pi(sk);
2462 struct l2cap_conf_req *req = data;
2463 void *ptr = req->data;
2466 struct l2cap_conf_rfc rfc;
2468 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2470 while (len >= L2CAP_CONF_OPT_SIZE) {
2471 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2474 case L2CAP_CONF_MTU:
/* Peer offered an illegal MTU: clamp to minimum and flag UNACCEPT */
2475 if (val < L2CAP_DEFAULT_MIN_MTU) {
2476 *result = L2CAP_CONF_UNACCEPT;
2477 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2480 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2483 case L2CAP_CONF_FLUSH_TO:
2485 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2489 case L2CAP_CONF_RFC:
2490 if (olen == sizeof(rfc))
2491 memcpy(&rfc, (void *)val, olen);
/* A forced mode may not be renegotiated by the peer */
2493 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2494 rfc.mode != pi->mode)
2495 return -ECONNREFUSED;
2497 pi->mode = rfc.mode;
2500 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2501 sizeof(rfc), (unsigned long) &rfc);
/* Commit negotiated parameters only when the response succeeded */
2506 if (*result == L2CAP_CONF_SUCCESS) {
2508 case L2CAP_MODE_ERTM:
2509 pi->remote_tx_win = rfc.txwin_size;
2510 pi->retrans_timeout = rfc.retrans_timeout;
2511 pi->monitor_timeout = rfc.monitor_timeout;
2512 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2514 case L2CAP_MODE_STREAMING:
2515 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2520 req->dcid = cpu_to_le16(pi->dcid);
2521 req->flags = cpu_to_le16(0x0000);
/* Build a minimal CONFIG_RSP (no options) with the given result and
 * continuation flags into 'data'; returns its length. Used for empty
 * "continue" responses and for rejects. */
2526 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2528 struct l2cap_conf_rsp *rsp = data;
2529 void *ptr = rsp->data;
2531 BT_DBG("sk %p", sk);
/* The scid in our response is the peer's channel id (our dcid) */
2533 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2534 rsp->result = cpu_to_le16(result);
2535 rsp->flags = cpu_to_le16(flags);
/* Handle an incoming COMMAND_REJECT. Only reason 0x0000 ("command not
 * understood") matters: if it answers our outstanding INFO_REQ, treat
 * the feature-mask exchange as done (peer has no info support) and
 * start any channels that were waiting on it. */
2540 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2542 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2544 if (rej->reason != 0x0000)
/* Must match the ident of the INFO_REQ we actually sent */
2547 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2548 cmd->ident == conn->info_ident) {
2549 del_timer(&conn->info_timer);
2551 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2552 conn->info_ident = 0;
2554 l2cap_conn_start(conn);
/* Handle an incoming CONNECTION_REQUEST: locate a listener on the PSM,
 * enforce link security (non-SDP PSMs), check backlog and duplicate
 * dcid, allocate + initialise the child socket, add it to the channel
 * list, then answer with success / pending / error. When the result is
 * "pending, no info" we also kick off the feature-mask INFO exchange. */
2560 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2562 struct l2cap_chan_list *list = &conn->chan_list;
2563 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2564 struct l2cap_conn_rsp rsp;
2565 struct sock *sk, *parent;
2566 int result, status = L2CAP_CS_NO_INFO;
2568 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2569 __le16 psm = req->psm;
2571 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2573 /* Check if we have socket listening on psm */
2574 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2576 result = L2CAP_CR_BAD_PSM;
2580 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check */
2581 if (psm != cpu_to_le16(0x0001) &&
2582 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: disconnect reason "authentication failure" for later report */
2583 conn->disc_reason = 0x05;
2584 result = L2CAP_CR_SEC_BLOCK;
2588 result = L2CAP_CR_NO_MEM;
2590 /* Check for backlog size */
2591 if (sk_acceptq_is_full(parent)) {
2592 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2596 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2600 write_lock_bh(&list->lock);
2602 /* Check if we already have channel with that dcid */
2603 if (__l2cap_get_chan_by_dcid(list, scid)) {
2604 write_unlock_bh(&list->lock);
2605 sock_set_flag(sk, SOCK_ZAPPED);
2606 l2cap_sock_kill(sk);
/* Child channel pins the ACL link for its lifetime */
2610 hci_conn_hold(conn->hcon);
2612 l2cap_sock_init(sk, parent);
2613 bacpy(&bt_sk(sk)->src, conn->src);
2614 bacpy(&bt_sk(sk)->dst, conn->dst);
2615 l2cap_pi(sk)->psm = psm;
/* Remote's source cid becomes our destination cid */
2616 l2cap_pi(sk)->dcid = scid;
2618 __l2cap_chan_add(conn, sk, parent);
2619 dcid = l2cap_pi(sk)->scid;
2621 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Stash ident so a deferred response can reuse the transaction id */
2623 l2cap_pi(sk)->ident = cmd->ident;
2625 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2626 if (l2cap_check_security(sk)) {
2627 if (bt_sk(sk)->defer_setup) {
/* Userspace will accept via the first recvmsg() on the child */
2628 sk->sk_state = BT_CONNECT2;
2629 result = L2CAP_CR_PEND;
2630 status = L2CAP_CS_AUTHOR_PEND;
2631 parent->sk_data_ready(parent, 0);
2633 sk->sk_state = BT_CONFIG;
2634 result = L2CAP_CR_SUCCESS;
2635 status = L2CAP_CS_NO_INFO;
/* Security procedure still in progress */
2638 sk->sk_state = BT_CONNECT2;
2639 result = L2CAP_CR_PEND;
2640 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not finished yet: answer pending for now */
2643 sk->sk_state = BT_CONNECT2;
2644 result = L2CAP_CR_PEND;
2645 status = L2CAP_CS_NO_INFO;
2648 write_unlock_bh(&list->lock);
2651 bh_unlock_sock(parent);
2654 rsp.scid = cpu_to_le16(scid);
2655 rsp.dcid = cpu_to_le16(dcid);
2656 rsp.result = cpu_to_le16(result);
2657 rsp.status = cpu_to_le16(status);
2658 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection on this link: start the feature-mask handshake */
2660 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2661 struct l2cap_info_req info;
2662 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2664 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2665 conn->info_ident = l2cap_get_ident(conn);
2667 mod_timer(&conn->info_timer, jiffies +
2668 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2670 l2cap_send_cmd(conn, conn->info_ident,
2671 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle CONNECTION_RESPONSE for one of our outgoing connect requests.
 * Locate the channel by scid (or by ident while the peer hasn't
 * assigned us a cid yet). SUCCESS → record dcid, enter BT_CONFIG and
 * immediately send our CONFIG_REQ; PEND → remember we're waiting;
 * anything else → tear the channel down with ECONNREFUSED. */
2677 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2679 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2680 u16 scid, dcid, result, status;
2684 scid = __le16_to_cpu(rsp->scid);
2685 dcid = __le16_to_cpu(rsp->dcid);
2686 result = __le16_to_cpu(rsp->result);
2687 status = __le16_to_cpu(rsp->status);
2689 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2692 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* scid may be 0 in a pending/reject response: fall back to ident */
2696 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2702 case L2CAP_CR_SUCCESS:
2703 sk->sk_state = BT_CONFIG;
2704 l2cap_pi(sk)->ident = 0;
2705 l2cap_pi(sk)->dcid = dcid;
2706 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2708 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Kick off configuration right away */
2710 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2711 l2cap_build_conf_req(sk, req), req);
2712 l2cap_pi(sk)->num_conf_req++;
2716 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Any non-success/non-pending result refuses the connection */
2720 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle CONFIGURATION_REQUEST. Options may span several requests
 * (continuation flag 0x0001), so they are accumulated in
 * pi->conf_req/conf_len and parsed only when complete. A buffer
 * overrun rejects; a parse failure disconnects. When both directions
 * are configured (OUTPUT_DONE + INPUT_DONE) the channel goes live
 * (FCS decision, sequence-number reset, ERTM init, chan_ready). If we
 * haven't sent our own CONFIG_REQ yet, send it now. */
2728 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2730 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2736 dcid = __le16_to_cpu(req->dcid);
2737 flags = __le16_to_cpu(req->flags);
2739 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2741 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Ignore config on a channel already being torn down */
2745 if (sk->sk_state == BT_DISCONN)
2748 /* Reject if config buffer is too small. */
2749 len = cmd_len - sizeof(*req);
2750 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2751 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2752 l2cap_build_conf_rsp(sk, rsp,
2753 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment of the option list */
2758 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2759 l2cap_pi(sk)->conf_len += len;
2761 if (flags & 0x0001) {
2762 /* Incomplete config. Send empty response. */
2763 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2764 l2cap_build_conf_rsp(sk, rsp,
2765 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2769 /* Complete config. */
2770 len = l2cap_parse_conf_req(sk, rsp);
/* Unresolvable config (e.g. mode conflict): disconnect */
2772 l2cap_send_disconn_req(conn, sk);
2776 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2777 l2cap_pi(sk)->num_conf_rsp++;
2779 /* Reset config buffer. */
2780 l2cap_pi(sk)->conf_len = 0;
2782 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2785 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* Use CRC16 unless both sides agreed to drop the FCS */
2786 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2787 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2788 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2790 sk->sk_state = BT_CONNECTED;
2792 l2cap_pi(sk)->next_tx_seq = 0;
2793 l2cap_pi(sk)->expected_tx_seq = 0;
2794 __skb_queue_head_init(TX_QUEUE(sk));
2795 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2796 l2cap_ertm_init(sk);
2798 l2cap_chan_ready(sk);
2802 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2804 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2805 l2cap_build_conf_req(sk, buf), buf);
2806 l2cap_pi(sk)->num_conf_req++;
/* Handle CONFIGURATION_RESPONSE to our CONFIG_REQ. SUCCESS falls
 * through to completion; UNACCEPT re-parses the peer's counter-options
 * and retries (bounded by L2CAP_CONF_MAX_CONF_RSP, disconnecting on
 * overflow/parse failure); any other result triggers a disconnect with
 * a 5-second teardown timer. On full agreement (INPUT_DONE +
 * OUTPUT_DONE) the channel goes live like in l2cap_config_req(). */
2814 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2816 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2817 u16 scid, flags, result;
2820 scid = __le16_to_cpu(rsp->scid);
2821 flags = __le16_to_cpu(rsp->flags);
2822 result = __le16_to_cpu(rsp->result);
2824 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2825 scid, flags, result);
2827 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2832 case L2CAP_CONF_SUCCESS:
2835 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation only a bounded number of times */
2836 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2837 int len = cmd->len - sizeof(*rsp);
/* Counter-options must fit in our request buffer */
2840 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2841 l2cap_send_disconn_req(conn, sk);
2845 /* throw out any old stored conf requests */
2846 result = L2CAP_CONF_SUCCESS;
2847 len = l2cap_parse_conf_rsp(sk, rsp->data,
2850 l2cap_send_disconn_req(conn, sk);
/* Send the adjusted CONFIG_REQ built from the counter-options */
2854 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2855 L2CAP_CONF_REQ, len, req);
2856 l2cap_pi(sk)->num_conf_req++;
2857 if (result != L2CAP_CONF_SUCCESS)
/* Rejected or unknown result: schedule a disconnect */
2863 sk->sk_state = BT_DISCONN;
2864 sk->sk_err = ECONNRESET;
2865 l2cap_sock_set_timer(sk, HZ * 5);
2866 l2cap_send_disconn_req(conn, sk);
2873 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2875 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
/* Use CRC16 unless both sides agreed to drop the FCS */
2876 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2877 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2878 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2880 sk->sk_state = BT_CONNECTED;
2881 l2cap_pi(sk)->next_tx_seq = 0;
2882 l2cap_pi(sk)->expected_tx_seq = 0;
2883 __skb_queue_head_init(TX_QUEUE(sk));
2884 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2885 l2cap_ertm_init(sk);
2887 l2cap_chan_ready(sk);
/* Handle DISCONNECTION_REQUEST from the peer: echo back a
 * DISCONN_RSP, mark the socket fully shut down, purge pending TX
 * (and, for ERTM, the SREJ queue and both timers), remove the channel
 * and kill the socket with ECONNRESET. */
2895 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2897 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2898 struct l2cap_disconn_rsp rsp;
2902 scid = __le16_to_cpu(req->scid);
2903 dcid = __le16_to_cpu(req->dcid);
2905 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid — look the channel up by it */
2907 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2911 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2912 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2913 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2915 sk->sk_shutdown = SHUTDOWN_MASK;
2917 skb_queue_purge(TX_QUEUE(sk));
2919 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2920 skb_queue_purge(SREJ_QUEUE(sk));
2921 del_timer(&l2cap_pi(sk)->retrans_timer);
2922 del_timer(&l2cap_pi(sk)->monitor_timer);
2925 l2cap_chan_del(sk, ECONNRESET);
2928 l2cap_sock_kill(sk);
/* Handle DISCONNECTION_RESPONSE to our disconnect request: same
 * teardown as l2cap_disconnect_req() but with error 0 (clean,
 * locally initiated close). */
2932 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2934 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2938 scid = __le16_to_cpu(rsp->scid);
2939 dcid = __le16_to_cpu(rsp->dcid);
2941 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2943 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2947 skb_queue_purge(TX_QUEUE(sk));
2949 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
2950 skb_queue_purge(SREJ_QUEUE(sk));
2951 del_timer(&l2cap_pi(sk)->retrans_timer);
2952 del_timer(&l2cap_pi(sk)->monitor_timer);
2955 l2cap_chan_del(sk, 0);
2958 l2cap_sock_kill(sk);
/* Handle INFORMATION_REQUEST. Answer FEAT_MASK with our feature mask
 * (ERTM/streaming bits appended — presumably gated on enable_ertm; the
 * gating line is elided here), FIXED_CHAN with the fixed-channel map,
 * and anything else with NOTSUPP. */
2962 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2964 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2967 type = __le16_to_cpu(req->type);
2969 BT_DBG("type 0x%4.4x", type);
2971 if (type == L2CAP_IT_FEAT_MASK) {
2973 u32 feat_mask = l2cap_feat_mask;
2974 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2975 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2976 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2978 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Feature mask is a raw LE32 in the response payload */
2980 put_unaligned_le32(feat_mask, rsp->data);
2981 l2cap_send_cmd(conn, cmd->ident,
2982 L2CAP_INFO_RSP, sizeof(buf), buf);
2983 } else if (type == L2CAP_IT_FIXED_CHAN) {
2985 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2986 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2987 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed channel bitmap follows the 4-byte rsp header */
2988 memcpy(buf + 4, l2cap_fixed_chan, 8);
2989 l2cap_send_cmd(conn, cmd->ident,
2990 L2CAP_INFO_RSP, sizeof(buf), buf);
2992 struct l2cap_info_rsp rsp;
2993 rsp.type = cpu_to_le16(type);
2994 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2995 l2cap_send_cmd(conn, cmd->ident,
2996 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle INFORMATION_RESPONSE to our INFO_REQ. FEAT_MASK: record the
 * remote feature mask; if it advertises fixed channels, chain a
 * FIXED_CHAN request, otherwise mark the exchange done and start
 * pending channels. FIXED_CHAN: exchange complete, start channels. */
3002 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3004 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3007 type = __le16_to_cpu(rsp->type);
3008 result = __le16_to_cpu(rsp->result);
3010 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Response arrived: cancel the info-exchange timeout */
3012 del_timer(&conn->info_timer);
3014 if (type == L2CAP_IT_FEAT_MASK) {
3015 conn->feat_mask = get_unaligned_le32(rsp->data);
3017 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3018 struct l2cap_info_req req;
3019 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3021 conn->info_ident = l2cap_get_ident(conn);
3023 l2cap_send_cmd(conn, conn->info_ident,
3024 L2CAP_INFO_REQ, sizeof(req), &req);
3026 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3027 conn->info_ident = 0;
3029 l2cap_conn_start(conn);
3031 } else if (type == L2CAP_IT_FIXED_CHAN) {
3032 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3033 conn->info_ident = 0;
3035 l2cap_conn_start(conn);
/* Demultiplex a signalling-channel (CID 0x0001) skb: first mirror it
 * to raw sockets, then iterate the packed command headers, validating
 * length and ident, and dispatch each command to its handler. A
 * handler error produces a COMMAND_REJECT back to the peer. */
3041 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3043 u8 *data = skb->data;
3045 struct l2cap_cmd_hdr cmd;
3048 l2cap_raw_recv(conn, skb);
/* One skb may contain several concatenated signalling commands */
3050 while (len >= L2CAP_CMD_HDR_SIZE) {
3052 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3053 data += L2CAP_CMD_HDR_SIZE;
3054 len -= L2CAP_CMD_HDR_SIZE;
3056 cmd_len = le16_to_cpu(cmd.len);
3058 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Declared length must fit the remaining data; ident 0 is invalid */
3060 if (cmd_len > len || !cmd.ident) {
3061 BT_DBG("corrupted command");
3066 case L2CAP_COMMAND_REJ:
3067 l2cap_command_rej(conn, &cmd, data);
3070 case L2CAP_CONN_REQ:
3071 err = l2cap_connect_req(conn, &cmd, data);
3074 case L2CAP_CONN_RSP:
3075 err = l2cap_connect_rsp(conn, &cmd, data);
3078 case L2CAP_CONF_REQ:
3079 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3082 case L2CAP_CONF_RSP:
3083 err = l2cap_config_rsp(conn, &cmd, data);
3086 case L2CAP_DISCONN_REQ:
3087 err = l2cap_disconnect_req(conn, &cmd, data);
3090 case L2CAP_DISCONN_RSP:
3091 err = l2cap_disconnect_rsp(conn, &cmd, data);
3094 case L2CAP_ECHO_REQ:
/* Echo simply reflects the payload back */
3095 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3098 case L2CAP_ECHO_RSP:
3101 case L2CAP_INFO_REQ:
3102 err = l2cap_information_req(conn, &cmd, data);
3105 case L2CAP_INFO_RSP:
3106 err = l2cap_information_rsp(conn, &cmd, data);
3110 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3116 struct l2cap_cmd_rej rej;
3117 BT_DBG("error %d", err);
3119 /* FIXME: Map err to a valid reason */
3120 rej.reason = cpu_to_le16(0);
3121 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of a received ERTM/streaming frame.
 * The skb is trimmed by 2 first, so skb->data + skb->len then points
 * exactly at the (now out-of-payload) received FCS. The CRC covers the
 * L2CAP header + control field (hdr_size bytes before skb->data) plus
 * the remaining payload. Returns 0 on match or no FCS, else -EINVAL. */
3131 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3133 u16 our_fcs, rcv_fcs;
3134 int hdr_size = L2CAP_HDR_SIZE + 2;
3136 if (pi->fcs == L2CAP_FCS_CRC16) {
3137 skb_trim(skb, skb->len - 2);
3138 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3139 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3141 if (our_fcs != rcv_fcs)
/* Insert an out-of-order frame into the SREJ queue, keeping the queue
 * sorted by tx_seq so reassembly can later drain it in order. The
 * frame's sequence number and SAR bits are stashed in its control
 * block first. */
3147 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3149 struct sk_buff *next_skb;
3151 bt_cb(skb)->tx_seq = tx_seq;
3152 bt_cb(skb)->sar = sar;
3154 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: just append */
3156 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Walk until we find the first frame with a larger tx_seq.
 * NOTE(review): plain '>' compare — modular (mod-64) wraparound of
 * tx_seq is not handled here; confirm against later fixes upstream. */
3161 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3162 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3166 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3169 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Largest sequence seen so far: goes at the tail */
3171 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble a Service Data Unit from I-frames according to the SAR
 * (Segmentation And Reassembly) bits of the control field and, when a
 * complete SDU is available, queue it to the socket receive buffer.
 * pi->conn_state's L2CAP_CONN_SAR_SDU flag tracks "reassembly in
 * progress"; pi->sdu / pi->sdu_len / pi->partial_sdu_len hold the state.
 * (Error-path and break lines are elided in this listing.)
 */
3174 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3176 struct l2cap_pinfo *pi = l2cap_pi(sk);
3177 struct sk_buff *_skb;
3180 switch (control & L2CAP_CTRL_SAR) {
/* Whole SDU in one frame: must not arrive mid-reassembly. */
3181 case L2CAP_SDU_UNSEGMENTED:
3182 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3187 err = sock_queue_rcv_skb(sk, skb);
/* First segment: starts with a 2-byte total-SDU-length field. */
3193 case L2CAP_SDU_START:
3194 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3199 pi->sdu_len = get_unaligned_le16(skb->data);
3202 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3208 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3210 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3211 pi->partial_sdu_len = skb->len;
/* Middle segment: only valid while a reassembly is in progress. */
3215 case L2CAP_SDU_CONTINUE:
3216 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3219 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3221 pi->partial_sdu_len += skb->len;
/* Peer sent more than the announced SDU length: protocol error. */
3222 if (pi->partial_sdu_len > pi->sdu_len)
/* Final segment (L2CAP_SDU_END case label is elided in this listing). */
3230 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3233 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3235 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3236 pi->partial_sdu_len += skb->len;
/* Lengths agree: deliver a clone so pi->sdu can be freed independently. */
3238 if (pi->partial_sdu_len == pi->sdu_len) {
3239 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3240 err = sock_queue_rcv_skb(sk, _skb);
/*
 * After a missing frame arrives, drain the SREJ queue of every frame
 * that is now in sequence (starting at tx_seq), feeding each into SAR
 * reassembly and advancing buffer_seq_srej modulo the 6-bit (64-value)
 * ERTM sequence space.  Stops at the first remaining gap.
 */
3254 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3256 struct sk_buff *skb;
3259 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Next queued frame is not the one we expect: gap still open, stop. */
3260 if (bt_cb(skb)->tx_seq != tx_seq)
3263 skb = skb_dequeue(SREJ_QUEUE(sk));
/* Rebuild the SAR bits saved when the frame was queued. */
3264 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3265 l2cap_sar_reassembly_sdu(sk, skb, control);
3266 l2cap_pi(sk)->buffer_seq_srej =
3267 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/*
 * Re-send SREJ (Selective Reject) S-frames for every sequence number
 * still outstanding in the SREJ list.  The entry matching tx_seq has
 * been satisfied and is removed (removal lines are elided in this
 * listing); all others are re-requested and rotated to the list tail.
 */
3272 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3274 struct l2cap_pinfo *pi = l2cap_pi(sk);
3275 struct srej_list *l, *tmp;
/* _safe variant: entries may be unlinked/re-linked while iterating. */
3278 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3279 if (l->tx_seq == tx_seq) {
3284 control = L2CAP_SUPER_SELECT_REJECT;
3285 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3286 l2cap_send_sframe(pi, control);
/* Keep the entry pending by moving it to the back of the list. */
3288 list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * A gap was detected: send one SREJ S-frame for every sequence number
 * between expected_tx_seq and the received tx_seq, recording each
 * requested number in the SREJ list.  The first SREJ may carry the
 * Poll bit if L2CAP_CONN_SEND_PBIT is pending.
 */
3292 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3294 struct l2cap_pinfo *pi = l2cap_pi(sk);
3295 struct srej_list *new;
3298 while (tx_seq != pi->expected_tx_seq) {
3299 control = L2CAP_SUPER_SELECT_REJECT;
3300 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* Poll bit is one-shot: clear the flag once it has been sent. */
3301 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
3302 control |= L2CAP_CTRL_POLL;
3303 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
3305 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) result is not checked — a NULL
 * dereference on allocation failure; consider handling OOM here. */
3307 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3308 new->tx_seq = pi->expected_tx_seq++;
3309 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past tx_seq itself: that frame was just received and queued. */
3311 pi->expected_tx_seq++;
/*
 * Process a received ERTM I-frame (information frame).
 *
 * Handles the three receive situations of Enhanced Retransmission Mode:
 *  - in-sequence frame: deliver through SAR reassembly and batch acks;
 *  - frame filling (or inside) an open SREJ gap: queue/drain the SREJ
 *    machinery;
 *  - new out-of-sequence frame: enter SREJ recovery and request the
 *    missing frames.
 * The piggybacked ReqSeq always acknowledges the peer's receipt of our
 * transmitted frames.  (Many goto/return lines are elided in this
 * listing, so the visible control flow is discontinuous.)
 */
3314 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3316 struct l2cap_pinfo *pi = l2cap_pi(sk);
3317 u8 tx_seq = __get_txseq(rx_control);
3318 u8 req_seq = __get_reqseq(rx_control);
3320 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3323 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* ReqSeq acknowledges our frames up to (but not including) req_seq. */
3325 pi->expected_ack_seq = req_seq;
3326 l2cap_drop_acked_frames(sk);
3328 if (tx_seq == pi->expected_tx_seq)
/* Out of sequence while an SREJ recovery is already running: */
3331 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3332 struct srej_list *first;
3334 first = list_first_entry(SREJ_LIST(sk),
3335 struct srej_list, list);
/* This frame fills the oldest outstanding gap: queue it and drain. */
3336 if (tx_seq == first->tx_seq) {
3337 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3338 l2cap_check_srej_gap(sk, tx_seq);
3340 list_del(&first->list);
/* All gaps closed: leave SREJ recovery and resync buffer_seq. */
3343 if (list_empty(SREJ_LIST(sk))) {
3344 pi->buffer_seq = pi->buffer_seq_srej;
3345 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3348 struct srej_list *l;
3349 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* Duplicate of an already-requested frame: re-issue pending SREJs. */
3351 list_for_each_entry(l, SREJ_LIST(sk), list) {
3352 if (l->tx_seq == tx_seq) {
3353 l2cap_resend_srejframe(sk, tx_seq);
/* A new, further gap opened: request the newly missing frames too. */
3357 l2cap_send_srejframe(sk, tx_seq);
/* First out-of-sequence frame: start SREJ recovery from scratch. */
3360 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3362 INIT_LIST_HEAD(SREJ_LIST(sk));
3363 pi->buffer_seq_srej = pi->buffer_seq;
3365 __skb_queue_head_init(SREJ_QUEUE(sk));
3366 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3368 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3370 l2cap_send_srejframe(sk, tx_seq);
/* Expected in-sequence frame: 6-bit sequence space wraps at 64. */
3375 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3377 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3378 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* Final bit set: peer answered our poll — retransmit from the ack. */
3382 if (rx_control & L2CAP_CTRL_FINAL) {
3383 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3384 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3386 sk->sk_send_head = TX_QUEUE(sk)->next;
3387 pi->next_tx_seq = pi->expected_ack_seq;
3388 l2cap_ertm_send(sk);
3392 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3394 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Batch acknowledgements: send an RR only every NUM_TO_ACK frames. */
3398 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3399 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
3400 tx_control |= L2CAP_SUPER_RCV_READY;
3401 tx_control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3402 l2cap_send_sframe(pi, tx_control);
/*
 * Process a received ERTM S-frame (supervisory frame).
 *
 * Dispatches on the supervisory function bits:
 *  - RR  (Receiver Ready): acknowledgement, optionally a poll/final;
 *  - REJ (Reject): go-back-N style retransmission request;
 *  - SREJ (Selective Reject): retransmit one specific frame;
 *  - RNR (Receiver Not Ready): peer flow-off, stop retransmitting.
 * (break/return lines between cases are elided in this listing.)
 */
3407 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3409 struct l2cap_pinfo *pi = l2cap_pi(sk);
3410 u8 tx_seq = __get_reqseq(rx_control);
3412 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3414 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3415 case L2CAP_SUPER_RCV_READY:
/* Peer polls us: answer immediately with an RR carrying the F-bit. */
3416 if (rx_control & L2CAP_CTRL_POLL) {
3417 u16 control = L2CAP_CTRL_FINAL;
3418 control |= L2CAP_SUPER_RCV_READY |
3419 (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT);
3420 l2cap_send_sframe(l2cap_pi(sk), control);
3421 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* Final bit: answer to our poll — resynchronize and retransmit. */
3423 } else if (rx_control & L2CAP_CTRL_FINAL) {
3424 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3425 pi->expected_ack_seq = tx_seq;
3426 l2cap_drop_acked_frames(sk);
3428 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3429 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3431 sk->sk_send_head = TX_QUEUE(sk)->next;
3432 pi->next_tx_seq = pi->expected_ack_seq;
3433 l2cap_ertm_send(sk);
3436 if (!(pi->conn_state & L2CAP_CONN_WAIT_F))
/* Poll answered: stop the monitor timer, fall back to retrans timer. */
3439 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3440 del_timer(&pi->monitor_timer);
3442 if (pi->unacked_frames > 0)
3443 __mod_retrans_timer();
/* Plain RR acknowledgement. */
3445 pi->expected_ack_seq = tx_seq;
3446 l2cap_drop_acked_frames(sk);
3448 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3449 (pi->unacked_frames > 0))
3450 __mod_retrans_timer();
3452 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3453 l2cap_ertm_send(sk);
3457 case L2CAP_SUPER_REJECT:
3458 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3460 pi->expected_ack_seq = __get_reqseq(rx_control);
3461 l2cap_drop_acked_frames(sk);
/* REJ with F-bit: only retransmit if no REJ action is outstanding. */
3463 if (rx_control & L2CAP_CTRL_FINAL) {
3464 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3465 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3467 sk->sk_send_head = TX_QUEUE(sk)->next;
3468 pi->next_tx_seq = pi->expected_ack_seq;
3469 l2cap_ertm_send(sk);
/* REJ without F-bit: go back and retransmit from the rejected seq. */
3472 sk->sk_send_head = TX_QUEUE(sk)->next;
3473 pi->next_tx_seq = pi->expected_ack_seq;
3474 l2cap_ertm_send(sk);
3476 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3477 pi->srej_save_reqseq = tx_seq;
3478 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3484 case L2CAP_SUPER_SELECT_REJECT:
3485 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* SREJ with P-bit: retransmit the requested frame and keep sending. */
3487 if (rx_control & L2CAP_CTRL_POLL) {
3488 pi->expected_ack_seq = tx_seq;
3489 l2cap_drop_acked_frames(sk);
3490 l2cap_retransmit_frame(sk, tx_seq);
3491 l2cap_ertm_send(sk);
3492 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3493 pi->srej_save_reqseq = tx_seq;
3494 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* SREJ with F-bit: skip the retransmit if this seq was already acted on. */
3496 } else if (rx_control & L2CAP_CTRL_FINAL) {
3497 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3498 pi->srej_save_reqseq == tx_seq)
3499 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3501 l2cap_retransmit_frame(sk, tx_seq);
/* Plain SREJ: retransmit the one requested frame. */
3504 l2cap_retransmit_frame(sk, tx_seq);
3505 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3506 pi->srej_save_reqseq = tx_seq;
3507 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* RNR: peer receive window is closed — ack what we can and go quiet. */
3512 case L2CAP_SUPER_RCV_NOT_READY:
3513 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3514 pi->expected_ack_seq = tx_seq;
3515 l2cap_drop_acked_frames(sk);
3517 del_timer(&l2cap_pi(sk)->retrans_timer);
3518 if (rx_control & L2CAP_CTRL_POLL) {
3519 u16 control = L2CAP_CTRL_FINAL;
3520 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/*
 * Entry point for data received on a connection-oriented channel (CID).
 * Looks up the socket by source CID and dispatches on the negotiated
 * channel mode: Basic (plain delivery), ERTM (control-field parsing,
 * FCS check, I/S-frame handling) or Streaming (FCS check, sequence
 * tracking without retransmission).  Frames for unknown CIDs, wrong
 * socket state or bad mode are dropped (drop/unlock lines are elided
 * in this listing).
 */
3529 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3532 struct l2cap_pinfo *pi;
3536 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3538 BT_DBG("unknown cid 0x%4.4x", cid);
3544 BT_DBG("sk %p, len %d", sk, skb->len);
3546 if (sk->sk_state != BT_CONNECTED)
3550 case L2CAP_MODE_BASIC:
3551 /* If socket recv buffers overflows we drop data here
3552 * which is *bad* because L2CAP has to be reliable.
3553 * But we don't have any other choice. L2CAP doesn't
3554 * provide flow control mechanism. */
/* Oversized for the negotiated incoming MTU: drop. */
3556 if (pi->imtu < skb->len)
3559 if (!sock_queue_rcv_skb(sk, skb))
3563 case L2CAP_MODE_ERTM:
/* First two bytes of the payload are the ERTM control field. */
3564 control = get_unaligned_le16(skb->data)
3568 if (__is_sar_start(control))
3571 if (pi->fcs == L2CAP_FCS_CRC16)
3575 * We can just drop the corrupted I-frame here.
3576 * Receiver will miss it and start proper recovery
3577 * procedures and ask retransmission.
3579 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
3582 if (l2cap_check_fcs(pi, skb))
3585 if (__is_iframe(control))
3586 l2cap_data_channel_iframe(sk, control, skb);
3588 l2cap_data_channel_sframe(sk, control, skb);
3592 case L2CAP_MODE_STREAMING:
3593 control = get_unaligned_le16(skb->data);
3597 if (__is_sar_start(control))
3600 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode carries no S-frames; oversized PDUs are dropped. */
3603 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE || __is_sframe(control))
3606 if (l2cap_check_fcs(pi, skb))
3609 tx_seq = __get_txseq(control);
/* Track sequence numbers but never retransmit: just resync on loss. */
3611 if (pi->expected_tx_seq == tx_seq)
3612 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3614 pi->expected_tx_seq = (tx_seq + 1) % 64;
3616 l2cap_sar_reassembly_sdu(sk, skb, control);
3621 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
/*
 * Deliver a connectionless-channel (CID 0x0002) datagram to the socket
 * bound to the given PSM on this adapter.  Frames with no matching
 * socket, a wrong socket state or exceeding the incoming MTU are
 * dropped (drop/put lines are elided in this listing).
 */
3636 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3639 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3643 BT_DBG("sk %p, len %d", sk, skb->len);
3645 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3648 if (l2cap_pi(sk)->imtu < skb->len)
3651 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Parse the basic L2CAP header of a complete reassembled frame and
 * dispatch by destination CID: signaling channel, connectionless
 * channel (which additionally carries a 2-byte PSM prefix), or a
 * connection-oriented data channel.  Frames whose header length does
 * not match the actual payload are discarded (the drop path is elided
 * in this listing).
 */
3663 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3665 struct l2cap_hdr *lh = (void *) skb->data;
3669 skb_pull(skb, L2CAP_HDR_SIZE);
3670 cid = __le16_to_cpu(lh->cid);
3671 len = __le16_to_cpu(lh->len);
3673 if (len != skb->len) {
3678 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3681 case L2CAP_CID_SIGNALING:
3682 l2cap_sig_channel(conn, skb);
3685 case L2CAP_CID_CONN_LESS:
/* Connectionless frames start with the destination PSM. */
3686 psm = get_unaligned_le16(skb->data);
3688 l2cap_conless_channel(conn, psm, skb);
3692 l2cap_data_channel(conn, cid, skb);
3697 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: an incoming ACL connection request from @bdaddr.
 * Scan all listening L2CAP sockets and build the link-mode mask
 * (accept / master role) the connection should be answered with.
 * A socket bound to this adapter's own address ("exact" match) takes
 * precedence over wildcard (BDADDR_ANY) listeners.
 */
3699 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3701 int exact = 0, lm1 = 0, lm2 = 0;
/* NOTE(review): the "register" storage class is archaic and ignored by
 * modern compilers — harmless, but could be dropped. */
3702 register struct sock *sk;
3703 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
3705 if (type != ACL_LINK)
3708 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3710 /* Find listening sockets and check their link_mode */
3711 read_lock(&l2cap_sk_list.lock);
3712 sk_for_each(sk, node, &l2cap_sk_list.head) {
3713 if (sk->sk_state != BT_LISTEN)
/* Exact match: socket bound to this adapter's address. */
3716 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3717 lm1 |= HCI_LM_ACCEPT;
3718 if (l2cap_pi(sk)->role_switch)
3719 lm1 |= HCI_LM_MASTER;
/* Wildcard listener: accepted only if no exact match exists. */
3721 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3722 lm2 |= HCI_LM_ACCEPT;
3723 if (l2cap_pi(sk)->role_switch)
3724 lm2 |= HCI_LM_MASTER;
3727 read_unlock(&l2cap_sk_list.lock);
3729 return exact ? lm1 : lm2;
/*
 * HCI callback: an ACL connection attempt completed.  On success,
 * attach an L2CAP connection object to the hci_conn and kick off the
 * channel setup; on failure, tear down any pending L2CAP state with
 * the HCI status translated to an errno.
 */
3732 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3734 struct l2cap_conn *conn;
3736 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3738 if (hcon->type != ACL_LINK)
3742 conn = l2cap_conn_add(hcon, status);
3744 l2cap_conn_ready(conn);
3746 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback: the controller asks which disconnect reason to report
 * for this link.  Returns the reason recorded on the L2CAP connection
 * (the default for non-ACL or unattached links is elided in this
 * listing).
 */
3753 static int l2cap_disconn_ind(struct hci_conn *hcon)
3755 struct l2cap_conn *conn = hcon->l2cap_data;
3757 BT_DBG("hcon %p", hcon);
3759 if (hcon->type != ACL_LINK || !conn)
3760 return conn->disc_reason;
/*
 * HCI callback: the ACL link was disconnected.  Tear down the L2CAP
 * connection, propagating the HCI reason as an errno to all channels.
 */
3763 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3765 BT_DBG("hcon %p reason %d", hcon, reason);
3767 if (hcon->type != ACL_LINK)
3770 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to an encryption change on a connected SEQPACKET channel.
 * Encryption dropped: give a medium-security channel a 5 second grace
 * timer (re-encryption may follow), but close a high-security channel
 * immediately.  Encryption (re)established: cancel the grace timer.
 */
3775 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3777 if (sk->sk_type != SOCK_SEQPACKET)
3780 if (encrypt == 0x00) {
3781 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3782 l2cap_sock_clear_timer(sk);
3783 l2cap_sock_set_timer(sk, HZ * 5);
3784 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3785 __l2cap_sock_close(sk, ECONNREFUSED);
3787 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3788 l2cap_sock_clear_timer(sk);
/*
 * HCI callback: an authentication/encryption procedure finished for
 * this link.  Walk every channel on the connection and advance the
 * ones that were waiting on security: established channels get their
 * encryption state rechecked; a channel in BT_CONNECT sends its
 * deferred Connection Request; a channel in BT_CONNECT2 answers the
 * peer's pending Connection Request with success or a security block.
 * (Per-socket locking lines are elided in this listing.)
 */
3792 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3794 struct l2cap_chan_list *l;
3795 struct l2cap_conn *conn = hcon->l2cap_data;
3801 l = &conn->chan_list;
3803 BT_DBG("conn %p", conn);
3805 read_lock(&l->lock);
3807 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Still mid-security-procedure: leave this channel alone for now. */
3810 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3815 if (!status && (sk->sk_state == BT_CONNECTED ||
3816 sk->sk_state == BT_CONFIG)) {
3817 l2cap_check_encryption(sk, encrypt);
/* Security done on our outgoing channel: send the Connection Request. */
3822 if (sk->sk_state == BT_CONNECT) {
3824 struct l2cap_conn_req req;
3825 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3826 req.psm = l2cap_pi(sk)->psm;
3828 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3830 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3831 L2CAP_CONN_REQ, sizeof(req), &req);
3833 l2cap_sock_clear_timer(sk);
3834 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming channel was waiting on security: answer the peer now. */
3836 } else if (sk->sk_state == BT_CONNECT2) {
3837 struct l2cap_conn_rsp rsp;
3841 sk->sk_state = BT_CONFIG;
3842 result = L2CAP_CR_SUCCESS;
/* Security failed: refuse the connection and schedule teardown. */
3844 sk->sk_state = BT_DISCONN;
3845 l2cap_sock_set_timer(sk, HZ / 10);
3846 result = L2CAP_CR_SEC_BLOCK;
3849 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3850 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3851 rsp.result = cpu_to_le16(result);
3852 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3853 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3854 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3860 read_unlock(&l->lock);
/*
 * HCI callback: raw ACL data arrived for this connection, possibly
 * fragmented.  ACL_START fragments carry the L2CAP header: if the
 * fragment already holds the whole frame it is processed immediately,
 * otherwise a reassembly buffer (conn->rx_skb / conn->rx_len) is
 * allocated and continuation fragments are appended until the frame
 * is complete.  Any framing inconsistency marks the connection
 * unreliable with ECOMM.  (Several drop/goto lines are elided in this
 * listing.)
 */
3865 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
3867 struct l2cap_conn *conn = hcon->l2cap_data;
3869 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3872 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3874 if (flags & ACL_START) {
3875 struct l2cap_hdr *hdr;
/* A start fragment while reassembly is pending: old frame is lost. */
3879 BT_ERR("Unexpected start frame (len %d)", skb->len);
3880 kfree_skb(conn->rx_skb);
3881 conn->rx_skb = NULL;
3883 l2cap_conn_unreliable(conn, ECOMM);
/* Start fragment must contain at least the L2CAP basic header. */
3887 BT_ERR("Frame is too short (len %d)", skb->len);
3888 l2cap_conn_unreliable(conn, ECOMM);
3892 hdr = (struct l2cap_hdr *) skb->data;
3893 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3895 if (len == skb->len) {
3896 /* Complete frame received */
3897 l2cap_recv_frame(conn, skb);
3901 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3903 if (skb->len > len) {
3904 BT_ERR("Frame is too long (len %d, expected len %d)",
3906 l2cap_conn_unreliable(conn, ECOMM);
3910 /* Allocate skb for the complete frame (with header) */
3911 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
3915 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Bytes still outstanding before the frame is complete. */
3917 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
3919 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
3921 if (!conn->rx_len) {
3922 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3923 l2cap_conn_unreliable(conn, ECOMM);
3927 if (skb->len > conn->rx_len) {
3928 BT_ERR("Fragment is too long (len %d, expected %d)",
3929 skb->len, conn->rx_len);
3930 kfree_skb(conn->rx_skb);
3931 conn->rx_skb = NULL;
3933 l2cap_conn_unreliable(conn, ECOMM);
3937 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3939 conn->rx_len -= skb->len;
3941 if (!conn->rx_len) {
3942 /* Complete frame received */
3943 l2cap_recv_frame(conn, conn->rx_skb);
3944 conn->rx_skb = NULL;
/*
 * debugfs seq_file show callback: dump one line per L2CAP socket —
 * source/destination addresses, state, PSM, CIDs, MTUs and security
 * level — under the socket-list read lock.
 */
3953 static int l2cap_debugfs_show(struct seq_file *f, void *p)
3956 struct hlist_node *node;
3958 read_lock_bh(&l2cap_sk_list.lock);
3960 sk_for_each(sk, node, &l2cap_sk_list.head) {
3961 struct l2cap_pinfo *pi = l2cap_pi(sk);
3963 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3964 batostr(&bt_sk(sk)->src),
3965 batostr(&bt_sk(sk)->dst),
3966 sk->sk_state, __le16_to_cpu(pi->psm),
3968 pi->imtu, pi->omtu, pi->sec_level);
3971 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open callback: bind the single-record show function. */
3976 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
3978 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
3981 static const struct file_operations l2cap_debugfs_fops = {
3982 .open = l2cap_debugfs_open,
3984 .llseek = seq_lseek,
3985 .release = single_release,
/* Handle of the "l2cap" debugfs file, created in l2cap_init(). */
3988 static struct dentry *l2cap_debugfs;
/* Socket-layer operations exposed for PF_BLUETOOTH/BTPROTO_L2CAP
 * sockets; unsupported operations are stubbed with sock_no_*. */
3990 static const struct proto_ops l2cap_sock_ops = {
3991 .family = PF_BLUETOOTH,
3992 .owner = THIS_MODULE,
3993 .release = l2cap_sock_release,
3994 .bind = l2cap_sock_bind,
3995 .connect = l2cap_sock_connect,
3996 .listen = l2cap_sock_listen,
3997 .accept = l2cap_sock_accept,
3998 .getname = l2cap_sock_getname,
3999 .sendmsg = l2cap_sock_sendmsg,
4000 .recvmsg = l2cap_sock_recvmsg,
4001 .poll = bt_sock_poll,
4002 .ioctl = bt_sock_ioctl,
4003 .mmap = sock_no_mmap,
4004 .socketpair = sock_no_socketpair,
4005 .shutdown = l2cap_sock_shutdown,
4006 .setsockopt = l2cap_sock_setsockopt,
4007 .getsockopt = l2cap_sock_getsockopt
/* Registration record mapping BTPROTO_L2CAP socket creation to
 * l2cap_sock_create within the PF_BLUETOOTH family. */
4010 static const struct net_proto_family l2cap_sock_family_ops = {
4011 .family = PF_BLUETOOTH,
4012 .owner = THIS_MODULE,
4013 .create = l2cap_sock_create,
/* HCI protocol hooks: how the HCI core hands connection events and
 * ACL data up to the L2CAP layer. */
4016 static struct hci_proto l2cap_hci_proto = {
4018 .id = HCI_PROTO_L2CAP,
4019 .connect_ind = l2cap_connect_ind,
4020 .connect_cfm = l2cap_connect_cfm,
4021 .disconn_ind = l2cap_disconn_ind,
4022 .disconn_cfm = l2cap_disconn_cfm,
4023 .security_cfm = l2cap_security_cfm,
4024 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the L2CAP proto, the Bluetooth socket family
 * entry and the HCI protocol hooks, then create the debugfs file.
 * Each step unwinds the previous ones on failure (the intermediate
 * goto/return lines are elided in this listing); debugfs failure is
 * only logged, not fatal.
 */
4027 static int __init l2cap_init(void)
4031 err = proto_register(&l2cap_proto, 0);
4035 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4037 BT_ERR("L2CAP socket registration failed");
4041 err = hci_register_proto(&l2cap_hci_proto);
4043 BT_ERR("L2CAP protocol registration failed");
4044 bt_sock_unregister(BTPROTO_L2CAP);
4049 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4050 bt_debugfs, NULL, &l2cap_debugfs_fops);
4052 BT_ERR("Failed to create L2CAP debug file");
4055 BT_INFO("L2CAP ver %s", VERSION);
4056 BT_INFO("L2CAP socket layer initialized");
/* Error unwind target: undo proto_register (label elided in listing). */
4061 proto_unregister(&l2cap_proto);
/*
 * Module exit: remove the debugfs file and unregister everything
 * l2cap_init() registered.
 * NOTE(review): sockets are unregistered before the HCI protocol hooks,
 * which is not the strict inverse of init order — presumably safe here,
 * but worth confirming no ACL data can race in between.
 */
4065 static void __exit l2cap_exit(void)
4067 debugfs_remove(l2cap_debugfs);
4069 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4070 BT_ERR("L2CAP socket unregistration failed");
4072 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4073 BT_ERR("L2CAP protocol unregistration failed");
4075 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol lets dependent modules pull
 * in the L2CAP module automatically via the module loader. */
4078 void l2cap_load(void)
4080 /* Dummy function to trigger automatic L2CAP module loading by
4081 * other modules that use L2CAP sockets but don't use any other
4082 * symbols from it. */
4085 EXPORT_SYMBOL(l2cap_load);
4087 module_init(l2cap_init);
4088 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared "static int" at the top of the
 * file but registered as a bool parameter; max_transmit is "static int"
 * but registered as uint.  Older kernels tolerated this, newer
 * module_param type checking does not — confirm against the target
 * kernel version. */
4090 module_param(enable_ertm, bool, 0644);
4091 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4093 module_param(max_transmit, uint, 0644);
4094 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4096 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4097 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4098 MODULE_VERSION(VERSION);
4099 MODULE_LICENSE("GPL");
/* Autoload alias: protocol 0 == BTPROTO_L2CAP. */
4100 MODULE_ALIAS("bt-proto-0");