2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/uaccess.h>
46 #include <asm/system.h>
47 #include <asm/unaligned.h>
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
53 #define VERSION "2.14"
/* ERTM support is experimental here; disabled unless set at module load
 * (statics are zero-initialized by the C standard — do not init to 0,
 * per kernel coding style / checkpatch). */
55 static int enable_ertm;
/* Feature mask advertised in L2CAP information responses. */
57 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap: bit 1 (0x02) = L2CAP signalling channel. */
58 static u8 l2cap_fixed_chan[8] = { 0x02, };
60 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
62 static struct bt_sock_list l2cap_sk_list = {
63 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for the socket-close paths used by timers. */
66 static void __l2cap_sock_close(struct sock *sk, int reason);
67 static void l2cap_sock_close(struct sock *sk);
68 static void l2cap_sock_kill(struct sock *sk);
70 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
71 u8 code, u8 ident, u16 dlen, void *data);
73 /* ---- L2CAP timers ---- */
/* sk->sk_timer expiry handler: pick a close reason from the socket state
 * and tear the channel down. Runs in timer (softirq) context. */
74 static void l2cap_sock_timeout(unsigned long arg)
76 struct sock *sk = (struct sock *) arg;
79 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* Timed out while connected/configuring, or while connecting with real
 * (non-SDP) security requested: report ECONNREFUSED to the user. */
83 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
84 reason = ECONNREFUSED;
85 else if (sk->sk_state == BT_CONNECT &&
86 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
87 reason = ECONNREFUSED;
91 __l2cap_sock_close(sk, reason);
/* Arm sk->sk_timer to fire 'timeout' jiffies from now (re-arms if pending). */
99 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
101 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
102 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk->sk_timer, if any. */
105 static void l2cap_sock_clear_timer(struct sock *sk)
107 BT_DBG("sock %p state %d", sk, sk->sk_state);
108 sk_stop_timer(sk, &sk->sk_timer);
111 /* ---- L2CAP channels ---- */
/* Linear scan of a connection's channel list by destination CID.
 * NOTE(review): the locked wrappers below take l->lock; callers of this
 * __ variant presumably must hold it themselves — confirm at call sites. */
112 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
115 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
116 if (l2cap_pi(s)->dcid == cid)
/* Linear scan of a connection's channel list by source CID (lock held by
 * caller — see the locked wrapper below). */
122 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
125 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
126 if (l2cap_pi(s)->scid == cid)
132 /* Find channel with given SCID.
133 * Returns locked socket */
134 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
/* Lookup under the channel-list read lock; the lock protects only the
 * list walk, not the returned socket's lifetime. */
138 s = __l2cap_get_chan_by_scid(l, cid);
141 read_unlock(&l->lock);
/* Find the channel whose outstanding signalling request used 'ident'
 * (lock held by caller — see the locked wrapper below). */
145 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
148 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
149 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper: look up a channel by signalling identifier. */
155 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 s = __l2cap_get_chan_by_ident(l, ident);
162 read_unlock(&l->lock);
/* Allocate the first free source CID in the dynamic range by scanning the
 * connection's existing channels. O(n) per candidate; fine for the small
 * channel counts involved. */
166 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
168 u16 cid = L2CAP_CID_DYN_START;
170 for (; cid < L2CAP_CID_DYN_END; cid++) {
171 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the connection's channel list (doubly linked
 * via l2cap_pinfo next_c/prev_c). Caller holds the list write lock. */
178 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
183 l2cap_pi(l->head)->prev_c = sk;
185 l2cap_pi(sk)->next_c = l->head;
186 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the connection's channel list, taking the write lock
 * itself (bh-safe variant, as the list is touched from softirq context). */
190 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
192 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
194 write_lock_bh(&l->lock);
199 l2cap_pi(next)->prev_c = prev;
201 l2cap_pi(prev)->next_c = next;
202 write_unlock_bh(&l->lock);
/* Attach sk to conn: assign CIDs according to socket type and link it into
 * the channel list. 'parent' is the listening socket for incoming channels
 * (NULL for outgoing). Caller holds the channel-list write lock. */
207 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
209 struct l2cap_chan_list *l = &conn->chan_list;
211 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
212 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* Default HCI disconnect reason (0x13: remote user terminated). */
214 conn->disc_reason = 0x13;
216 l2cap_pi(sk)->conn = conn;
218 if (sk->sk_type == SOCK_SEQPACKET) {
219 /* Alloc CID for connection-oriented socket */
220 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
221 } else if (sk->sk_type == SOCK_DGRAM) {
222 /* Connectionless socket */
223 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
224 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
225 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
227 /* Raw socket can send/recv signalling messages only */
228 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
229 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
230 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
233 __l2cap_chan_link(l, sk);
/* Incoming channel: queue on the listener's accept queue. */
236 bt_accept_enqueue(parent, sk);
240 * Must be called on the locked socket. */
/* Detach the channel from its connection, mark the socket closed/zapped,
 * and wake whoever is waiting (parent listener for accepted sockets,
 * the owner otherwise). 'err' becomes the socket error. */
241 static void l2cap_chan_del(struct sock *sk, int err)
243 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
244 struct sock *parent = bt_sk(sk)->parent;
246 l2cap_sock_clear_timer(sk);
248 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
251 /* Unlink from channel list */
252 l2cap_chan_unlink(&conn->chan_list, sk);
253 l2cap_pi(sk)->conn = NULL;
/* Drop the reference taken on the underlying ACL link. */
254 hci_conn_put(conn->hcon);
257 sk->sk_state = BT_CLOSED;
258 sock_set_flag(sk, SOCK_ZAPPED);
/* Pending-accept socket: remove from the listener and notify it. */
264 bt_accept_unlink(sk);
265 parent->sk_data_ready(parent, 0);
267 sk->sk_state_change(sk);
270 /* Service level security */
/* Map the socket's PSM and requested security level to an HCI
 * authentication type, then ask the HCI layer to enforce it.
 * Returns nonzero when security is already satisfied (see
 * hci_conn_security), zero while authentication is pending. */
271 static inline int l2cap_check_security(struct sock *sk)
273 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* PSM 0x0001 is SDP: never requires bonding. */
276 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
277 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
278 auth_type = HCI_AT_NO_BONDING_MITM;
280 auth_type = HCI_AT_NO_BONDING;
282 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
283 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
285 switch (l2cap_pi(sk)->sec_level) {
286 case BT_SECURITY_HIGH:
287 auth_type = HCI_AT_GENERAL_BONDING_MITM;
289 case BT_SECURITY_MEDIUM:
290 auth_type = HCI_AT_GENERAL_BONDING;
293 auth_type = HCI_AT_NO_BONDING;
298 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling identifier for this connection, wrapping
 * within the kernel-reserved range (spinlock-protected counter). */
302 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
306 /* Get next available identificator.
307 * 1 - 128 are used by kernel.
308 * 129 - 199 are reserved.
309 * 200 - 254 are used by utilities like l2ping, etc.
312 spin_lock_bh(&conn->lock);
314 if (++conn->tx_ident > 128)
319 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command PDU and queue it on the ACL link. */
324 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
326 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
328 BT_DBG("code 0x%2.2x", code);
333 return hci_send_acl(conn->hcon, skb, 0);
/* Build and send an ERTM supervisory frame (S-frame): L2CAP header plus a
 * 16-bit control field carrying 'control' with the S-frame type bit set. */
336 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
339 struct l2cap_hdr *lh;
340 struct l2cap_conn *conn = pi->conn;
343 BT_DBG("pi %p, control 0x%2.2x", pi, control);
/* Frame is header + 2 control bytes, capped at the ACL MTU. */
345 count = min_t(unsigned int, conn->mtu, L2CAP_HDR_SIZE + 2);
346 control |= L2CAP_CTRL_FRAME_TYPE;
348 skb = bt_skb_alloc(count, GFP_ATOMIC);
352 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
353 lh->len = cpu_to_le16(2);
354 lh->cid = cpu_to_le16(pi->dcid);
355 put_unaligned_le16(control, skb_put(skb, 2));
357 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Kick off channel establishment: if the peer's feature mask is already
 * known, send a Connect Request (once security allows it); otherwise issue
 * an Information Request first and defer the connect until it completes. */
360 static void l2cap_do_start(struct sock *sk)
362 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
364 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight: l2cap_conn_start will retry. */
365 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
368 if (l2cap_check_security(sk)) {
369 struct l2cap_conn_req req;
370 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
371 req.psm = l2cap_pi(sk)->psm;
373 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
375 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
376 L2CAP_CONN_REQ, sizeof(req), &req);
/* First channel on this connection: query peer features. */
379 struct l2cap_info_req req;
380 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
382 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
383 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the information response. */
385 mod_timer(&conn->info_timer, jiffies +
386 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
388 l2cap_send_cmd(conn, conn->info_ident,
389 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send an L2CAP Disconnect Request for sk's channel (dcid/scid pair). */
393 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
395 struct l2cap_disconn_req req;
397 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
398 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
399 l2cap_send_cmd(conn, l2cap_get_ident(conn),
400 L2CAP_DISCONN_REQ, sizeof(req), &req);
403 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its signalling state:
 * BT_CONNECT channels get a Connect Request (once security passes),
 * BT_CONNECT2 channels (incoming, awaiting our answer) get a Connect
 * Response whose result depends on security and defer_setup. Called after
 * the feature-mask exchange or a security event completes. */
404 static void l2cap_conn_start(struct l2cap_conn *conn)
406 struct l2cap_chan_list *l = &conn->chan_list;
409 BT_DBG("conn %p", conn);
413 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in signalling. */
416 if (sk->sk_type != SOCK_SEQPACKET) {
421 if (sk->sk_state == BT_CONNECT) {
422 if (l2cap_check_security(sk)) {
423 struct l2cap_conn_req req;
424 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
425 req.psm = l2cap_pi(sk)->psm;
427 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
429 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
430 L2CAP_CONN_REQ, sizeof(req), &req);
432 } else if (sk->sk_state == BT_CONNECT2) {
433 struct l2cap_conn_rsp rsp;
/* Response carries our view: scid = peer's CID, dcid = ours. */
434 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
435 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
437 if (l2cap_check_security(sk)) {
438 if (bt_sk(sk)->defer_setup) {
/* Userspace must accept() first: answer "pending". */
439 struct sock *parent = bt_sk(sk)->parent;
440 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
441 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
442 parent->sk_data_ready(parent, 0);
445 sk->sk_state = BT_CONFIG;
446 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
447 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: authentication pending. */
450 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
451 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
454 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
455 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
461 read_unlock(&l->lock);
/* ACL link is up: non-SEQPACKET sockets become connected immediately
 * (no channel setup needed); SEQPACKET sockets in BT_CONNECT start the
 * L2CAP connect procedure. */
464 static void l2cap_conn_ready(struct l2cap_conn *conn)
466 struct l2cap_chan_list *l = &conn->chan_list;
469 BT_DBG("conn %p", conn);
473 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
476 if (sk->sk_type != SOCK_SEQPACKET) {
477 l2cap_sock_clear_timer(sk);
478 sk->sk_state = BT_CONNECTED;
479 sk->sk_state_change(sk);
480 } else if (sk->sk_state == BT_CONNECT)
486 read_unlock(&l->lock);
489 /* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate 'err' to every channel that requested force_reliable
 * delivery, so its owner learns data may have been lost. */
490 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
492 struct l2cap_chan_list *l = &conn->chan_list;
495 BT_DBG("conn %p", conn);
499 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
500 if (l2cap_pi(sk)->force_reliable)
504 read_unlock(&l->lock);
/* Information-request timer expired: give up on the feature exchange,
 * mark it done anyway, and let pending channels proceed. */
507 static void l2cap_info_timeout(unsigned long arg)
509 struct l2cap_conn *conn = (void *) arg;
511 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
512 conn->info_ident = 0;
514 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link. Allocated atomically since this runs from HCI event context. */
517 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
519 struct l2cap_conn *conn = hcon->l2cap_data;
524 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
528 hcon->l2cap_data = conn;
531 BT_DBG("hcon %p conn %p", hcon, conn);
/* Inherit MTU and addresses from the underlying HCI device/link. */
533 conn->mtu = hcon->hdev->acl_mtu;
534 conn->src = &hcon->hdev->bdaddr;
535 conn->dst = &hcon->dst;
539 setup_timer(&conn->info_timer, l2cap_info_timeout,
540 (unsigned long) conn);
542 spin_lock_init(&conn->lock);
543 rwlock_init(&conn->chan_list.lock);
/* Default disconnect reason (0x13: remote user terminated). */
545 conn->disc_reason = 0x13;
/* Tear down the L2CAP connection: free any partial reassembly skb, kill
 * every channel with 'err', stop the info timer, and detach from hcon. */
550 static void l2cap_conn_del(struct hci_conn *hcon, int err)
552 struct l2cap_conn *conn = hcon->l2cap_data;
558 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
560 kfree_skb(conn->rx_skb);
/* Drain channels; l2cap_chan_del unlinks head each iteration. */
563 while ((sk = conn->chan_list.head)) {
565 l2cap_chan_del(sk, err);
/* Timer was only armed if an info request went out. */
570 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
571 del_timer_sync(&conn->info_timer);
573 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add: add sk to conn's channel list
 * under the list write lock. */
577 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
579 struct l2cap_chan_list *l = &conn->chan_list;
580 write_lock_bh(&l->lock);
581 __l2cap_chan_add(conn, sk, parent);
582 write_unlock_bh(&l->lock);
585 /* ---- Socket interface ---- */
/* Exact-match lookup in the global socket list by bound (sport, src addr)
 * pair; used to detect bind conflicts. Caller holds l2cap_sk_list.lock. */
586 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
589 struct hlist_node *node;
590 sk_for_each(sk, node, &l2cap_sk_list.head)
591 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
598 /* Find socket with psm and source bdaddr.
599 * Returns closest match.
/* Prefer an exact source-address match; fall back to a BDADDR_ANY
 * listener (sk1). 'state' of 0 matches any socket state. */
601 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
603 struct sock *sk = NULL, *sk1 = NULL;
604 struct hlist_node *node;
606 sk_for_each(sk, node, &l2cap_sk_list.head) {
607 if (state && sk->sk_state != state)
610 if (l2cap_pi(sk)->psm == psm) {
612 if (!bacmp(&bt_sk(sk)->src, src))
616 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke on an exact match. */
620 return node ? sk : sk1;
623 /* Find socket with given address (psm, src).
624 * Returns locked socket */
625 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
/* Global-list read lock protects the walk only. */
628 read_lock(&l2cap_sk_list.lock);
629 s = __l2cap_get_sock_by_psm(state, psm, src);
632 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any skbs still queued when the socket dies. */
636 static void l2cap_sock_destruct(struct sock *sk)
640 skb_queue_purge(&sk->sk_receive_queue);
641 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the listener closed and zapped. */
644 static void l2cap_sock_cleanup_listen(struct sock *parent)
648 BT_DBG("parent %p", parent);
650 /* Close not yet accepted channels */
651 while ((sk = bt_accept_dequeue(parent, NULL)))
652 l2cap_sock_close(sk);
654 parent->sk_state = BT_CLOSED;
655 sock_set_flag(parent, SOCK_ZAPPED);
658 /* Kill socket (only if zapped and orphan)
659 * Must be called on unlocked socket.
/* Final teardown: bail unless the socket is zapped AND has no attached
 * struct socket; otherwise unlink from the global list and mark dead. */
661 static void l2cap_sock_kill(struct sock *sk)
663 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
666 BT_DBG("sk %p state %d", sk, sk->sk_state);
668 /* Kill poor orphan */
669 bt_sock_unlink(&l2cap_sk_list, sk);
670 sock_set_flag(sk, SOCK_DEAD);
/* State-driven close. Listeners clean their accept queue; connected
 * SEQPACKET channels go through the Disconnect Request handshake
 * (entering BT_DISCONN with a timeout); half-open incoming channels are
 * refused with a Connect Response; everything else is torn down directly.
 * Caller holds the socket lock. */
674 static void __l2cap_sock_close(struct sock *sk, int reason)
676 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
678 switch (sk->sk_state) {
680 l2cap_sock_cleanup_listen(sk);
685 if (sk->sk_type == SOCK_SEQPACKET) {
686 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Graceful disconnect: wait (bounded) for the peer's response. */
688 sk->sk_state = BT_DISCONN;
689 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
690 l2cap_send_disconn_req(conn, sk);
692 l2cap_chan_del(sk, reason);
696 if (sk->sk_type == SOCK_SEQPACKET) {
697 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
698 struct l2cap_conn_rsp rsp;
/* Closing before accept: refuse the pending Connect Request. */
701 if (bt_sk(sk)->defer_setup)
702 result = L2CAP_CR_SEC_BLOCK;
704 result = L2CAP_CR_BAD_PSM;
706 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
707 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
708 rsp.result = cpu_to_le16(result);
709 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
710 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
711 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
713 l2cap_chan_del(sk, reason);
718 l2cap_chan_del(sk, reason);
/* Unbound/unconnected: just mark the socket dead for the killer. */
722 sock_set_flag(sk, SOCK_ZAPPED);
727 /* Must be called on unlocked socket. */
/* Public close path: cancel the socket timer, then do the state-driven
 * close with ECONNRESET as the reason. */
728 static void l2cap_sock_close(struct sock *sk)
730 l2cap_sock_clear_timer(sk);
732 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise a fresh L2CAP socket. With a parent (incoming connection)
 * the child inherits the listener's type and per-channel options;
 * otherwise defaults are applied. */
737 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
739 struct l2cap_pinfo *pi = l2cap_pi(sk);
744 sk->sk_type = parent->sk_type;
745 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
/* Inherit channel parameters from the listening socket. */
747 pi->imtu = l2cap_pi(parent)->imtu;
748 pi->omtu = l2cap_pi(parent)->omtu;
749 pi->mode = l2cap_pi(parent)->mode;
750 pi->fcs = l2cap_pi(parent)->fcs;
751 pi->sec_level = l2cap_pi(parent)->sec_level;
752 pi->role_switch = l2cap_pi(parent)->role_switch;
753 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: plain defaults (basic mode, CRC16 FCS, low security). */
755 pi->imtu = L2CAP_DEFAULT_MTU;
757 pi->mode = L2CAP_MODE_BASIC;
758 pi->fcs = L2CAP_FCS_CRC16;
759 pi->sec_level = BT_SECURITY_LOW;
761 pi->force_reliable = 0;
764 /* Default config options */
766 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Protocol descriptor: per-socket private area is struct l2cap_pinfo. */
769 static struct proto l2cap_proto = {
771 .owner = THIS_MODULE,
772 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and wire up a new L2CAP struct sock: init socket data, install
 * destructor and timers, set the connect timeout, and link it into the
 * global L2CAP socket list. */
775 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
779 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
783 sock_init_data(sock, sk);
784 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
786 sk->sk_destruct = l2cap_sock_destruct;
787 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
789 sock_reset_flag(sk, SOCK_ZAPPED);
791 sk->sk_protocol = proto;
792 sk->sk_state = BT_OPEN;
794 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
796 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: accept only SEQPACKET/DGRAM/RAW types, require
 * CAP_NET_RAW for raw sockets, then allocate and initialise the sock. */
800 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
804 BT_DBG("sock %p", sock);
806 sock->state = SS_UNCONNECTED;
808 if (sock->type != SOCK_SEQPACKET &&
809 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
810 return -ESOCKTNOSUPPORT;
/* Raw L2CAP gives access to signalling; privileged only. */
812 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
815 sock->ops = &l2cap_sock_ops;
817 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
821 l2cap_sock_init(sk, NULL);
/* bind(2): validate the sockaddr, require privilege for reserved PSMs
 * (< 0x1001), reject duplicate (psm, src) bindings, then record the
 * source address / PSM and move the socket to BT_BOUND. */
825 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
827 struct sock *sk = sock->sk;
828 struct sockaddr_l2 la;
833 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy only as much as the caller supplied; rest stays zeroed. */
836 memset(&la, 0, sizeof(la));
837 len = min_t(unsigned int, sizeof(la), alen);
838 memcpy(&la, addr, len);
845 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved for well-known services. */
850 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
851 !capable(CAP_NET_BIND_SERVICE)) {
856 write_lock_bh(&l2cap_sk_list.lock);
858 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
861 /* Save source address */
862 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
863 l2cap_pi(sk)->psm = la.l2_psm;
864 l2cap_pi(sk)->sport = la.l2_psm;
865 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) need no higher security. */
867 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
868 __le16_to_cpu(la.l2_psm) == 0x0003)
869 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
872 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish (or reuse) the ACL link to the destination and attach this
 * socket's channel to it. Chooses the HCI auth type from socket type,
 * PSM and security level, then starts the L2CAP connect if the link is
 * already up. Returns 0 on success or a negative errno. */
879 static int l2cap_do_connect(struct sock *sk)
881 bdaddr_t *src = &bt_sk(sk)->src;
882 bdaddr_t *dst = &bt_sk(sk)->dst;
883 struct l2cap_conn *conn;
884 struct hci_conn *hcon;
885 struct hci_dev *hdev;
889 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
/* Pick the local adapter that routes to dst. */
892 hdev = hci_get_route(dst, src);
894 return -EHOSTUNREACH;
896 hci_dev_lock_bh(hdev);
/* Raw sockets (e.g. dedicated bonding tools) choose bonding directly. */
900 if (sk->sk_type == SOCK_RAW) {
901 switch (l2cap_pi(sk)->sec_level) {
902 case BT_SECURITY_HIGH:
903 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
905 case BT_SECURITY_MEDIUM:
906 auth_type = HCI_AT_DEDICATED_BONDING;
909 auth_type = HCI_AT_NO_BONDING;
/* SDP (PSM 0x0001) never bonds. */
912 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
913 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
914 auth_type = HCI_AT_NO_BONDING_MITM;
916 auth_type = HCI_AT_NO_BONDING;
918 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
919 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
921 switch (l2cap_pi(sk)->sec_level) {
922 case BT_SECURITY_HIGH:
923 auth_type = HCI_AT_GENERAL_BONDING_MITM;
925 case BT_SECURITY_MEDIUM:
926 auth_type = HCI_AT_GENERAL_BONDING;
929 auth_type = HCI_AT_NO_BONDING;
934 hcon = hci_connect(hdev, ACL_LINK, dst,
935 l2cap_pi(sk)->sec_level, auth_type);
939 conn = l2cap_conn_add(hcon, 0);
947 /* Update source addr of the socket */
948 bacpy(src, conn->src);
950 l2cap_chan_add(conn, sk, NULL);
952 sk->sk_state = BT_CONNECT;
953 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: non-SEQPACKET sockets are done immediately. */
955 if (hcon->state == BT_CONNECTED) {
956 if (sk->sk_type != SOCK_SEQPACKET) {
957 l2cap_sock_clear_timer(sk);
958 sk->sk_state = BT_CONNECTED;
964 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address (SEQPACKET requires a PSM) and the
 * channel mode (ERTM/streaming only when enable_ertm), reject sockets
 * already connecting/connected, record the destination, start the
 * connect, and optionally block until BT_CONNECTED. */
969 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
971 struct sock *sk = sock->sk;
972 struct sockaddr_l2 la;
977 if (!addr || addr->sa_family != AF_BLUETOOTH)
980 memset(&la, 0, sizeof(la));
981 len = min_t(unsigned int, sizeof(la), alen);
982 memcpy(&la, addr, len);
/* Connection-oriented sockets must name a PSM to connect to. */
989 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
994 switch (l2cap_pi(sk)->mode) {
995 case L2CAP_MODE_BASIC:
997 case L2CAP_MODE_ERTM:
998 case L2CAP_MODE_STREAMING:
1007 switch (sk->sk_state) {
1011 /* Already connecting */
1015 /* Already connected */
1028 /* Set destination address and psm */
1029 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1030 l2cap_pi(sk)->psm = la.l2_psm;
1032 err = l2cap_do_connect(sk);
/* Block (unless O_NONBLOCK) until connected or timeout/signal. */
1037 err = bt_sock_wait_state(sk, BT_CONNECTED,
1038 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen; ERTM/streaming
 * modes are gated on enable_ertm. If no PSM was bound, auto-assign the
 * first free odd PSM in the dynamic range (0x1001..0x10ff). */
1044 static int l2cap_sock_listen(struct socket *sock, int backlog)
1046 struct sock *sk = sock->sk;
1049 BT_DBG("sk %p backlog %d", sk, backlog);
1053 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1058 switch (l2cap_pi(sk)->mode) {
1059 case L2CAP_MODE_BASIC:
1061 case L2CAP_MODE_ERTM:
1062 case L2CAP_MODE_STREAMING:
/* No PSM bound yet: pick a free dynamic one (valid PSMs are odd). */
1071 if (!l2cap_pi(sk)->psm) {
1072 bdaddr_t *src = &bt_sk(sk)->src;
1077 write_lock_bh(&l2cap_sk_list.lock);
1079 for (psm = 0x1001; psm < 0x1100; psm += 2)
1080 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1081 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1082 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1087 write_unlock_bh(&l2cap_sk_list.lock);
1093 sk->sk_max_ack_backlog = backlog;
1094 sk->sk_ack_backlog = 0;
1095 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) on the listener until a
 * child socket is ready on the accept queue, honoring the receive
 * timeout, pending signals, and listener state changes. */
1102 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1104 DECLARE_WAITQUEUE(wait, current);
1105 struct sock *sk = sock->sk, *nsk;
1109 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1111 if (sk->sk_state != BT_LISTEN) {
1116 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1118 BT_DBG("sk %p timeo %ld", sk, timeo);
1120 /* Wait for an incoming connection. (wake-one). */
1121 add_wait_queue_exclusive(sk->sk_sleep, &wait);
1122 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1123 set_current_state(TASK_INTERRUPTIBLE);
/* Drop the lock while sleeping; re-check state after waking. */
1130 timeo = schedule_timeout(timeo);
1131 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1133 if (sk->sk_state != BT_LISTEN) {
1138 if (signal_pending(current)) {
1139 err = sock_intr_errno(timeo);
1143 set_current_state(TASK_RUNNING);
1144 remove_wait_queue(sk->sk_sleep, &wait);
1149 newsock->state = SS_CONNECTED;
1151 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer's
 * (psm, bdaddr, dcid) or our own (sport, bdaddr, scid). */
1158 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1160 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1161 struct sock *sk = sock->sk;
1163 BT_DBG("sock %p, sk %p", sock, sk);
1165 addr->sa_family = AF_BLUETOOTH;
1166 *len = sizeof(struct sockaddr_l2);
/* peer != 0: report the remote end. */
1169 la->l2_psm = l2cap_pi(sk)->psm;
1170 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1171 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1173 la->l2_psm = l2cap_pi(sk)->sport;
1174 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1175 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM: pop frames from the head of the retransmission queue until we
 * reach the one the peer's ReqSeq says is next expected (i.e. free all
 * acknowledged I-frames), decrementing the unacked counter. */
1181 static void l2cap_drop_acked_frames(struct sock *sk)
1183 struct sk_buff *skb;
1185 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1186 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1189 skb = skb_dequeue(TX_QUEUE(sk));
1192 l2cap_pi(sk)->unacked_frames--;
/* Hand one fully-built L2CAP PDU to the HCI layer for transmission on the
 * channel's ACL link. Returns hci_send_acl's status. */
1198 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1200 struct l2cap_pinfo *pi = l2cap_pi(sk);
1203 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1205 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* ERTM transmit loop: while the tx window has room, clone the next queued
 * I-frame, stamp the current ReqSeq/TxSeq into its control field, and
 * send the clone (the original stays queued for retransmission until
 * acked). On send failure, request a disconnect. */
1212 static int l2cap_ertm_send(struct sock *sk)
1214 struct sk_buff *skb, *tx_skb;
1215 struct l2cap_pinfo *pi = l2cap_pi(sk);
1219 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1220 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* Patch sequence numbers into the control field just-in-time. */
1222 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1223 control |= (pi->req_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1224 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1225 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1227 err = l2cap_do_send(sk, tx_skb);
1229 l2cap_send_disconn_req(pi->conn, sk);
1233 bt_cb(skb)->tx_seq = pi->next_tx_seq;
/* TxSeq is modulo-64 per the ERTM control-field format. */
1234 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1236 pi->unacked_frames++;
/* Advance sk_send_head; NULL once the queue tail has been sent. */
1238 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1239 sk->sk_send_head = NULL;
1241 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Copy 'len' bytes of user iovec into skb: 'count' bytes go into the main
 * skb, the remainder into a chain of fragment skbs (each at most the ACL
 * MTU) hung off frag_list. */
1247 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1249 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1250 struct sk_buff **frag;
1253 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1260 /* Continuation fragments (no L2CAP header) */
1261 frag = &skb_shinfo(skb)->frag_list;
1263 count = min_t(unsigned int, conn->mtu, len);
1265 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1268 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1274 frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) PDU: L2CAP header + 2-byte PSM
 * prefix + payload copied from the user's iovec. Returns the skb or an
 * ERR_PTR on allocation/copy failure. */
1280 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1282 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1283 struct sk_buff *skb;
/* hlen: L2CAP header plus the 2-byte PSM field. */
1284 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1285 struct l2cap_hdr *lh;
1287 BT_DBG("sk %p len %d", sk, (int)len);
1289 count = min_t(unsigned int, (conn->mtu - hlen), len);
1290 skb = bt_skb_send_alloc(sk, count + hlen,
1291 msg->msg_flags & MSG_DONTWAIT, &err);
1293 return ERR_PTR(-ENOMEM);
1295 /* Create L2CAP header */
1296 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1297 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1298 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1299 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1301 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1302 if (unlikely(err < 0)) {
1304 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header + payload from the user's
 * iovec. Returns the skb or an ERR_PTR on failure. */
1309 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1311 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1312 struct sk_buff *skb;
1313 int err, count, hlen = L2CAP_HDR_SIZE;
1314 struct l2cap_hdr *lh;
1316 BT_DBG("sk %p len %d", sk, (int)len);
1318 count = min_t(unsigned int, (conn->mtu - hlen), len);
1319 skb = bt_skb_send_alloc(sk, count + hlen,
1320 msg->msg_flags & MSG_DONTWAIT, &err);
1322 return ERR_PTR(-ENOMEM);
1324 /* Create L2CAP header */
1325 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1326 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1327 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1329 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1330 if (unlikely(err < 0)) {
1332 return ERR_PTR(err);
/* Build an ERTM I-frame: L2CAP header + 16-bit control field, plus a
 * 16-bit SDU-length field when 'sdulen' is nonzero (start-of-SDU frame),
 * then the payload. Returns the skb or an ERR_PTR on failure. */
1337 static struct sk_buff *l2cap_create_ertm_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1339 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1340 struct sk_buff *skb;
/* hlen: L2CAP header + 2 control bytes (SAR length added below if set). */
1341 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1342 struct l2cap_hdr *lh;
1344 BT_DBG("sk %p len %d", sk, (int)len);
1349 count = min_t(unsigned int, (conn->mtu - hlen), len);
1350 skb = bt_skb_send_alloc(sk, count + hlen,
1351 msg->msg_flags & MSG_DONTWAIT, &err);
1353 return ERR_PTR(-ENOMEM);
1355 /* Create L2CAP header */
1356 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1357 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1358 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1359 put_unaligned_le16(control, skb_put(skb, 2));
/* SDU length field is present only in SAR start frames. */
1361 put_unaligned_le16(sdulen, skb_put(skb, 2));
1363 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1364 if (unlikely(err < 0)) {
1366 return ERR_PTR(err);
/* Segment an SDU larger than max_pdu_size into a START frame (carrying
 * the total SDU length) followed by CONTINUE frames and a final END
 * frame, built on a local queue first so a mid-stream failure leaves the
 * socket's tx queue untouched; on success splice onto TX_QUEUE. */
1371 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1373 struct l2cap_pinfo *pi = l2cap_pi(sk);
1374 struct sk_buff *skb;
1375 struct sk_buff_head sar_queue;
1379 __skb_queue_head_init(&sar_queue);
/* First segment: SDU_START with the full SDU length as sdulen. */
1380 control = L2CAP_SDU_START;
1381 skb = l2cap_create_ertm_pdu(sk, msg, pi->max_pdu_size, control, len);
1383 return PTR_ERR(skb);
1385 __skb_queue_tail(&sar_queue, skb);
1386 len -= pi->max_pdu_size;
1387 size +=pi->max_pdu_size;
/* Remaining segments: CONTINUE while more than one PDU is left. */
1393 if (len > pi->max_pdu_size) {
1394 control |= L2CAP_SDU_CONTINUE;
1395 buflen = pi->max_pdu_size;
1397 control |= L2CAP_SDU_END;
1401 skb = l2cap_create_ertm_pdu(sk, msg, buflen, control, 0);
/* Abort: free all segments built so far. */
1403 skb_queue_purge(&sar_queue);
1404 return PTR_ERR(skb);
1407 __skb_queue_tail(&sar_queue, skb);
1412 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1413 if (sk->sk_send_head == NULL)
1414 sk->sk_send_head = sar_queue.next;
/* sendmsg(2): reject MSG_OOB, enforce the outgoing MTU for basic-mode
 * SEQPACKET sockets, require BT_CONNECTED, then dispatch by channel
 * mode — connectionless PDU for DGRAM, single basic PDU, or ERTM
 * (one I-frame when the SDU fits, otherwise SAR segmentation) followed
 * by a transmit-window flush. */
1419 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1421 struct sock *sk = sock->sk;
1422 struct l2cap_pinfo *pi = l2cap_pi(sk);
1423 struct sk_buff *skb;
1427 BT_DBG("sock %p, sk %p", sock, sk);
1429 err = sock_error(sk);
1433 if (msg->msg_flags & MSG_OOB)
1436 /* Check outgoing MTU */
1437 if (sk->sk_type == SOCK_SEQPACKET && pi->mode == L2CAP_MODE_BASIC
1443 if (sk->sk_state != BT_CONNECTED) {
1448 /* Connectionless channel */
1449 if (sk->sk_type == SOCK_DGRAM) {
1450 skb = l2cap_create_connless_pdu(sk, msg, len);
1451 err = l2cap_do_send(sk, skb);
1456 case L2CAP_MODE_BASIC:
1457 /* Create a basic PDU */
1458 skb = l2cap_create_basic_pdu(sk, msg, len);
1464 err = l2cap_do_send(sk, skb);
1469 case L2CAP_MODE_ERTM:
1470 /* Entire SDU fits into one PDU */
1471 if (len <= pi->max_pdu_size) {
1472 control = L2CAP_SDU_UNSEGMENTED;
1473 skb = l2cap_create_ertm_pdu(sk, msg, len, control, 0);
/* Queue for (re)transmission; ertm_send drains the window. */
1478 __skb_queue_tail(TX_QUEUE(sk), skb);
1479 if (sk->sk_send_head == NULL)
1480 sk->sk_send_head = skb;
1482 /* Segment SDU into multiples PDUs */
1483 err = l2cap_sar_segment_sdu(sk, msg, len);
1488 err = l2cap_ertm_send(sk);
1494 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg(2): with defer_setup, the first read on a BT_CONNECT2 socket
 * acts as the acceptance — send the success Connect Response and move to
 * BT_CONFIG. Otherwise defer to the generic Bluetooth receive path. */
1503 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1505 struct sock *sk = sock->sk;
1509 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1510 struct l2cap_conn_rsp rsp;
1512 sk->sk_state = BT_CONFIG;
1514 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1515 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1516 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1517 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1518 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1519 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1527 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS copies in struct
 * l2cap_options over current defaults (partial writes keep existing
 * values); L2CAP_LM maps link-mode bits onto sec_level, role_switch and
 * force_reliable. */
1530 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, int optlen)
1532 struct sock *sk = sock->sk;
1533 struct l2cap_options opts;
1537 BT_DBG("sk %p", sk);
/* Seed opts with current values so a short copy leaves them intact. */
1543 opts.imtu = l2cap_pi(sk)->imtu;
1544 opts.omtu = l2cap_pi(sk)->omtu;
1545 opts.flush_to = l2cap_pi(sk)->flush_to;
1546 opts.mode = l2cap_pi(sk)->mode;
1548 len = min_t(unsigned int, sizeof(opts), optlen);
1549 if (copy_from_user((char *) &opts, optval, len)) {
1554 l2cap_pi(sk)->imtu = opts.imtu;
1555 l2cap_pi(sk)->omtu = opts.omtu;
1556 l2cap_pi(sk)->mode = opts.mode;
1560 if (get_user(opt, (u32 __user *) optval)) {
/* Highest set LM bit wins (checks run low to high). */
1565 if (opt & L2CAP_LM_AUTH)
1566 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1567 if (opt & L2CAP_LM_ENCRYPT)
1568 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1569 if (opt & L2CAP_LM_SECURE)
1570 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1572 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1573 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt(2): SOL_L2CAP goes to the legacy handler; SOL_BLUETOOTH
 * supports BT_SECURITY (SEQPACKET/RAW only, validated range) and
 * BT_DEFER_SETUP (only before/while listening). */
1585 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1587 struct sock *sk = sock->sk;
1588 struct bt_security sec;
1592 BT_DBG("sk %p", sk);
1594 if (level == SOL_L2CAP)
1595 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1597 if (level != SOL_BLUETOOTH)
1598 return -ENOPROTOOPT;
1604 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
/* Default to LOW if the user copies in less than the full struct. */
1609 sec.level = BT_SECURITY_LOW;
1611 len = min_t(unsigned int, sizeof(sec), optlen);
1612 if (copy_from_user((char *) &sec, optval, len)) {
1617 if (sec.level < BT_SECURITY_LOW ||
1618 sec.level > BT_SECURITY_HIGH) {
1623 l2cap_pi(sk)->sec_level = sec.level;
1626 case BT_DEFER_SETUP:
1627 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1632 if (get_user(opt, (u32 __user *) optval)) {
1637 bt_sk(sk)->defer_setup = opt;
/* getsockopt() for the legacy SOL_L2CAP level: returns L2CAP_OPTIONS,
 * an L2CAP_LM flag word reconstructed from sec_level, or
 * L2CAP_CONNINFO (HCI handle + device class). */
1649 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1651 struct sock *sk = sock->sk;
1652 struct l2cap_options opts;
1653 struct l2cap_conninfo cinfo;
1657 BT_DBG("sk %p", sk);
1659 if (get_user(len, optlen))
1666 opts.imtu = l2cap_pi(sk)->imtu;
1667 opts.omtu = l2cap_pi(sk)->omtu;
1668 opts.flush_to = l2cap_pi(sk)->flush_to;
1669 opts.mode = l2cap_pi(sk)->mode;
/* Copy out no more than the caller's buffer holds. */
1671 len = min_t(unsigned int, len, sizeof(opts));
1672 if (copy_to_user(optval, (char *) &opts, len))
/* Translate sec_level back into the cumulative legacy flag set. */
1678 switch (l2cap_pi(sk)->sec_level) {
1679 case BT_SECURITY_LOW:
1680 opt = L2CAP_LM_AUTH;
1682 case BT_SECURITY_MEDIUM:
1683 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1685 case BT_SECURITY_HIGH:
1686 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1694 if (l2cap_pi(sk)->role_switch)
1695 opt |= L2CAP_LM_MASTER;
1697 if (l2cap_pi(sk)->force_reliable)
1698 opt |= L2CAP_LM_RELIABLE;
1700 if (put_user(opt, (u32 __user *) optval))
1704 case L2CAP_CONNINFO:
/* Connection info is valid once connected, or in BT_CONNECT2 when
 * setup has been deferred. */
1705 if (sk->sk_state != BT_CONNECTED &&
1706 !(sk->sk_state == BT_CONNECT2 &&
1707 bt_sk(sk)->defer_setup)) {
1712 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1713 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1715 len = min_t(unsigned int, len, sizeof(cinfo));
1716 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() for SOL_BLUETOOTH (BT_SECURITY, BT_DEFER_SETUP);
 * SOL_L2CAP is routed to the legacy handler above. */
1730 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1732 struct sock *sk = sock->sk;
1733 struct bt_security sec;
1736 BT_DBG("sk %p", sk);
1738 if (level == SOL_L2CAP)
1739 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1741 if (level != SOL_BLUETOOTH)
1742 return -ENOPROTOOPT;
1744 if (get_user(len, optlen))
/* BT_SECURITY only applies to SEQPACKET and RAW sockets. */
1751 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1756 sec.level = l2cap_pi(sk)->sec_level;
1758 len = min_t(unsigned int, len, sizeof(sec));
1759 if (copy_to_user(optval, (char *) &sec, len))
1764 case BT_DEFER_SETUP:
1765 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1770 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* Shut down both directions of the socket and start channel teardown;
 * with SO_LINGER set, wait for the socket to reach BT_CLOSED. */
1784 static int l2cap_sock_shutdown(struct socket *sock, int how)
1786 struct sock *sk = sock->sk;
1789 BT_DBG("sock %p, sk %p", sock, sk);
/* Only the first shutdown does the work; repeat calls are no-ops. */
1795 if (!sk->sk_shutdown) {
1796 sk->sk_shutdown = SHUTDOWN_MASK;
1797 l2cap_sock_clear_timer(sk);
1798 __l2cap_sock_close(sk, 0);
1800 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1801 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() handler: full shutdown (how = 2, both directions), then
 * kill the socket. */
1808 static int l2cap_sock_release(struct socket *sock)
1810 struct sock *sk = sock->sk;
1813 BT_DBG("sock %p, sk %p", sock, sk);
1818 err = l2cap_sock_shutdown(sock, 2);
1821 l2cap_sock_kill(sk);
/* Channel configuration complete: clear config state, mark the socket
 * connected and wake whichever side is waiting (connect() for an
 * outgoing channel, accept() on the parent for an incoming one). */
1825 static void l2cap_chan_ready(struct sock *sk)
1827 struct sock *parent = bt_sk(sk)->parent;
1829 BT_DBG("sk %p, parent %p", sk, parent);
1831 l2cap_pi(sk)->conf_state = 0;
1832 l2cap_sock_clear_timer(sk);
1835 /* Outgoing channel.
1836 * Wake up socket sleeping on connect.
1838 sk->sk_state = BT_CONNECTED;
1839 sk->sk_state_change(sk);
1841 /* Incoming channel.
1842 * Wake up socket sleeping on accept.
1844 parent->sk_data_ready(parent, 0);
1848 /* Copy frame to all raw sockets on that connection */
1849 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1851 struct l2cap_chan_list *l = &conn->chan_list;
1852 struct sk_buff *nskb;
1855 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock, cloning
 * the skb into every SOCK_RAW receiver's queue. */
1857 read_lock(&l->lock);
1858 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1859 if (sk->sk_type != SOCK_RAW)
1862 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: this path may run in atomic (softirq) context. */
1865 nskb = skb_clone(skb, GFP_ATOMIC);
1869 if (sock_queue_rcv_skb(sk, nskb))
1872 read_unlock(&l->lock);
1875 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header + command
 * header + payload, split into frag_list fragments when the total
 * exceeds the connection MTU. */
1876 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1877 u8 code, u8 ident, u16 dlen, void *data)
1879 struct sk_buff *skb, **frag;
1880 struct l2cap_cmd_hdr *cmd;
1881 struct l2cap_hdr *lh;
1884 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1885 conn, code, ident, dlen);
1887 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1888 count = min_t(unsigned int, conn->mtu, len);
1890 skb = bt_skb_alloc(count, GFP_ATOMIC);
/* L2CAP header: payload length and the fixed signalling CID. */
1894 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1895 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1896 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1898 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1901 cmd->len = cpu_to_le16(dlen);
/* First fragment carries both headers; copy whatever payload fits
 * after them. */
1904 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1905 memcpy(skb_put(skb, count), data, count);
1911 /* Continuation fragments (no L2CAP header) */
1912 frag = &skb_shinfo(skb)->frag_list;
1914 count = min_t(unsigned int, conn->mtu, len);
1916 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1920 memcpy(skb_put(*frag, count), data, count);
1925 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: 1/2/4-byte values are
 * returned inline in *val (converted from little-endian), anything
 * larger by pointer; returns the total option length consumed. */
1935 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1937 struct l2cap_conf_opt *opt = *ptr;
1940 len = L2CAP_CONF_OPT_SIZE + opt->len;
1948 *val = *((u8 *) opt->val);
1952 *val = __le16_to_cpu(*((__le16 *) opt->val));
1956 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Larger options are handed back by reference, not by value. */
1960 *val = (unsigned long) opt->val;
1964 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type, len, value) at *ptr and
 * advance *ptr past it; mirror of l2cap_get_conf_opt(). */
1968 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1970 struct l2cap_conf_opt *opt = *ptr;
1972 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1979 *((u8 *) opt->val) = val;
1983 *((__le16 *) opt->val) = cpu_to_le16(val);
1987 *((__le32 *) opt->val) = cpu_to_le32(val);
/* For other lengths, val is a pointer to the raw option bytes. */
1991 memcpy(opt->val, (void *) val, len);
1995 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Non-zero when 'mode' (ERTM or streaming) is advertised by both the
 * local feature mask and the remote's 'feat_mask'. */
1998 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2000 u32 local_feat_mask = l2cap_feat_mask;
/* ERTM support is gated on the enable_ertm module parameter —
 * presumably; the guarding condition is not visible in this extract. */
2002 local_feat_mask |= L2CAP_FEAT_ERTM;
2005 case L2CAP_MODE_ERTM:
2006 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2007 case L2CAP_MODE_STREAMING:
2008 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Keep the requested ERTM/streaming mode if the remote supports it,
 * otherwise fall back to basic mode. */
2014 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2017 case L2CAP_MODE_STREAMING:
2018 case L2CAP_MODE_ERTM:
2019 if (l2cap_mode_supported(mode, remote_feat_mask))
2023 return L2CAP_MODE_BASIC;
/* Build our outgoing Configure Request into 'data': the MTU option
 * (basic mode) or an RFC option (ERTM/streaming); returns the request
 * length via ptr arithmetic (tail not visible in this extract). */
2027 static int l2cap_build_conf_req(struct sock *sk, void *data)
2029 struct l2cap_pinfo *pi = l2cap_pi(sk);
2030 struct l2cap_conf_req *req = data;
2031 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
2032 void *ptr = req->data;
2034 BT_DBG("sk %p", sk);
/* Mode is only (re)negotiated on the very first config exchange. */
2036 if (pi->num_conf_req || pi->num_conf_rsp)
2040 case L2CAP_MODE_STREAMING:
2041 case L2CAP_MODE_ERTM:
/* STATE2_DEVICE: this side insists on the enhanced mode; abort the
 * connection if the peer cannot do it. */
2042 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2043 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2044 l2cap_send_disconn_req(pi->conn, sk);
2047 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2053 case L2CAP_MODE_BASIC:
/* Only send an MTU option when it deviates from the default. */
2054 if (pi->imtu != L2CAP_DEFAULT_MTU)
2055 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2058 case L2CAP_MODE_ERTM:
/* Timeouts are 0 in a request: the responder picks the values. */
2059 rfc.mode = L2CAP_MODE_ERTM;
2060 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2061 rfc.max_transmit = L2CAP_DEFAULT_MAX_RECEIVE;
2062 rfc.retrans_timeout = 0;
2063 rfc.monitor_timeout = 0;
2064 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2066 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2067 sizeof(rfc), (unsigned long) &rfc);
2070 case L2CAP_MODE_STREAMING:
/* Streaming mode: no retransmission, so window/timeouts are 0. */
2071 rfc.mode = L2CAP_MODE_STREAMING;
2073 rfc.max_transmit = 0;
2074 rfc.retrans_timeout = 0;
2075 rfc.monitor_timeout = 0;
2076 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2078 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2079 sizeof(rfc), (unsigned long) &rfc);
2083 /* FIXME: Need actual value of the flush timeout */
2084 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2085 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2087 req->dcid = cpu_to_le16(pi->dcid);
2088 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (pi->conf_req) and
 * build our Configure Response into 'data'.  Unknown non-hint options
 * produce L2CAP_CONF_UNKNOWN; mode/MTU mismatches produce
 * L2CAP_CONF_UNACCEPT with counter-proposals appended. */
2093 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2095 struct l2cap_pinfo *pi = l2cap_pi(sk);
2096 struct l2cap_conf_rsp *rsp = data;
2097 void *ptr = rsp->data;
2098 void *req = pi->conf_req;
2099 int len = pi->conf_len;
2100 int type, hint, olen;
2102 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2103 u16 mtu = L2CAP_DEFAULT_MTU;
2104 u16 result = L2CAP_CONF_SUCCESS;
2106 BT_DBG("sk %p", sk);
/* First pass: walk every option the peer sent. */
2108 while (len >= L2CAP_CONF_OPT_SIZE) {
2109 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* The hint bit marks options we may ignore without rejecting. */
2111 hint = type & L2CAP_CONF_HINT;
2112 type &= L2CAP_CONF_MASK;
2115 case L2CAP_CONF_MTU:
2119 case L2CAP_CONF_FLUSH_TO:
2123 case L2CAP_CONF_QOS:
2126 case L2CAP_CONF_RFC:
2127 if (olen == sizeof(rfc))
2128 memcpy(&rfc, (void *) val, olen);
/* Unknown non-hint option: echo its type back in the response. */
2135 result = L2CAP_CONF_UNKNOWN;
2136 *((u8 *) ptr++) = type;
/* Mode selection happens only on the first config exchange. */
2141 if (pi->num_conf_rsp || pi->num_conf_req)
2145 case L2CAP_MODE_STREAMING:
2146 case L2CAP_MODE_ERTM:
2147 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2148 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2149 return -ECONNREFUSED;
2152 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Peer proposed a different mode than ours: counter-propose, but
 * give up after the second mismatching response. */
2157 if (pi->mode != rfc.mode) {
2158 result = L2CAP_CONF_UNACCEPT;
2159 rfc.mode = pi->mode;
2161 if (pi->num_conf_rsp == 1)
2162 return -ECONNREFUSED;
2164 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2165 sizeof(rfc), (unsigned long) &rfc);
2169 if (result == L2CAP_CONF_SUCCESS) {
2170 /* Configure output options and let the other side know
2171 * which ones we don't like. */
/* MTU below the spec minimum is unacceptable. */
2173 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2174 result = L2CAP_CONF_UNACCEPT;
2177 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2179 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2182 case L2CAP_MODE_BASIC:
2183 pi->fcs = L2CAP_FCS_NONE;
2184 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2187 case L2CAP_MODE_ERTM:
/* Adopt the peer's ERTM parameters; we choose the timeouts. */
2188 pi->remote_tx_win = rfc.txwin_size;
2189 pi->remote_max_tx = rfc.max_transmit;
2190 pi->max_pdu_size = rfc.max_pdu_size;
2192 rfc.retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
2193 rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
2195 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2198 case L2CAP_MODE_STREAMING:
2199 pi->remote_tx_win = rfc.txwin_size;
2200 pi->max_pdu_size = rfc.max_pdu_size;
2202 pi->conf_state |= L2CAP_CONF_MODE_DONE;
/* Any other mode: reject with a basic-mode RFC counter-proposal. */
2206 result = L2CAP_CONF_UNACCEPT;
2208 memset(&rfc, 0, sizeof(rfc));
2209 rfc.mode = pi->mode;
2212 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2213 sizeof(rfc), (unsigned long) &rfc);
2215 if (result == L2CAP_CONF_SUCCESS)
2216 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2218 rsp->scid = cpu_to_le16(pi->dcid);
2219 rsp->result = cpu_to_le16(result);
2220 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response and build a follow-up Configure
 * Request into 'data', adopting or re-proposing MTU, flush timeout
 * and RFC parameters; *result may be downgraded to UNACCEPT. */
2225 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2227 struct l2cap_pinfo *pi = l2cap_pi(sk);
2228 struct l2cap_conf_req *req = data;
2229 void *ptr = req->data;
2232 struct l2cap_conf_rfc rfc;
2234 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2236 while (len >= L2CAP_CONF_OPT_SIZE) {
2237 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2240 case L2CAP_CONF_MTU:
/* Peer's MTU is below the spec minimum: counter with the minimum. */
2241 if (val < L2CAP_DEFAULT_MIN_MTU) {
2242 *result = L2CAP_CONF_UNACCEPT;
2243 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2246 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2249 case L2CAP_CONF_FLUSH_TO:
2251 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2255 case L2CAP_CONF_RFC:
2256 if (olen == sizeof(rfc))
2257 memcpy(&rfc, (void *)val, olen);
/* If we insisted on a mode (STATE2_DEVICE), a differing RFC mode
 * from the peer aborts the connection. */
2259 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2260 rfc.mode != pi->mode)
2261 return -ECONNREFUSED;
2263 pi->mode = rfc.mode;
2266 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2267 sizeof(rfc), (unsigned long) &rfc);
/* On success, latch the negotiated ERTM/streaming parameters. */
2272 if (*result == L2CAP_CONF_SUCCESS) {
2274 case L2CAP_MODE_ERTM:
2275 pi->remote_tx_win = rfc.txwin_size;
2276 pi->retrans_timeout = rfc.retrans_timeout;
2277 pi->monitor_timeout = rfc.monitor_timeout;
2278 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2280 case L2CAP_MODE_STREAMING:
2281 pi->max_pdu_size = le16_to_cpu(rfc.max_pdu_size);
2286 req->dcid = cpu_to_le16(pi->dcid);
2287 req->flags = cpu_to_le16(0x0000);
/* Build a minimal Configure Response header (scid, result, flags)
 * with no options into 'data'. */
2292 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2294 struct l2cap_conf_rsp *rsp = data;
2295 void *ptr = rsp->data;
2297 BT_DBG("sk %p", sk);
2299 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2300 rsp->result = cpu_to_le16(result);
2301 rsp->flags = cpu_to_le16(flags);
/* Handle a Command Reject: if it answers our outstanding Information
 * Request, stop the info timer and proceed with connection setup as
 * if the feature-mask exchange were complete. */
2306 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2308 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* Only "command not understood" (reason 0x0000) is handled here. */
2310 if (rej->reason != 0x0000)
2313 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2314 cmd->ident == conn->info_ident) {
2315 del_timer(&conn->info_timer);
2317 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2318 conn->info_ident = 0;
2320 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate a listener for the
 * PSM, run security/backlog checks, allocate and register a child
 * socket, then answer with success/pending/refusal.  May also kick
 * off the feature-mask Information exchange. */
2326 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2328 struct l2cap_chan_list *list = &conn->chan_list;
2329 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2330 struct l2cap_conn_rsp rsp;
2331 struct sock *sk, *parent;
2332 int result, status = L2CAP_CS_NO_INFO;
2334 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2335 __le16 psm = req->psm;
2337 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2339 /* Check if we have socket listening on psm */
2340 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2342 result = L2CAP_CR_BAD_PSM;
2346 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2347 if (psm != cpu_to_le16(0x0001) &&
2348 !hci_conn_check_link_mode(conn->hcon)) {
2349 conn->disc_reason = 0x05;
2350 result = L2CAP_CR_SEC_BLOCK;
2354 result = L2CAP_CR_NO_MEM;
2356 /* Check for backlog size */
2357 if (sk_acceptq_is_full(parent)) {
2358 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2362 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC)
2366 write_lock_bh(&list->lock);
2368 /* Check if we already have channel with that dcid */
2369 if (__l2cap_get_chan_by_dcid(list, scid)) {
2370 write_unlock_bh(&list->lock);
2371 sock_set_flag(sk, SOCK_ZAPPED);
2372 l2cap_sock_kill(sk);
2376 hci_conn_hold(conn->hcon);
/* Inherit settings from the listener and record the peer's CID. */
2378 l2cap_sock_init(sk, parent);
2379 bacpy(&bt_sk(sk)->src, conn->src);
2380 bacpy(&bt_sk(sk)->dst, conn->dst);
2381 l2cap_pi(sk)->psm = psm;
2382 l2cap_pi(sk)->dcid = scid;
2384 __l2cap_chan_add(conn, sk, parent);
2385 dcid = l2cap_pi(sk)->scid;
2387 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2389 l2cap_pi(sk)->ident = cmd->ident;
/* Only decide the final answer once the feature exchange is done. */
2391 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2392 if (l2cap_check_security(sk)) {
2393 if (bt_sk(sk)->defer_setup) {
2394 sk->sk_state = BT_CONNECT2;
2395 result = L2CAP_CR_PEND;
2396 status = L2CAP_CS_AUTHOR_PEND;
2397 parent->sk_data_ready(parent, 0);
2399 sk->sk_state = BT_CONFIG;
2400 result = L2CAP_CR_SUCCESS;
2401 status = L2CAP_CS_NO_INFO;
2404 sk->sk_state = BT_CONNECT2;
2405 result = L2CAP_CR_PEND;
2406 status = L2CAP_CS_AUTHEN_PEND;
2409 sk->sk_state = BT_CONNECT2;
2410 result = L2CAP_CR_PEND;
2411 status = L2CAP_CS_NO_INFO;
2414 write_unlock_bh(&list->lock);
2417 bh_unlock_sock(parent);
2420 rsp.scid = cpu_to_le16(scid);
2421 rsp.dcid = cpu_to_le16(dcid);
2422 rsp.result = cpu_to_le16(result);
2423 rsp.status = cpu_to_le16(status);
2424 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info yet: start the feature-mask exchange now. */
2426 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2427 struct l2cap_info_req info;
2428 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2430 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2431 conn->info_ident = l2cap_get_ident(conn);
2433 mod_timer(&conn->info_timer, jiffies +
2434 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2436 l2cap_send_cmd(conn, conn->info_ident,
2437 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response: on success move to BT_CONFIG and send
 * our first Configure Request; on pending just mark the channel; any
 * other result tears the channel down with ECONNREFUSED. */
2443 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2445 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2446 u16 scid, dcid, result, status;
2450 scid = __le16_to_cpu(rsp->scid);
2451 dcid = __le16_to_cpu(rsp->dcid);
2452 result = __le16_to_cpu(rsp->result);
2453 status = __le16_to_cpu(rsp->status);
2455 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* A pending response may carry scid 0; fall back to matching the
 * channel by the command ident we used. */
2458 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2462 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2468 case L2CAP_CR_SUCCESS:
2469 sk->sk_state = BT_CONFIG;
2470 l2cap_pi(sk)->ident = 0;
2471 l2cap_pi(sk)->dcid = dcid;
2472 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2474 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2476 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2477 l2cap_build_conf_req(sk, req), req);
2478 l2cap_pi(sk)->num_conf_req++;
2482 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2486 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configure Request: accumulate (possibly multi-fragment)
 * option data in pi->conf_req, then parse it and respond.  When both
 * input and output configuration complete, the channel goes live. */
2494 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2496 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2502 dcid = __le16_to_cpu(req->dcid);
2503 flags = __le16_to_cpu(req->flags);
2505 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2507 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2511 if (sk->sk_state == BT_DISCONN)
2514 /* Reject if config buffer is too small. */
2515 len = cmd_len - sizeof(*req);
2516 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2517 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2518 l2cap_build_conf_rsp(sk, rsp,
2519 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated config request. */
2524 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2525 l2cap_pi(sk)->conf_len += len;
/* Continuation flag set: more fragments follow. */
2527 if (flags & 0x0001) {
2528 /* Incomplete config. Send empty response. */
2529 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2530 l2cap_build_conf_rsp(sk, rsp,
2531 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2535 /* Complete config. */
2536 len = l2cap_parse_conf_req(sk, rsp);
2538 l2cap_send_disconn_req(conn, sk);
2542 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2543 l2cap_pi(sk)->num_conf_rsp++;
2545 /* Reset config buffer. */
2546 l2cap_pi(sk)->conf_len = 0;
2548 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: reset ERTM state and go connected. */
2551 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2552 sk->sk_state = BT_CONNECTED;
2553 l2cap_pi(sk)->next_tx_seq = 0;
2554 l2cap_pi(sk)->expected_ack_seq = 0;
2555 l2cap_pi(sk)->unacked_frames = 0;
2556 __skb_queue_head_init(TX_QUEUE(sk));
2557 l2cap_chan_ready(sk);
/* We have not sent our own config request yet: send it now. */
2561 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2563 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2564 l2cap_build_conf_req(sk, buf), buf);
2565 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configure Response: on UNACCEPT, re-negotiate up to
 * L2CAP_CONF_MAX_CONF_RSP times; any other failure disconnects.  When
 * both config directions are done the channel goes live. */
2573 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2575 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2576 u16 scid, flags, result;
2579 scid = __le16_to_cpu(rsp->scid);
2580 flags = __le16_to_cpu(rsp->flags);
2581 result = __le16_to_cpu(rsp->result);
2583 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2584 scid, flags, result);
2586 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2591 case L2CAP_CONF_SUCCESS:
2594 case L2CAP_CONF_UNACCEPT:
/* Bounded retry: parse the peer's counter-proposals and send an
 * adjusted Configure Request. */
2595 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2596 int len = cmd->len - sizeof(*rsp);
2599 /* throw out any old stored conf requests */
2600 result = L2CAP_CONF_SUCCESS;
2601 len = l2cap_parse_conf_rsp(sk, rsp->data,
2604 l2cap_send_disconn_req(conn, sk);
2608 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2609 L2CAP_CONF_REQ, len, req);
2610 l2cap_pi(sk)->num_conf_req++;
2611 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: disconnect with a 5s grace timer. */
2617 sk->sk_state = BT_DISCONN;
2618 sk->sk_err = ECONNRESET;
2619 l2cap_sock_set_timer(sk, HZ * 5);
2620 l2cap_send_disconn_req(conn, sk);
2627 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Both directions configured: reset receive-side ERTM state. */
2629 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2630 sk->sk_state = BT_CONNECTED;
2631 l2cap_pi(sk)->expected_tx_seq = 0;
2632 l2cap_pi(sk)->num_to_ack = 0;
2633 __skb_queue_head_init(TX_QUEUE(sk));
2634 l2cap_chan_ready(sk);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, purge the TX queue and tear down the channel with
 * ECONNRESET. */
2642 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2644 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2645 struct l2cap_disconn_rsp rsp;
2649 scid = __le16_to_cpu(req->scid);
2650 dcid = __le16_to_cpu(req->dcid);
2652 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our local scid — look the channel up by it. */
2654 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2658 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2659 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2660 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2662 sk->sk_shutdown = SHUTDOWN_MASK;
2664 skb_queue_purge(TX_QUEUE(sk));
2666 l2cap_chan_del(sk, ECONNRESET);
2669 l2cap_sock_kill(sk);
/* Handle a Disconnection Response to our own request: purge the TX
 * queue and finish tearing down the channel (no error). */
2673 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2675 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2679 scid = __le16_to_cpu(rsp->scid);
2680 dcid = __le16_to_cpu(rsp->dcid);
2682 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2684 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2688 skb_queue_purge(TX_QUEUE(sk));
2690 l2cap_chan_del(sk, 0);
2693 l2cap_sock_kill(sk);
/* Handle an Information Request: answer with our feature mask, our
 * fixed-channel map, or NOTSUPP for unknown info types. */
2697 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2699 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2702 type = __le16_to_cpu(req->type);
2704 BT_DBG("type 0x%4.4x", type);
2706 if (type == L2CAP_IT_FEAT_MASK) {
2708 u32 feat_mask = l2cap_feat_mask;
2709 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2710 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2711 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* ERTM advertisement — presumably gated on enable_ertm; the guard
 * line is not visible in this extract. */
2713 feat_mask |= L2CAP_FEAT_ERTM;
/* put_unaligned: rsp->data may not be 4-byte aligned. */
2714 put_unaligned(cpu_to_le32(feat_mask), (__le32 *) rsp->data);
2715 l2cap_send_cmd(conn, cmd->ident,
2716 L2CAP_INFO_RSP, sizeof(buf), buf);
2717 } else if (type == L2CAP_IT_FIXED_CHAN) {
2719 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2720 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2721 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte response header. */
2722 memcpy(buf + 4, l2cap_fixed_chan, 8);
2723 l2cap_send_cmd(conn, cmd->ident,
2724 L2CAP_INFO_RSP, sizeof(buf), buf);
2726 struct l2cap_info_rsp rsp;
2727 rsp.type = cpu_to_le16(type);
2728 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2729 l2cap_send_cmd(conn, cmd->ident,
2730 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response: record the peer's feature mask,
 * optionally chain a fixed-channel query, then resume connection
 * setup once the exchange is complete. */
2736 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2738 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2741 type = __le16_to_cpu(rsp->type);
2742 result = __le16_to_cpu(rsp->result);
2744 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2746 del_timer(&conn->info_timer);
2748 if (type == L2CAP_IT_FEAT_MASK) {
2749 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask for its channel map before
 * declaring the exchange done. */
2751 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2752 struct l2cap_info_req req;
2753 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2755 conn->info_ident = l2cap_get_ident(conn);
2757 l2cap_send_cmd(conn, conn->info_ident,
2758 L2CAP_INFO_REQ, sizeof(req), &req);
2760 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2761 conn->info_ident = 0;
2763 l2cap_conn_start(conn);
2765 } else if (type == L2CAP_IT_FIXED_CHAN) {
2766 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2767 conn->info_ident = 0;
2769 l2cap_conn_start(conn);
/* Dispatch every command in a signalling-channel PDU to its handler;
 * a non-zero handler result produces a Command Reject. */
2775 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
2777 u8 *data = skb->data;
2779 struct l2cap_cmd_hdr cmd;
/* Mirror the raw signalling traffic to any SOCK_RAW listeners. */
2782 l2cap_raw_recv(conn, skb);
/* A single PDU may carry multiple commands back to back. */
2784 while (len >= L2CAP_CMD_HDR_SIZE) {
2786 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
2787 data += L2CAP_CMD_HDR_SIZE;
2788 len -= L2CAP_CMD_HDR_SIZE;
2790 cmd_len = le16_to_cpu(cmd.len);
2792 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Length overruns the PDU, or ident 0 (reserved): bail out. */
2794 if (cmd_len > len || !cmd.ident) {
2795 BT_DBG("corrupted command");
2800 case L2CAP_COMMAND_REJ:
2801 l2cap_command_rej(conn, &cmd, data);
2804 case L2CAP_CONN_REQ:
2805 err = l2cap_connect_req(conn, &cmd, data);
2808 case L2CAP_CONN_RSP:
2809 err = l2cap_connect_rsp(conn, &cmd, data);
2812 case L2CAP_CONF_REQ:
2813 err = l2cap_config_req(conn, &cmd, cmd_len, data);
2816 case L2CAP_CONF_RSP:
2817 err = l2cap_config_rsp(conn, &cmd, data);
2820 case L2CAP_DISCONN_REQ:
2821 err = l2cap_disconnect_req(conn, &cmd, data);
2824 case L2CAP_DISCONN_RSP:
2825 err = l2cap_disconnect_rsp(conn, &cmd, data);
2828 case L2CAP_ECHO_REQ:
/* Echo: reflect the request payload straight back. */
2829 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2832 case L2CAP_ECHO_RSP:
2835 case L2CAP_INFO_REQ:
2836 err = l2cap_information_req(conn, &cmd, data);
2839 case L2CAP_INFO_RSP:
2840 err = l2cap_information_rsp(conn, &cmd, data);
2844 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2850 struct l2cap_cmd_rej rej;
2851 BT_DBG("error %d", err);
2853 /* FIXME: Map err to a valid reason */
2854 rej.reason = cpu_to_le16(0);
2855 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Reassemble a segmented SDU from ERTM I-frames according to the SAR
 * bits in 'control': unsegmented frames are queued directly; start/
 * continue/end fragments build up pi->sdu until complete. */
2865 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
2867 struct l2cap_pinfo *pi = l2cap_pi(sk);
2868 struct sk_buff *_skb;
2871 switch (control & L2CAP_CTRL_SAR) {
2872 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while reassembly is in progress is a
 * protocol violation. */
2873 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
2878 err = sock_queue_rcv_skb(sk, skb);
2884 case L2CAP_SDU_START:
2885 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* Start fragment carries the total SDU length first. */
2890 pi->sdu_len = get_unaligned_le16(skb->data);
2893 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
2899 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2901 pi->conn_state |= L2CAP_CONN_SAR_SDU;
2902 pi->partial_sdu_len = skb->len;
2906 case L2CAP_SDU_CONTINUE:
2907 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2910 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* Guard against overrunning the announced SDU length. */
2912 pi->partial_sdu_len += skb->len;
2913 if (pi->partial_sdu_len > pi->sdu_len)
2921 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
2924 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
2926 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
2927 pi->partial_sdu_len += skb->len;
/* Complete SDU: hand a clone of the assembled buffer upstream. */
2929 if (pi->partial_sdu_len == pi->sdu_len) {
2930 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
2931 err = sock_queue_rcv_skb(sk, _skb);
/* Process an incoming ERTM I-frame.  In-sequence frames are fed to
 * SDU reassembly and periodically acknowledged with RR; an
 * out-of-sequence txSeq triggers a REJ S-frame (once per gap). */
2945 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
2947 struct l2cap_pinfo *pi = l2cap_pi(sk);
2948 u8 tx_seq = __get_txseq(rx_control);
2952 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
2954 if (tx_seq == pi->expected_tx_seq) {
/* Expected frame arrived: any outstanding REJ is now satisfied. */
2955 if (pi->conn_state & L2CAP_CONN_UNDER_REJ)
2956 pi->conn_state &= ~L2CAP_CONN_UNDER_REJ;
2958 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Sequence numbers are modulo 64 per the ERTM control field. */
2962 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Acknowledge in batches: send RR every NUM_TO_ACK frames. */
2963 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
2964 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1) {
2965 tx_control |= L2CAP_SUPER_RCV_READY;
2966 tx_control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2970 /* Unexpected txSeq. Send a REJ S-frame */
/* Send at most one REJ per gap; wait for recovery to finish. */
2972 if (!(pi->conn_state & L2CAP_CONN_UNDER_REJ)) {
2973 tx_control |= L2CAP_SUPER_REJECT;
2974 tx_control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
2975 pi->conn_state |= L2CAP_CONN_UNDER_REJ;
2983 return l2cap_send_sframe(pi, tx_control);
/* Process an incoming ERTM S-frame: RR acknowledges and resumes
 * transmission; REJ rewinds the send window to the requested seq and
 * retransmits.  RNR and SREJ are not handled here. */
2986 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
2988 struct l2cap_pinfo *pi = l2cap_pi(sk);
2990 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
2992 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
2993 case L2CAP_SUPER_RCV_READY:
2994 pi->expected_ack_seq = __get_reqseq(rx_control);
2995 l2cap_drop_acked_frames(sk);
2996 l2cap_ertm_send(sk);
2999 case L2CAP_SUPER_REJECT:
3000 pi->expected_ack_seq = __get_reqseq(rx_control);
3001 l2cap_drop_acked_frames(sk);
/* Rewind: restart transmission from the first unacked frame. */
3003 sk->sk_send_head = TX_QUEUE(sk)->next;
3004 pi->next_tx_seq = pi->expected_ack_seq;
3006 l2cap_ertm_send(sk);
3010 case L2CAP_SUPER_RCV_NOT_READY:
3011 case L2CAP_SUPER_SELECT_REJECT:
/* Deliver a data PDU to the channel identified by 'cid': basic mode
 * queues the payload directly (after an MTU check); ERTM strips and
 * interprets the 16-bit control field first. */
3018 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3024 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3026 BT_DBG("unknown cid 0x%4.4x", cid);
3030 BT_DBG("sk %p, len %d", sk, skb->len);
3032 if (sk->sk_state != BT_CONNECTED)
3035 switch (l2cap_pi(sk)->mode) {
3036 case L2CAP_MODE_BASIC:
3037 /* If socket recv buffers overflows we drop data here
3038 * which is *bad* because L2CAP has to be reliable.
3039 * But we don't have any other choice. L2CAP doesn't
3040 * provide flow control mechanism. */
/* Drop frames larger than the negotiated incoming MTU. */
3042 if (l2cap_pi(sk)->imtu < skb->len)
3045 if (!sock_queue_rcv_skb(sk, skb))
3049 case L2CAP_MODE_ERTM:
3050 control = get_unaligned_le16(skb->data);
3054 if (__is_sar_start(control))
3058 * We can just drop the corrupted I-frame here.
3059 * Receiver will miss it and start proper recovery
3060 * procedures and ask retransmission.
3062 if (len > L2CAP_DEFAULT_MAX_PDU_SIZE)
/* Control-field bit 0 distinguishes I-frames from S-frames. */
3065 if (__is_iframe(control))
3066 err = l2cap_data_channel_iframe(sk, control, skb);
3068 err = l2cap_data_channel_sframe(sk, control, skb);
3075 BT_DBG("sk %p: bad mode 0x%2.2x", sk, l2cap_pi(sk)->mode);
/* Deliver a connectionless-channel frame to the socket bound to the
 * given PSM, subject to state and MTU checks. */
3089 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3093 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3097 BT_DBG("sk %p, len %d", sk, skb->len);
3099 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3102 if (l2cap_pi(sk)->imtu < skb->len)
3105 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level L2CAP receive: strip the basic header, validate the
 * length, and route by CID to the signalling, connectionless or data
 * channel handler. */
3117 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3119 struct l2cap_hdr *lh = (void *) skb->data;
3123 skb_pull(skb, L2CAP_HDR_SIZE);
3124 cid = __le16_to_cpu(lh->cid);
3125 len = __le16_to_cpu(lh->len);
/* Header length must match the remaining payload exactly. */
3127 if (len != skb->len) {
3132 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3135 case L2CAP_CID_SIGNALING:
3136 l2cap_sig_channel(conn, skb);
3139 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM first in the payload. */
3140 psm = get_unaligned((__le16 *) skb->data);
3142 l2cap_conless_channel(conn, psm, skb);
3146 l2cap_data_channel(conn, cid, skb);
3151 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI connect indication: scan listening sockets for an exact-address
 * or wildcard match and return the accumulated HCI link-mode flags
 * (exact matches take precedence). */
3153 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3155 int exact = 0, lm1 = 0, lm2 = 0;
3156 register struct sock *sk;
3157 struct hlist_node *node;
/* Only ACL links carry L2CAP. */
3159 if (type != ACL_LINK)
3162 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3164 /* Find listening sockets and check their link_mode */
3165 read_lock(&l2cap_sk_list.lock);
3166 sk_for_each(sk, node, &l2cap_sk_list.head) {
3167 if (sk->sk_state != BT_LISTEN)
/* lm1 collects flags from sockets bound to this adapter's own
 * address; lm2 from wildcard (BDADDR_ANY) listeners. */
3170 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3171 lm1 |= HCI_LM_ACCEPT;
3172 if (l2cap_pi(sk)->role_switch)
3173 lm1 |= HCI_LM_MASTER;
3175 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3176 lm2 |= HCI_LM_ACCEPT;
3177 if (l2cap_pi(sk)->role_switch)
3178 lm2 |= HCI_LM_MASTER;
3181 read_unlock(&l2cap_sk_list.lock);
3183 return exact ? lm1 : lm2;
/* HCI connect confirmation: on success attach an l2cap_conn to the
 * ACL link and mark it ready; on failure tear it down with the
 * translated error. */
3186 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3188 struct l2cap_conn *conn;
3190 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3192 if (hcon->type != ACL_LINK)
3196 conn = l2cap_conn_add(hcon, status);
3198 l2cap_conn_ready(conn);
3200 l2cap_conn_del(hcon, bt_err(status));
/* HCI disconnect indication: report the disconnect reason recorded on
 * the L2CAP connection (ACL links only). */
3205 static int l2cap_disconn_ind(struct hci_conn *hcon)
3207 struct l2cap_conn *conn = hcon->l2cap_data;
3209 BT_DBG("hcon %p", hcon);
3211 if (hcon->type != ACL_LINK || !conn)
3214 return conn->disc_reason;
/* HCI disconnect confirmation: tear down the L2CAP connection with
 * the HCI reason translated to an errno. */
3217 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3219 BT_DBG("hcon %p reason %d", hcon, reason);
3221 if (hcon->type != ACL_LINK)
3224 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a SEQPACKET channel: losing
 * encryption gives MEDIUM-security channels a 5s grace timer and
 * closes HIGH-security channels outright; regaining it clears the
 * grace timer. */
3229 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3231 if (sk->sk_type != SOCK_SEQPACKET)
3234 if (encrypt == 0x00) {
3235 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3236 l2cap_sock_clear_timer(sk);
3237 l2cap_sock_set_timer(sk, HZ * 5);
3238 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3239 __l2cap_sock_close(sk, ECONNREFUSED);
3241 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3242 l2cap_sock_clear_timer(sk);
/*
 * HCI callback: an authentication/encryption request completed with
 * 'status' and current encryption state 'encrypt'.  Walk every channel
 * on the connection and advance its state machine:
 *   - channels already connected/configuring: re-check encryption;
 *   - BT_CONNECT channels: security done, send the pending
 *     L2CAP Connection Request;
 *   - BT_CONNECT2 channels (incoming, waiting on security): answer the
 *     remote with Connection Response — success moves to BT_CONFIG,
 *     failure replies L2CAP_CR_SEC_BLOCK and schedules disconnect.
 * NOTE(review): this listing elides the declarations of 'sk' and
 * 'result', the !conn early return, per-socket bh_lock/bh_unlock calls,
 * and the status tests selecting success vs. failure branches — do not
 * modify without the full file.
 */
3248 struct l2cap_chan_list *l;
3249 struct l2cap_conn *conn = hcon->l2cap_data;
3255 l = &conn->chan_list;
3257 BT_DBG("conn %p", conn);
/* Iterate the per-connection channel list under its read lock. */
3259 read_lock(&l->lock);
3261 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channels still waiting on connection completion are skipped here
 * (the body of this test is elided in this listing). */
3264 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
3269 if (!status && (sk->sk_state == BT_CONNECTED ||
3270 sk->sk_state == BT_CONFIG)) {
3271 l2cap_check_encryption(sk, encrypt);
3276 if (sk->sk_state == BT_CONNECT) {
/* Security cleared for an outgoing channel: issue the deferred
 * L2CAP Connection Request now. */
3278 struct l2cap_conn_req req;
3279 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
3280 req.psm = l2cap_pi(sk)->psm;
3282 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
3284 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3285 L2CAP_CONN_REQ, sizeof(req), &req);
3287 l2cap_sock_clear_timer(sk);
3288 l2cap_sock_set_timer(sk, HZ / 10);
3290 } else if (sk->sk_state == BT_CONNECT2) {
/* Incoming channel that was waiting on security: answer now. */
3291 struct l2cap_conn_rsp rsp;
3295 sk->sk_state = BT_CONFIG;
3296 result = L2CAP_CR_SUCCESS;
/* Failure branch (condition elided): block the connection. */
3298 sk->sk_state = BT_DISCONN;
3299 l2cap_sock_set_timer(sk, HZ / 10);
3300 result = L2CAP_CR_SEC_BLOCK;
3303 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3304 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3305 rsp.result = cpu_to_le16(result);
3306 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3307 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
3308 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
3314 read_unlock(&l->lock);
/*
 * HCI callback: raw ACL data arrived for this connection.
 * Reassembles L2CAP frames that the controller fragmented across ACL
 * packets: an ACL_START packet carries the L2CAP header whose length
 * field tells us the total frame size; continuation packets are
 * appended into conn->rx_skb until rx_len reaches zero, at which point
 * the complete frame is handed to l2cap_recv_frame().
 * Any inconsistency (stray start, short header, over-long frame or
 * fragment) drops the partial frame and marks the connection
 * unreliable with ECOMM.
 * NOTE(review): this listing elides goto/return lines, the 'drop:'
 * style labels, kfree_skb() of the input skb, some length checks
 * (e.g. skb->len < L2CAP_HDR_SIZE) and the allocation-failure branch —
 * the visible error messages imply them, but confirm in the full file.
 */
3321 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the l2cap_conn on first data if none exists yet. */
3323 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
3326 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
3328 if (flags & ACL_START) {
3329 struct l2cap_hdr *hdr;
/* A start packet while reassembly is in progress: the previous
 * frame can never complete — discard it. */
3333 BT_ERR("Unexpected start frame (len %d)", skb->len);
3334 kfree_skb(conn->rx_skb);
3335 conn->rx_skb = NULL;
3337 l2cap_conn_unreliable(conn, ECOMM);
/* Start packet too small to even hold the L2CAP header. */
3341 BT_ERR("Frame is too short (len %d)", skb->len);
3342 l2cap_conn_unreliable(conn, ECOMM);
3346 hdr = (struct l2cap_hdr *) skb->data;
3347 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
3349 if (len == skb->len) {
3350 /* Complete frame received */
3351 l2cap_recv_frame(conn, skb);
3355 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
3357 if (skb->len > len) {
3358 BT_ERR("Frame is too long (len %d, expected len %d)",
3360 l2cap_conn_unreliable(conn, ECOMM);
3364 /* Allocate skb for the complete frame (with header) */
3365 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
/* Copy the first fragment in; rx_len tracks what is still owed. */
3369 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3371 conn->rx_len = len - skb->len;
/* --- Continuation packet path (else-branch, brace elided) --- */
3373 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
3375 if (!conn->rx_len) {
3376 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
3377 l2cap_conn_unreliable(conn, ECOMM);
3381 if (skb->len > conn->rx_len) {
3382 BT_ERR("Fragment is too long (len %d, expected %d)",
3383 skb->len, conn->rx_len);
3384 kfree_skb(conn->rx_skb);
3385 conn->rx_skb = NULL;
3387 l2cap_conn_unreliable(conn, ECOMM);
3391 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
3393 conn->rx_len -= skb->len;
3395 if (!conn->rx_len) {
3396 /* Complete frame received */
3397 l2cap_recv_frame(conn, conn->rx_skb);
3398 conn->rx_skb = NULL;
/*
 * sysfs read handler for /sys/class/bluetooth/l2cap: dump one line per
 * L2CAP socket (addresses, state, PSM, CIDs, MTUs, security level)
 * into the page buffer.
 * NOTE(review): the declarations of 'sk' and the cursor 'str' (and the
 * final 'return str - buf;') are elided in this listing.  sprintf into
 * the sysfs page is unbounded — with enough sockets this can overrun
 * PAGE_SIZE; a known limitation of this style of sysfs dump.
 */
3407 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
3410 struct hlist_node *node;
/* _bh variant: the list lock is also taken from softirq context. */
3413 read_lock_bh(&l2cap_sk_list.lock);
3415 sk_for_each(sk, node, &l2cap_sk_list.head) {
3416 struct l2cap_pinfo *pi = l2cap_pi(sk);
3418 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
3419 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
3420 sk->sk_state, __le16_to_cpu(pi->psm), pi->scid,
3421 pi->dcid, pi->imtu, pi->omtu, pi->sec_level);
3424 read_unlock_bh(&l2cap_sk_list.lock);
/* Read-only class attribute backing /sys/class/bluetooth/l2cap,
 * served by l2cap_sysfs_show() above. */
3429 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
/*
 * proto_ops for PF_BLUETOOTH/BTPROTO_L2CAP sockets.  L2CAP-specific
 * handlers for the connection lifecycle and data path; generic
 * Bluetooth helpers (bt_sock_*) for poll/ioctl; sock_no_* stubs for
 * operations L2CAP does not support.
 */
3431 static const struct proto_ops l2cap_sock_ops = {
3432 .family = PF_BLUETOOTH,
3433 .owner = THIS_MODULE,
3434 .release = l2cap_sock_release,
3435 .bind = l2cap_sock_bind,
3436 .connect = l2cap_sock_connect,
3437 .listen = l2cap_sock_listen,
3438 .accept = l2cap_sock_accept,
3439 .getname = l2cap_sock_getname,
3440 .sendmsg = l2cap_sock_sendmsg,
3441 .recvmsg = l2cap_sock_recvmsg,
3442 .poll = bt_sock_poll,
3443 .ioctl = bt_sock_ioctl,
3444 .mmap = sock_no_mmap,
3445 .socketpair = sock_no_socketpair,
3446 .shutdown = l2cap_sock_shutdown,
3447 .setsockopt = l2cap_sock_setsockopt,
3448 .getsockopt = l2cap_sock_getsockopt
/* Socket-family hook: lets socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP)
 * create L2CAP sockets via l2cap_sock_create(). */
3451 static struct net_proto_family l2cap_sock_family_ops = {
3452 .family = PF_BLUETOOTH,
3453 .owner = THIS_MODULE,
3454 .create = l2cap_sock_create,
/* Registration record handed to the HCI core: routes connection
 * indications/confirmations, security results and inbound ACL data to
 * the L2CAP handlers defined above. */
3457 static struct hci_proto l2cap_hci_proto = {
3459 .id = HCI_PROTO_L2CAP,
3460 .connect_ind = l2cap_connect_ind,
3461 .connect_cfm = l2cap_connect_cfm,
3462 .disconn_ind = l2cap_disconn_ind,
3463 .disconn_cfm = l2cap_disconn_cfm,
3464 .security_cfm = l2cap_security_cfm,
3465 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the L2CAP proto with the socket core, then the
 * PF_BLUETOOTH socket family, then the HCI protocol hooks; finally add
 * the sysfs info file (failure there is only logged, not fatal).
 * On a later-step failure, earlier registrations are unwound — the
 * goto labels and some error returns are elided in this listing.
 */
3468 static int __init l2cap_init(void)
3472 err = proto_register(&l2cap_proto, 0);
3476 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
3478 BT_ERR("L2CAP socket registration failed");
3482 err = hci_register_proto(&l2cap_hci_proto);
3484 BT_ERR("L2CAP protocol registration failed");
/* Unwind the socket registration before bailing out. */
3485 bt_sock_unregister(BTPROTO_L2CAP);
/* Non-fatal: the stack works without the sysfs info file. */
3489 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
3490 BT_ERR("Failed to create L2CAP info file");
3492 BT_INFO("L2CAP ver %s", VERSION);
3493 BT_INFO("L2CAP socket layer initialized");
/* Error label (elided): undo proto_register() and return err. */
3498 proto_unregister(&l2cap_proto);
/* Module exit: tear down everything l2cap_init() registered, in
 * reverse order — sysfs file, socket family, HCI protocol hooks,
 * then the proto itself. */
3502 static void __exit l2cap_exit(void)
3504 class_remove_file(bt_class, &class_attr_l2cap);
3506 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
3507 BT_ERR("L2CAP socket unregistration failed");
3509 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
3510 BT_ERR("L2CAP protocol unregistration failed");
3512 proto_unregister(&l2cap_proto);
/* Empty exported function: merely referencing the l2cap_load symbol
 * from another module (e.g. RFCOMM) makes the module loader pull in
 * l2cap automatically. */
3515 void l2cap_load(void)
3517 /* Dummy function to trigger automatic L2CAP module loading by
3518 * other modules that use L2CAP sockets but don't use any other
3519 * symbols from it. */
3522 EXPORT_SYMBOL(l2cap_load);
/* Standard module wiring: entry/exit points, the enable_ertm parameter
 * (off by default), and descriptive metadata.
 * NOTE(review): enable_ertm is declared 'static int' near the top of
 * the file but registered here with the 'bool' param type — later
 * kernels warn on (and eventually reject) that mismatch; confirm
 * against the target kernel version. */
3524 module_init(l2cap_init);
3525 module_exit(l2cap_exit);
3527 module_param(enable_ertm, bool, 0644);
3528 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
3530 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
3531 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
3532 MODULE_VERSION(VERSION);
3533 MODULE_LICENSE("GPL");
/* Allows bt-proto-0 (BTPROTO_L2CAP) requests to autoload this module. */
3534 MODULE_ALIAS("bt-proto-0");