2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
/* Driver/module version string reported to userspace. */
56 #define VERSION "2.14"
/* Module parameters: ERTM is disabled by default in this version;
 * max_transmit bounds ERTM (re)transmissions per frame. */
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
/* Feature mask and fixed-channel map advertised in information responses. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
64 static const struct proto_ops l2cap_sock_ops;
/* Global list of every L2CAP socket, guarded by its rwlock. */
66 static struct bt_sock_list l2cap_sk_list = {
67 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations; definitions appear later in this file. */
70 static void __l2cap_sock_close(struct sock *sk, int reason);
71 static void l2cap_sock_close(struct sock *sk);
72 static void l2cap_sock_kill(struct sock *sk);
74 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
75 u8 code, u8 ident, u16 dlen, void *data);
77 /* ---- L2CAP timers ---- */
/* sk_timer callback: the socket-level timeout fired.  Maps the current
 * socket state to an error reason and closes the channel.
 * NOTE(review): runs in timer (softirq) context via sk_timer. */
78 static void l2cap_sock_timeout(unsigned long arg)
80 struct sock *sk = (struct sock *) arg;
83 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* Timing out an established or configuring channel is reported as a
 * refused connection; likewise a connect that already passed the SDP
 * security level. */
87 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
88 reason = ECONNREFUSED;
89 else if (sk->sk_state == BT_CONNECT &&
90 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
91 reason = ECONNREFUSED;
95 __l2cap_sock_close(sk, reason);
/* Arm sk_timer to fire 'timeout' jiffies from now (re-arms if pending). */
103 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
105 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
106 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer, if any. */
109 static void l2cap_sock_clear_timer(struct sock *sk)
111 BT_DBG("sock %p state %d", sk, sk->sk_state);
112 sk_stop_timer(sk, &sk->sk_timer);
115 /* ---- L2CAP channels ---- */
/* Walk a connection's channel list looking for a destination CID.
 * Caller must hold the list lock (leading "__" convention). */
116 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
119 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
120 if (l2cap_pi(s)->dcid == cid)
/* Same walk, keyed on the source CID.  Lock held by caller. */
126 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
129 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
130 if (l2cap_pi(s)->scid == cid)
136 /* Find channel with given SCID.
137 * Returns locked socket */
138 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
142 s = __l2cap_get_chan_by_scid(l, cid)
145 read_unlock(&l->lock);
/* Lookup by pending signalling-command identifier; lock held by caller. */
149 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
152 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
153 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around the ident lookup (returns locked socket). */
159 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
163 s = __l2cap_get_chan_by_ident(l, ident);
166 read_unlock(&l->lock);
/* Allocate the first free dynamic CID on this connection by linear scan
 * of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
170 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
172 u16 cid = L2CAP_CID_DYN_START;
174 for (; cid < L2CAP_CID_DYN_END; cid++) {
175 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push a socket onto the head of the connection's channel list.
 * Caller holds the list write lock. */
182 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
187 l2cap_pi(l->head)->prev_c = sk;
189 l2cap_pi(sk)->next_c = l->head;
190 l2cap_pi(sk)->prev_c = NULL;
/* Remove a socket from the doubly linked channel list, taking the
 * write lock itself. */
194 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
196 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
198 write_lock_bh(&l->lock);
203 l2cap_pi(next)->prev_c = prev;
205 l2cap_pi(prev)->next_c = next;
206 write_unlock_bh(&l->lock);
/* Attach a channel (socket) to a connection: pick CIDs appropriate to
 * the socket type and link it into the connection's channel list.
 * Lock held by caller (see l2cap_chan_add). */
211 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
213 struct l2cap_chan_list *l = &conn->chan_list;
215 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
216 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
218 conn->disc_reason = 0x13;
220 l2cap_pi(sk)->conn = conn;
222 if (sk->sk_type == SOCK_SEQPACKET) {
223 /* Alloc CID for connection-oriented socket */
224 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
225 } else if (sk->sk_type == SOCK_DGRAM) {
226 /* Connectionless socket */
227 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
228 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
231 /* Raw socket can send/recv signalling messages only */
232 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
233 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
237 __l2cap_chan_link(l, sk);
/* Accepted channels are queued on the listening parent. */
240 bt_accept_enqueue(parent, sk);
/* Detach a channel from its connection and mark the socket closed.
244 * Must be called on the locked socket. */
245 static void l2cap_chan_del(struct sock *sk, int err)
247 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
248 struct sock *parent = bt_sk(sk)->parent;
250 l2cap_sock_clear_timer(sk);
252 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
255 /* Unlink from channel list */
256 l2cap_chan_unlink(&conn->chan_list, sk);
257 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL connection reference taken when the channel was added. */
258 hci_conn_put(conn->hcon);
261 sk->sk_state = BT_CLOSED;
262 sock_set_flag(sk, SOCK_ZAPPED);
/* If still pending on a listener, remove it and wake the accept()er. */
268 bt_accept_unlink(sk);
269 parent->sk_data_ready(parent, 0);
271 sk->sk_state_change(sk);
274 /* Service level security */
/* Map the channel's PSM and configured security level onto an HCI
 * authentication requirement, then ask the HCI layer to enforce it.
 * Returns nonzero when the link already satisfies the requirement
 * (per hci_conn_security semantics). */
275 static inline int l2cap_check_security(struct sock *sk)
277 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* PSM 0x0001 (SDP) never bonds; it may still require MITM protection. */
280 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
281 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
282 auth_type = HCI_AT_NO_BONDING_MITM;
284 auth_type = HCI_AT_NO_BONDING;
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
287 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
289 switch (l2cap_pi(sk)->sec_level) {
290 case BT_SECURITY_HIGH:
291 auth_type = HCI_AT_GENERAL_BONDING_MITM;
293 case BT_SECURITY_MEDIUM:
294 auth_type = HCI_AT_GENERAL_BONDING;
297 auth_type = HCI_AT_NO_BONDING;
302 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate a signalling-command identifier for this connection. */
306 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
310 /* Get next available identificator.
311 * 1 - 128 are used by kernel.
312 * 129 - 199 are reserved.
313 * 200 - 254 are used by utilities like l2ping, etc.
316 spin_lock_bh(&conn->lock);
/* Wrap back to the start of the kernel range after ident 128. */
318 if (++conn->tx_ident > 128)
323 spin_unlock_bh(&conn->lock);
/* Build and transmit one signalling command over the connection's
 * ACL link.  Returns the hci_send_acl() result. */
328 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
330 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
332 BT_DBG("code 0x%2.2x", code);
337 return hci_send_acl(conn->hcon, skb, 0);
/* Build and send an ERTM S-frame carrying 'control'.  Consumes any
 * pending F-bit / P-bit state and appends a CRC16 FCS when the channel
 * is configured for it. */
340 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
343 struct l2cap_hdr *lh;
344 struct l2cap_conn *conn = pi->conn;
/* Header + 16-bit control field; FCS adds 2 more bytes below. */
345 int count, hlen = L2CAP_HDR_SIZE + 2;
347 if (pi->fcs == L2CAP_FCS_CRC16)
350 BT_DBG("pi %p, control 0x%2.2x", pi, control);
352 count = min_t(unsigned int, conn->mtu, hlen);
353 control |= L2CAP_CTRL_FRAME_TYPE;
/* A pending Final bit is sent once and then cleared. */
355 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
356 control |= L2CAP_CTRL_FINAL;
357 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Likewise for a pending Poll bit. */
360 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
361 control |= L2CAP_CTRL_POLL;
362 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
365 skb = bt_skb_alloc(count, GFP_ATOMIC);
369 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
370 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
371 lh->cid = cpu_to_le16(pi->dcid);
372 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the FCS field itself. */
374 if (pi->fcs == L2CAP_FCS_CRC16) {
375 u16 fcs = crc16(0, (u8 *)lh, count - 2);
376 put_unaligned_le16(fcs, skb_put(skb, 2));
379 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR (receiver ready) or RNR (receiver not ready) depending on
 * our local-busy state, stamping the current ReqSeq. */
382 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
384 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
385 control |= L2CAP_SUPER_RCV_NOT_READY;
387 control |= L2CAP_SUPER_RCV_READY;
389 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
391 return l2cap_send_sframe(pi, control);
/* Kick off channel establishment: if the remote feature mask is known
 * (or being fetched) send a Connect Request once security allows;
 * otherwise first issue an Information Request for the feature mask. */
394 static void l2cap_do_start(struct sock *sk)
396 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
398 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — the connect req will be sent
 * from l2cap_conn_start() when it completes. */
399 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
402 if (l2cap_check_security(sk)) {
403 struct l2cap_conn_req req;
404 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
405 req.psm = l2cap_pi(sk)->psm;
407 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
409 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
410 L2CAP_CONN_REQ, sizeof(req), &req);
413 struct l2cap_info_req req;
414 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
416 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
417 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the information response. */
419 mod_timer(&conn->info_timer, jiffies +
420 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
422 l2cap_send_cmd(conn, conn->info_ident,
423 L2CAP_INFO_REQ, sizeof(req), &req);
/* Issue a Disconnect Request for this channel's CID pair. */
427 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
429 struct l2cap_disconn_req req;
431 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
432 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
433 l2cap_send_cmd(conn, l2cap_get_ident(conn),
434 L2CAP_DISCONN_REQ, sizeof(req), &req);
437 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its signalling:
 * send pending Connect Requests (BT_CONNECT) or answer incoming ones
 * (BT_CONNECT2) now that security / feature exchange has settled. */
438 static void l2cap_conn_start(struct l2cap_conn *conn)
440 struct l2cap_chan_list *l = &conn->chan_list;
443 BT_DBG("conn %p", conn);
447 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented channels take part in signalling here. */
450 if (sk->sk_type != SOCK_SEQPACKET) {
455 if (sk->sk_state == BT_CONNECT) {
456 if (l2cap_check_security(sk)) {
457 struct l2cap_conn_req req;
458 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
459 req.psm = l2cap_pi(sk)->psm;
461 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
463 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
464 L2CAP_CONN_REQ, sizeof(req), &req);
466 } else if (sk->sk_state == BT_CONNECT2) {
467 struct l2cap_conn_rsp rsp;
468 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
469 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
471 if (l2cap_check_security(sk)) {
/* With deferred setup userspace must accept() first; answer
 * "pending / authorization pending" and wake the listener. */
472 if (bt_sk(sk)->defer_setup) {
473 struct sock *parent = bt_sk(sk)->parent;
474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
476 parent->sk_data_ready(parent, 0);
479 sk->sk_state = BT_CONFIG;
480 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
481 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
484 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
485 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
488 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
489 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
495 read_unlock(&l->lock);
/* ACL link is up: wake raw/dgram channels immediately and start
 * signalling for channels still in BT_CONNECT. */
498 static void l2cap_conn_ready(struct l2cap_conn *conn)
500 struct l2cap_chan_list *l = &conn->chan_list;
503 BT_DBG("conn %p", conn);
507 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
510 if (sk->sk_type != SOCK_SEQPACKET) {
511 l2cap_sock_clear_timer(sk);
512 sk->sk_state = BT_CONNECTED;
513 sk->sk_state_change(sk);
514 } else if (sk->sk_state == BT_CONNECT)
520 read_unlock(&l->lock);
523 /* Notify sockets that we cannot guaranty reliability anymore */
524 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
526 struct l2cap_chan_list *l = &conn->chan_list;
529 BT_DBG("conn %p", conn);
/* Only channels that requested reliability get the error. */
533 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
534 if (l2cap_pi(sk)->force_reliable)
538 read_unlock(&l->lock);
/* Information-request timer expired: give up on the feature exchange
 * and proceed with whatever channels are waiting. */
541 static void l2cap_info_timeout(unsigned long arg)
543 struct l2cap_conn *conn = (void *) arg;
545 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
546 conn->info_ident = 0;
548 l2cap_conn_start(conn);
/* Find or create the L2CAP connection object for an ACL link.
 * Allocated GFP_ATOMIC since this can run from HCI event context. */
551 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
553 struct l2cap_conn *conn = hcon->l2cap_data;
558 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
562 hcon->l2cap_data = conn;
565 BT_DBG("hcon %p conn %p", hcon, conn);
567 conn->mtu = hcon->hdev->acl_mtu;
568 conn->src = &hcon->hdev->bdaddr;
569 conn->dst = &hcon->dst;
573 spin_lock_init(&conn->lock);
574 rwlock_init(&conn->chan_list.lock);
576 setup_timer(&conn->info_timer, l2cap_info_timeout,
577 (unsigned long) conn);
/* Default disconnect reason: remote user terminated connection. */
579 conn->disc_reason = 0x13;
/* Tear down the L2CAP connection: drop any partial reassembly buffer,
 * delete every channel with 'err', and stop the info timer. */
584 static void l2cap_conn_del(struct hci_conn *hcon, int err)
586 struct l2cap_conn *conn = hcon->l2cap_data;
592 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
594 kfree_skb(conn->rx_skb);
597 while ((sk = conn->chan_list.head)) {
599 l2cap_chan_del(sk, err);
604 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
605 del_timer_sync(&conn->info_timer);
607 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
611 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
613 struct l2cap_chan_list *l = &conn->chan_list;
614 write_lock_bh(&l->lock);
615 __l2cap_chan_add(conn, sk, parent);
616 write_unlock_bh(&l->lock);
619 /* ---- Socket interface ---- */
/* Exact-match lookup of a bound socket by (source PSM, source bdaddr).
 * Caller holds l2cap_sk_list.lock. */
620 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
623 struct hlist_node *node;
624 sk_for_each(sk, node, &l2cap_sk_list.head)
625 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
632 /* Find socket with psm and source bdaddr.
633 * Returns closest match.
/* An exact source-address match wins; a BDADDR_ANY listener is kept
 * in sk1 as the fallback. */
635 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
637 struct sock *sk = NULL, *sk1 = NULL;
638 struct hlist_node *node;
640 sk_for_each(sk, node, &l2cap_sk_list.head) {
641 if (state && sk->sk_state != state)
644 if (l2cap_pi(sk)->psm == psm) {
646 if (!bacmp(&bt_sk(sk)->src, src))
650 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke on an exact match. */
654 return node ? sk : sk1;
657 /* Find socket with given address (psm, src).
658 * Returns locked socket */
659 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
662 read_lock(&l2cap_sk_list.lock);
663 s = __l2cap_get_sock_by_psm(state, psm, src);
666 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: free any queued receive/transmit skbs. */
670 static void l2cap_sock_destruct(struct sock *sk)
674 skb_queue_purge(&sk->sk_receive_queue);
675 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the listener itself closed and zapped. */
678 static void l2cap_sock_cleanup_listen(struct sock *parent)
682 BT_DBG("parent %p", parent);
684 /* Close not yet accepted channels */
685 while ((sk = bt_accept_dequeue(parent, NULL)))
686 l2cap_sock_close(sk);
688 parent->sk_state = BT_CLOSED;
689 sock_set_flag(parent, SOCK_ZAPPED);
692 /* Kill socket (only if zapped and orphan)
693 * Must be called on unlocked socket.
695 static void l2cap_sock_kill(struct sock *sk)
/* Still referenced by a struct socket, or not yet zapped: keep it. */
697 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
700 BT_DBG("sk %p state %d", sk, sk->sk_state);
702 /* Kill poor orphan */
703 bt_sock_unlink(&l2cap_sk_list, sk);
704 sock_set_flag(sk, SOCK_DEAD);
/* State-machine driven close.  Connected SEQPACKET channels get a
 * proper Disconnect Request with a timeout; half-open incoming
 * channels are refused; everything else is torn down directly.
 * Caller holds the socket lock. */
708 static void __l2cap_sock_close(struct sock *sk, int reason)
710 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
712 switch (sk->sk_state) {
714 l2cap_sock_cleanup_listen(sk);
719 if (sk->sk_type == SOCK_SEQPACKET) {
720 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Enter BT_DISCONN and wait (bounded by sndtimeo) for the
 * Disconnect Response. */
722 sk->sk_state = BT_DISCONN;
723 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
724 l2cap_send_disconn_req(conn, sk);
726 l2cap_chan_del(sk, reason);
/* BT_CONNECT2: an incoming request we never answered — reject it. */
730 if (sk->sk_type == SOCK_SEQPACKET) {
731 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
732 struct l2cap_conn_rsp rsp;
735 if (bt_sk(sk)->defer_setup)
736 result = L2CAP_CR_SEC_BLOCK;
738 result = L2CAP_CR_BAD_PSM;
740 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
741 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
742 rsp.result = cpu_to_le16(result);
743 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
744 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
745 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
747 l2cap_chan_del(sk, reason);
752 l2cap_chan_del(sk, reason);
756 sock_set_flag(sk, SOCK_ZAPPED);
761 /* Must be called on unlocked socket. */
762 static void l2cap_sock_close(struct sock *sk)
764 l2cap_sock_clear_timer(sk);
766 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a new L2CAP socket: inherit settings from the listening
 * parent if present, otherwise apply protocol defaults. */
771 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
773 struct l2cap_pinfo *pi = l2cap_pi(sk);
778 sk->sk_type = parent->sk_type;
779 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
781 pi->imtu = l2cap_pi(parent)->imtu;
782 pi->omtu = l2cap_pi(parent)->omtu;
783 pi->mode = l2cap_pi(parent)->mode;
784 pi->fcs = l2cap_pi(parent)->fcs;
785 pi->sec_level = l2cap_pi(parent)->sec_level;
786 pi->role_switch = l2cap_pi(parent)->role_switch;
787 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: protocol defaults (basic mode, CRC16 FCS, low security). */
789 pi->imtu = L2CAP_DEFAULT_MTU;
791 pi->mode = L2CAP_MODE_BASIC;
792 pi->fcs = L2CAP_FCS_CRC16;
793 pi->sec_level = BT_SECURITY_LOW;
795 pi->force_reliable = 0;
798 /* Default config options */
800 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
801 skb_queue_head_init(TX_QUEUE(sk));
802 skb_queue_head_init(SREJ_QUEUE(sk));
803 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor; obj_size makes sk_alloc reserve l2cap_pinfo. */
806 static struct proto l2cap_proto = {
808 .owner = THIS_MODULE,
809 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP sock, then link it into
 * the global socket list. */
812 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
816 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
820 sock_init_data(sock, sk);
821 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
823 sk->sk_destruct = l2cap_sock_destruct;
824 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
826 sock_reset_flag(sk, SOCK_ZAPPED);
828 sk->sk_protocol = proto;
829 sk->sk_state = BT_OPEN;
831 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
833 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, gate raw sockets behind
 * CAP_NET_RAW, then allocate and init the sock. */
837 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
842 BT_DBG("sock %p", sock);
844 sock->state = SS_UNCONNECTED;
846 if (sock->type != SOCK_SEQPACKET &&
847 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
848 return -ESOCKTNOSUPPORT;
850 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
853 sock->ops = &l2cap_sock_ops;
855 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
859 l2cap_sock_init(sk, NULL);
/* bind(2) backend: copy in the (possibly short) sockaddr_l2, check
 * privileges for reserved PSMs, reject duplicate bindings, and record
 * the source address/PSM. */
863 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
865 struct sock *sk = sock->sk;
866 struct sockaddr_l2 la;
871 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy no more than the caller provided; la was zeroed first. */
874 memset(&la, 0, sizeof(la));
875 len = min_t(unsigned int, sizeof(la), alen);
876 memcpy(&la, addr, len);
883 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved; require CAP_NET_BIND_SERVICE. */
888 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
889 !capable(CAP_NET_BIND_SERVICE)) {
894 write_lock_bh(&l2cap_sk_list.lock);
896 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
899 /* Save source address */
900 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
901 l2cap_pi(sk)->psm = la.l2_psm;
902 l2cap_pi(sk)->sport = la.l2_psm;
903 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) default to SDP security level. */
905 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
906 __le16_to_cpu(la.l2_psm) == 0x0003)
907 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
910 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to the destination, pick the HCI authentication
 * requirement for this socket type/security level, create (or reuse)
 * the ACL link, and attach the channel to it. */
917 static int l2cap_do_connect(struct sock *sk)
919 bdaddr_t *src = &bt_sk(sk)->src;
920 bdaddr_t *dst = &bt_sk(sk)->dst;
921 struct l2cap_conn *conn;
922 struct hci_conn *hcon;
923 struct hci_dev *hdev;
927 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
930 hdev = hci_get_route(dst, src);
932 return -EHOSTUNREACH;
934 hci_dev_lock_bh(hdev);
/* Raw sockets use dedicated bonding; SDP (0x0001) never bonds;
 * everything else maps sec_level onto general bonding. */
938 if (sk->sk_type == SOCK_RAW) {
939 switch (l2cap_pi(sk)->sec_level) {
940 case BT_SECURITY_HIGH:
941 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
943 case BT_SECURITY_MEDIUM:
944 auth_type = HCI_AT_DEDICATED_BONDING;
947 auth_type = HCI_AT_NO_BONDING;
950 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
951 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
952 auth_type = HCI_AT_NO_BONDING_MITM;
954 auth_type = HCI_AT_NO_BONDING;
956 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
957 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
959 switch (l2cap_pi(sk)->sec_level) {
960 case BT_SECURITY_HIGH:
961 auth_type = HCI_AT_GENERAL_BONDING_MITM;
963 case BT_SECURITY_MEDIUM:
964 auth_type = HCI_AT_GENERAL_BONDING;
967 auth_type = HCI_AT_NO_BONDING;
972 hcon = hci_connect(hdev, ACL_LINK, dst,
973 l2cap_pi(sk)->sec_level, auth_type);
977 conn = l2cap_conn_add(hcon, 0);
985 /* Update source addr of the socket */
986 bacpy(src, conn->src);
988 l2cap_chan_add(conn, sk, NULL);
990 sk->sk_state = BT_CONNECT;
991 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* ACL already up: non-SEQPACKET sockets are connected immediately. */
993 if (hcon->state == BT_CONNECTED) {
994 if (sk->sk_type != SOCK_SEQPACKET) {
995 l2cap_sock_clear_timer(sk);
996 sk->sk_state = BT_CONNECTED;
1002 hci_dev_unlock_bh(hdev);
/* connect(2) backend: validate address and mode, record the peer,
 * start the connect, then optionally wait for BT_CONNECTED. */
1007 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1009 struct sock *sk = sock->sk;
1010 struct sockaddr_l2 la;
1013 BT_DBG("sk %p", sk);
1015 if (!addr || alen < sizeof(addr->sa_family) ||
1016 addr->sa_family != AF_BLUETOOTH)
1019 memset(&la, 0, sizeof(la));
1020 len = min_t(unsigned int, sizeof(la), alen);
1021 memcpy(&la, addr, len);
/* Connection-oriented sockets must name a PSM. */
1028 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
/* ERTM/Streaming are only allowed when the module enables them
 * (gap in listing: the enable_ertm check sits in the elided lines). */
1033 switch (l2cap_pi(sk)->mode) {
1034 case L2CAP_MODE_BASIC:
1036 case L2CAP_MODE_ERTM:
1037 case L2CAP_MODE_STREAMING:
1046 switch (sk->sk_state) {
1050 /* Already connecting */
1054 /* Already connected */
1067 /* Set destination address and psm */
1068 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1069 l2cap_pi(sk)->psm = la.l2_psm;
1071 err = l2cap_do_connect(sk);
/* Block (unless O_NONBLOCK) until connected or timeout. */
1076 err = bt_sock_wait_state(sk, BT_CONNECTED,
1077 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2) backend: only bound SEQPACKET sockets may listen; if no
 * PSM was bound, auto-assign an odd dynamic PSM from 0x1001 up. */
1083 static int l2cap_sock_listen(struct socket *sock, int backlog)
1085 struct sock *sk = sock->sk;
1088 BT_DBG("sk %p backlog %d", sk, backlog);
1092 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1097 switch (l2cap_pi(sk)->mode) {
1098 case L2CAP_MODE_BASIC:
1100 case L2CAP_MODE_ERTM:
1101 case L2CAP_MODE_STREAMING:
1110 if (!l2cap_pi(sk)->psm) {
1111 bdaddr_t *src = &bt_sk(sk)->src;
1116 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd with an even upper byte, hence += 2. */
1118 for (psm = 0x1001; psm < 0x1100; psm += 2)
1119 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1120 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1121 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1126 write_unlock_bh(&l2cap_sk_list.lock);
1132 sk->sk_max_ack_backlog = backlog;
1133 sk->sk_ack_backlog = 0;
1134 sk->sk_state = BT_LISTEN;
/* accept(2) backend: sleep on the listener's wait queue (wake-one)
 * until a fully established child can be dequeued, honoring timeouts
 * and signals. */
1141 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1143 DECLARE_WAITQUEUE(wait, current);
1144 struct sock *sk = sock->sk, *nsk;
1148 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1150 if (sk->sk_state != BT_LISTEN) {
1155 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1157 BT_DBG("sk %p timeo %ld", sk, timeo);
1159 /* Wait for an incoming connection. (wake-one). */
1160 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1161 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1162 set_current_state(TASK_INTERRUPTIBLE);
/* Socket lock is dropped around the schedule (in elided lines),
 * so re-validate the listen state after reacquiring it. */
1169 timeo = schedule_timeout(timeo);
1170 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1172 if (sk->sk_state != BT_LISTEN) {
1177 if (signal_pending(current)) {
1178 err = sock_intr_errno(timeo);
1182 set_current_state(TASK_RUNNING);
1183 remove_wait_queue(sk_sleep(sk), &wait);
1188 newsock->state = SS_CONNECTED;
1190 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername backend: report (bdaddr, PSM, CID) for the
 * local or remote end depending on 'peer'. */
1197 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1199 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1200 struct sock *sk = sock->sk;
1202 BT_DBG("sock %p, sk %p", sock, sk);
1204 addr->sa_family = AF_BLUETOOTH;
1205 *len = sizeof(struct sockaddr_l2);
1208 la->l2_psm = l2cap_pi(sk)->psm;
1209 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1210 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1212 la->l2_psm = l2cap_pi(sk)->sport;
1213 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1214 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: resend a Poll (RR with P-bit) until the peer
 * answers or retry_count exceeds the remote's max-transmit, at which
 * point the channel is disconnected. */
1220 static void l2cap_monitor_timeout(unsigned long arg)
1222 struct sock *sk = (void *) arg;
1226 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1227 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1232 l2cap_pi(sk)->retry_count++;
1233 __mod_monitor_timer();
1235 control = L2CAP_CTRL_POLL;
1236 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: first expiry — enter the WAIT_F state
 * and start polling via the monitor timer. */
1240 static void l2cap_retrans_timeout(unsigned long arg)
1242 struct sock *sk = (void *) arg;
1246 l2cap_pi(sk)->retry_count = 1;
1247 __mod_monitor_timer();
1249 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1251 control = L2CAP_CTRL_POLL;
1252 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Drop frames from the head of TX_QUEUE up to (not including) the
 * expected_ack_seq; stop the retransmission timer once everything
 * outstanding is acknowledged. */
1256 static void l2cap_drop_acked_frames(struct sock *sk)
1258 struct sk_buff *skb;
1260 while ((skb = skb_peek(TX_QUEUE(sk)))) {
1261 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1264 skb = skb_dequeue(TX_QUEUE(sk));
1267 l2cap_pi(sk)->unacked_frames--;
1270 if (!l2cap_pi(sk)->unacked_frames)
1271 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one skb to the ACL layer.  Returns hci_send_acl()'s result. */
1276 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1278 struct l2cap_pinfo *pi = l2cap_pi(sk);
1281 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1283 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: clone each queued frame, stamp TxSeq,
 * append FCS, send, and drop the original (no retransmission in
 * streaming mode). */
1290 static int l2cap_streaming_send(struct sock *sk)
1292 struct sk_buff *skb, *tx_skb;
1293 struct l2cap_pinfo *pi = l2cap_pi(sk);
1297 while ((skb = sk->sk_send_head)) {
1298 tx_skb = skb_clone(skb, GFP_ATOMIC);
1300 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1301 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1302 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS covers the frame minus the trailing 2-byte FCS field. */
1304 if (pi->fcs == L2CAP_FCS_CRC16) {
1305 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1306 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1309 err = l2cap_do_send(sk, tx_skb);
1311 l2cap_send_disconn_req(pi->conn, sk);
/* TxSeq is modulo-64 per the ERTM control field format. */
1315 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1317 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1318 sk->sk_send_head = NULL;
1320 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1322 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the single I-frame with the given TxSeq (SREJ path):
 * locate it on TX_QUEUE, re-stamp ReqSeq/TxSeq, redo FCS, and resend.
 * Gives up (disconnects) once remote_max_tx retries are exhausted. */
1328 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1330 struct l2cap_pinfo *pi = l2cap_pi(sk);
1331 struct sk_buff *skb, *tx_skb;
1335 skb = skb_peek(TX_QUEUE(sk));
1337 if (bt_cb(skb)->tx_seq != tx_seq) {
1338 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1340 skb = skb_queue_next(TX_QUEUE(sk), skb);
1344 if (pi->remote_max_tx &&
1345 bt_cb(skb)->retries == pi->remote_max_tx) {
1346 l2cap_send_disconn_req(pi->conn, sk);
1350 tx_skb = skb_clone(skb, GFP_ATOMIC);
1351 bt_cb(skb)->retries++;
1352 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1353 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1354 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1355 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1357 if (pi->fcs == L2CAP_FCS_CRC16) {
1358 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1359 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1362 err = l2cap_do_send(sk, tx_skb);
1364 l2cap_send_disconn_req(pi->conn, sk);
1372 static int l2cap_ertm_send(struct sock *sk)
1374 struct sk_buff *skb, *tx_skb;
1375 struct l2cap_pinfo *pi = l2cap_pi(sk);
1379 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1382 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1383 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1385 if (pi->remote_max_tx &&
1386 bt_cb(skb)->retries == pi->remote_max_tx) {
1387 l2cap_send_disconn_req(pi->conn, sk);
1391 tx_skb = skb_clone(skb, GFP_ATOMIC);
1393 bt_cb(skb)->retries++;
1395 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1396 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1397 control |= L2CAP_CTRL_FINAL;
1398 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1400 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1401 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1402 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1405 if (pi->fcs == L2CAP_FCS_CRC16) {
1406 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1407 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1410 err = l2cap_do_send(sk, tx_skb);
1412 l2cap_send_disconn_req(pi->conn, sk);
1415 __mod_retrans_timer();
1417 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1418 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1420 pi->unacked_frames++;
1423 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1424 sk->sk_send_head = NULL;
1426 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Acknowledge received I-frames: if locally busy send RNR; otherwise
 * try to piggyback the ack on pending I-frames (l2cap_ertm_send) and
 * fall back to an explicit RR. */
1434 static int l2cap_send_ack(struct l2cap_pinfo *pi)
1436 struct sock *sk = (struct sock *)pi;
1439 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1441 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1442 control |= L2CAP_SUPER_RCV_NOT_READY;
1443 return l2cap_send_sframe(pi, control);
1444 } else if (l2cap_ertm_send(sk) == 0) {
1445 control |= L2CAP_SUPER_RCV_READY;
1446 return l2cap_send_sframe(pi, control);
/* Send an SREJ with the F-bit for the last entry on the SREJ list. */
1451 static int l2cap_send_srejtail(struct sock *sk)
1453 struct srej_list *tail;
1456 control = L2CAP_SUPER_SELECT_REJECT;
1457 control |= L2CAP_CTRL_FINAL;
1459 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1460 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1462 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy user iovec data into an skb, spilling anything beyond the first
 * 'count' bytes into MTU-sized fragments chained on frag_list. */
1467 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1469 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1470 struct sk_buff **frag;
1473 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1480 /* Continuation fragments (no L2CAP header) */
1481 frag = &skb_shinfo(skb)->frag_list;
1483 count = min_t(unsigned int, conn->mtu, len);
1485 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1488 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1494 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * then the user payload via l2cap_skbuff_fromiovec(). */
1500 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1502 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1503 struct sk_buff *skb;
/* hlen = header + PSM field. */
1504 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1505 struct l2cap_hdr *lh;
1507 BT_DBG("sk %p len %d", sk, (int)len);
1509 count = min_t(unsigned int, (conn->mtu - hlen), len);
1510 skb = bt_skb_send_alloc(sk, count + hlen,
1511 msg->msg_flags & MSG_DONTWAIT, &err);
1513 return ERR_PTR(-ENOMEM);
1515 /* Create L2CAP header */
1516 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1517 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1518 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1519 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1521 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1522 if (unlikely(err < 0)) {
1524 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload. */
1529 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1531 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1532 struct sk_buff *skb;
1533 int err, count, hlen = L2CAP_HDR_SIZE;
1534 struct l2cap_hdr *lh;
1536 BT_DBG("sk %p len %d", sk, (int)len);
1538 count = min_t(unsigned int, (conn->mtu - hlen), len);
1539 skb = bt_skb_send_alloc(sk, count + hlen,
1540 msg->msg_flags & MSG_DONTWAIT, &err);
1542 return ERR_PTR(-ENOMEM);
1544 /* Create L2CAP header */
1545 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1546 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1547 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1549 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1550 if (unlikely(err < 0)) {
1552 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: header + 16-bit control,
 * optional 2-byte SDU length (SAR start), payload, and room for the
 * FCS (filled in at transmit time). */
1557 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1559 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1560 struct sk_buff *skb;
/* hlen = header + control field; FCS/sdulen adjustments are in the
 * elided lines / below. */
1561 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1562 struct l2cap_hdr *lh;
1564 BT_DBG("sk %p len %d", sk, (int)len);
1569 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1572 count = min_t(unsigned int, (conn->mtu - hlen), len);
1573 skb = bt_skb_send_alloc(sk, count + hlen,
1574 msg->msg_flags & MSG_DONTWAIT, &err);
1576 return ERR_PTR(-ENOMEM);
1578 /* Create L2CAP header */
1579 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1580 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1581 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1582 put_unaligned_le16(control, skb_put(skb, 2));
1584 put_unaligned_le16(sdulen, skb_put(skb, 2));
1586 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1587 if (unlikely(err < 0)) {
1589 return ERR_PTR(err);
/* Reserve the FCS slot; the real value is written before sending. */
1592 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1593 put_unaligned_le16(0, skb_put(skb, 2));
1595 bt_cb(skb)->retries = 0;
/* Segment one SDU larger than remote_mps into a chain of I-frames:
 * SDU_START (carrying total length), zero or more SDU_CONTINUE, and a
 * final SDU_END.  Frames are staged on a local queue and spliced onto
 * the socket TX queue only once the whole SDU built successfully, so a
 * mid-SDU allocation failure leaves no partial SDU queued. */
1599 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1601 struct l2cap_pinfo *pi = l2cap_pi(sk);
1602 struct sk_buff *skb;
1603 struct sk_buff_head sar_queue;
1607 __skb_queue_head_init(&sar_queue);
1608 control = L2CAP_SDU_START;
/* Start segment carries the full SDU length as 'sdulen'. */
1609 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1611 return PTR_ERR(skb);
1613 __skb_queue_tail(&sar_queue, skb);
1614 len -= pi->remote_mps;
1615 size += pi->remote_mps;
1621 if (len > pi->remote_mps) {
1622 control |= L2CAP_SDU_CONTINUE;
1623 buflen = pi->remote_mps;
1625 control |= L2CAP_SDU_END;
1629 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Failure mid-SDU: drop everything staged so far. */
1631 skb_queue_purge(&sar_queue);
1632 return PTR_ERR(skb);
1635 __skb_queue_tail(&sar_queue, skb);
1640 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1641 if (sk->sk_send_head == NULL)
1642 sk->sk_send_head = sar_queue.next;
/* sendmsg() entry point.  Rejects MSG_OOB and non-connected sockets,
 * then dispatches on channel mode: connless PDU for SOCK_DGRAM, single
 * basic PDU (bounded by omtu) for basic mode, and I-frame queueing
 * (single frame or SAR segmentation) plus a transmit kick for
 * ERTM/streaming modes. */
1647 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1649 struct sock *sk = sock->sk;
1650 struct l2cap_pinfo *pi = l2cap_pi(sk);
1651 struct sk_buff *skb;
1655 BT_DBG("sock %p, sk %p", sock, sk);
1657 err = sock_error(sk);
/* L2CAP has no out-of-band data. */
1661 if (msg->msg_flags & MSG_OOB)
1666 if (sk->sk_state != BT_CONNECTED) {
1671 /* Connectionless channel */
1672 if (sk->sk_type == SOCK_DGRAM) {
1673 skb = l2cap_create_connless_pdu(sk, msg, len);
1677 err = l2cap_do_send(sk, skb);
1682 case L2CAP_MODE_BASIC:
1683 /* Check outgoing MTU */
1684 if (len > pi->omtu) {
1689 /* Create a basic PDU */
1690 skb = l2cap_create_basic_pdu(sk, msg, len);
1696 err = l2cap_do_send(sk, skb);
1701 case L2CAP_MODE_ERTM:
1702 case L2CAP_MODE_STREAMING:
1703 /* Entire SDU fits into one PDU */
1704 if (len <= pi->remote_mps) {
1705 control = L2CAP_SDU_UNSEGMENTED;
1706 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1711 __skb_queue_tail(TX_QUEUE(sk), skb);
1712 if (sk->sk_send_head == NULL)
1713 sk->sk_send_head = skb;
1715 /* Segment SDU into multiples PDUs */
1716 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Streaming mode sends unreliably; ERTM goes through the
 * retransmission engine. */
1721 if (pi->mode == L2CAP_MODE_STREAMING)
1722 err = l2cap_streaming_send(sk);
1724 err = l2cap_ertm_send(sk);
1731 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point.  A deferred-setup socket still in BT_CONNECT2
 * completes the connection on first read: it moves to BT_CONFIG and
 * sends the pending positive connect response.  Actual data delivery
 * is delegated to the common bt_sock_recvmsg(). */
1740 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1742 struct sock *sk = sock->sk;
1746 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1747 struct l2cap_conn_rsp rsp;
1749 sk->sk_state = BT_CONFIG;
/* In the response, our dcid is the peer's source CID and vice versa. */
1751 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1752 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1753 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1754 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1755 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1756 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1764 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (mtu/mode/fcs)
 * and L2CAP_LM link-mode flags mapped onto the newer sec_level plus
 * role_switch/force_reliable booleans. */
1767 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1769 struct sock *sk = sock->sk;
1770 struct l2cap_options opts;
1774 BT_DBG("sk %p", sk);
/* Preload current values so a short copy_from_user only overrides
 * the fields the caller actually supplied. */
1780 opts.imtu = l2cap_pi(sk)->imtu;
1781 opts.omtu = l2cap_pi(sk)->omtu;
1782 opts.flush_to = l2cap_pi(sk)->flush_to;
1783 opts.mode = l2cap_pi(sk)->mode;
1784 opts.fcs = l2cap_pi(sk)->fcs;
1786 len = min_t(unsigned int, sizeof(opts), optlen);
1787 if (copy_from_user((char *) &opts, optval, len)) {
1792 l2cap_pi(sk)->imtu = opts.imtu;
1793 l2cap_pi(sk)->omtu = opts.omtu;
1794 l2cap_pi(sk)->mode = opts.mode;
1795 l2cap_pi(sk)->fcs = opts.fcs;
1799 if (get_user(opt, (u32 __user *) optval)) {
/* Highest requested LM flag wins (SECURE > ENCRYPT > AUTH). */
1804 if (opt & L2CAP_LM_AUTH)
1805 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1806 if (opt & L2CAP_LM_ENCRYPT)
1807 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1808 if (opt & L2CAP_LM_SECURE)
1809 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1811 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1812 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() dispatcher: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH handles BT_SECURITY (seqpacket/raw only, level range
 * checked) and BT_DEFER_SETUP (only before the socket is connected). */
1824 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1826 struct sock *sk = sock->sk;
1827 struct bt_security sec;
1831 BT_DBG("sk %p", sk);
1833 if (level == SOL_L2CAP)
1834 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1836 if (level != SOL_BLUETOOTH)
1837 return -ENOPROTOOPT;
1843 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1848 sec.level = BT_SECURITY_LOW;
1850 len = min_t(unsigned int, sizeof(sec), optlen);
1851 if (copy_from_user((char *) &sec, optval, len)) {
1856 if (sec.level < BT_SECURITY_LOW ||
1857 sec.level > BT_SECURITY_HIGH) {
1862 l2cap_pi(sk)->sec_level = sec.level;
1865 case BT_DEFER_SETUP:
/* Defer-setup only makes sense before/while listening. */
1866 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1871 if (get_user(opt, (u32 __user *) optval)) {
1876 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler: returns L2CAP_OPTIONS, the
 * L2CAP_LM flags reconstructed from sec_level/role_switch/
 * force_reliable, or L2CAP_CONNINFO (handle + remote device class,
 * valid only once connected or in deferred-setup CONNECT2). */
1888 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1890 struct sock *sk = sock->sk;
1891 struct l2cap_options opts;
1892 struct l2cap_conninfo cinfo;
1896 BT_DBG("sk %p", sk);
1898 if (get_user(len, optlen))
1905 opts.imtu = l2cap_pi(sk)->imtu;
1906 opts.omtu = l2cap_pi(sk)->omtu;
1907 opts.flush_to = l2cap_pi(sk)->flush_to;
1908 opts.mode = l2cap_pi(sk)->mode;
1909 opts.fcs = l2cap_pi(sk)->fcs;
1911 len = min_t(unsigned int, len, sizeof(opts));
1912 if (copy_to_user(optval, (char *) &opts, len))
/* Map sec_level back onto the cumulative legacy LM flag set. */
1918 switch (l2cap_pi(sk)->sec_level) {
1919 case BT_SECURITY_LOW:
1920 opt = L2CAP_LM_AUTH;
1922 case BT_SECURITY_MEDIUM:
1923 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1925 case BT_SECURITY_HIGH:
1926 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1934 if (l2cap_pi(sk)->role_switch)
1935 opt |= L2CAP_LM_MASTER;
1937 if (l2cap_pi(sk)->force_reliable)
1938 opt |= L2CAP_LM_RELIABLE;
1940 if (put_user(opt, (u32 __user *) optval))
1944 case L2CAP_CONNINFO:
1945 if (sk->sk_state != BT_CONNECTED &&
1946 !(sk->sk_state == BT_CONNECT2 &&
1947 bt_sk(sk)->defer_setup)) {
1952 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1953 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1955 len = min_t(unsigned int, len, sizeof(cinfo));
1956 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() dispatcher, mirror of l2cap_sock_setsockopt: legacy
 * level to the old handler, SOL_BLUETOOTH serves BT_SECURITY and
 * BT_DEFER_SETUP with the same state/type restrictions as set. */
1970 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1972 struct sock *sk = sock->sk;
1973 struct bt_security sec;
1976 BT_DBG("sk %p", sk);
1978 if (level == SOL_L2CAP)
1979 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1981 if (level != SOL_BLUETOOTH)
1982 return -ENOPROTOOPT;
1984 if (get_user(len, optlen))
1991 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
1996 sec.level = l2cap_pi(sk)->sec_level;
1998 len = min_t(unsigned int, len, sizeof(sec));
1999 if (copy_to_user(optval, (char *) &sec, len))
2004 case BT_DEFER_SETUP:
2005 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2010 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown(): idempotent full shutdown of the channel — cancels the
 * socket timer, closes the L2CAP channel, and, when SO_LINGER is set,
 * waits up to sk_lingertime for the BT_CLOSED state. */
2024 static int l2cap_sock_shutdown(struct socket *sock, int how)
2026 struct sock *sk = sock->sk;
2029 BT_DBG("sock %p, sk %p", sock, sk);
2035 if (!sk->sk_shutdown) {
2036 sk->sk_shutdown = SHUTDOWN_MASK;
2037 l2cap_sock_clear_timer(sk);
2038 __l2cap_sock_close(sk, 0);
2040 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2041 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release(): final close of the socket — full shutdown followed by
 * l2cap_sock_kill() to drop the last reference. */
2048 static int l2cap_sock_release(struct socket *sock)
2050 struct sock *sk = sock->sk;
2053 BT_DBG("sock %p, sk %p", sock, sk);
2058 err = l2cap_sock_shutdown(sock, 2);
2061 l2cap_sock_kill(sk);
/* Channel configuration finished: clear config state and the setup
 * timer, then wake whoever is waiting — the connecting thread for an
 * outgoing channel, or the listening parent for an incoming one. */
2065 static void l2cap_chan_ready(struct sock *sk)
2067 struct sock *parent = bt_sk(sk)->parent;
2069 BT_DBG("sk %p, parent %p", sk, parent);
2071 l2cap_pi(sk)->conf_state = 0;
2072 l2cap_sock_clear_timer(sk);
2075 /* Outgoing channel.
2076 * Wake up socket sleeping on connect.
2078 sk->sk_state = BT_CONNECTED;
2079 sk->sk_state_change(sk);
2081 /* Incoming channel.
2082 * Wake up socket sleeping on accept.
2084 parent->sk_data_ready(parent, 0);
2088 /* Copy frame to all raw sockets on that connection */
/* Walk the connection's channel list under the read lock and queue a
 * clone of the skb on every SOCK_RAW socket (sniffer sockets), except
 * the one the frame originated from. */
2089 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2091 struct l2cap_chan_list *l = &conn->chan_list;
2092 struct sk_buff *nskb;
2095 BT_DBG("conn %p", conn);
2097 read_lock(&l->lock);
2098 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2099 if (sk->sk_type != SOCK_RAW)
2102 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: we're under a read_lock, possibly in BH context. */
2105 nskb = skb_clone(skb, GFP_ATOMIC);
/* sock_queue_rcv_skb failure (e.g. rcvbuf full) drops the clone. */
2109 if (sock_queue_rcv_skb(sk, nskb))
2112 read_unlock(&l->lock);
2115 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb carrying one signalling command on the
 * signalling CID: L2CAP header + command header + dlen bytes of data.
 * Payloads larger than conn->mtu spill into frag_list continuation
 * skbs that carry raw data only (no headers). */
2116 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2117 u8 code, u8 ident, u16 dlen, void *data)
2119 struct sk_buff *skb, **frag;
2120 struct l2cap_cmd_hdr *cmd;
2121 struct l2cap_hdr *lh;
2124 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2125 conn, code, ident, dlen);
2127 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2128 count = min_t(unsigned int, conn->mtu, len);
2130 skb = bt_skb_alloc(count, GFP_ATOMIC);
2134 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2135 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2136 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2138 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2141 cmd->len = cpu_to_le16(dlen);
2144 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2145 memcpy(skb_put(skb, count), data, count);
2151 /* Continuation fragments (no L2CAP header) */
2152 frag = &skb_shinfo(skb)->frag_list;
2154 count = min_t(unsigned int, conn->mtu, len);
2156 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2160 memcpy(skb_put(*frag, count), data, count);
2165 frag = &(*frag)->next;
/* Decode one TLV configuration option at *ptr: returns its total
 * encoded size (header + value) so the caller can advance, and stores
 * type/olen/val.  1-, 2- and 4-byte values are converted from little
 * endian; anything else is returned as a pointer into the buffer. */
2175 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2177 struct l2cap_conf_opt *opt = *ptr;
2180 len = L2CAP_CONF_OPT_SIZE + opt->len;
2188 *val = *((u8 *) opt->val);
2192 *val = __le16_to_cpu(*((__le16 *) opt->val));
2196 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer, not a value. */
2200 *val = (unsigned long) opt->val;
2204 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Encode one TLV configuration option at *ptr (inverse of
 * l2cap_get_conf_opt): scalar values are stored little endian, longer
 * values are memcpy'd from the pointer passed in 'val'.  Advances *ptr
 * past the option. */
2208 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2210 struct l2cap_conf_opt *opt = *ptr;
2212 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2219 *((u8 *) opt->val) = val;
2223 *((__le16 *) opt->val) = cpu_to_le16(val);
2227 *((__le32 *) opt->val) = cpu_to_le32(val);
2231 memcpy(opt->val, (void *) val, len);
2235 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: sends a pending acknowledgement for the
 * socket stashed in the timer argument. */
2238 static void l2cap_ack_timeout(unsigned long arg)
2240 struct sock *sk = (void *) arg;
2243 l2cap_send_ack(l2cap_pi(sk));
/* Initialise per-channel ERTM state: zero all sequence counters and
 * set up the retransmission, monitor and ack timers plus the SREJ
 * (selective-reject reassembly) queue. */
2247 static inline void l2cap_ertm_init(struct sock *sk)
2249 l2cap_pi(sk)->expected_ack_seq = 0;
2250 l2cap_pi(sk)->unacked_frames = 0;
2251 l2cap_pi(sk)->buffer_seq = 0;
2252 l2cap_pi(sk)->num_to_ack = 0;
2253 l2cap_pi(sk)->frames_sent = 0;
2255 setup_timer(&l2cap_pi(sk)->retrans_timer,
2256 l2cap_retrans_timeout, (unsigned long) sk);
2257 setup_timer(&l2cap_pi(sk)->monitor_timer,
2258 l2cap_monitor_timeout, (unsigned long) sk);
2259 setup_timer(&l2cap_pi(sk)->ack_timer,
2260 l2cap_ack_timeout, (unsigned long) sk);
2262 __skb_queue_head_init(SREJ_QUEUE(sk));
/* Check whether 'mode' is usable: it must be advertised both locally
 * (feat mask, with ERTM/streaming gated on the enable_ertm module
 * parameter elsewhere) and by the remote ('feat_mask'). */
2265 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2267 u32 local_feat_mask = l2cap_feat_mask;
2269 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2272 case L2CAP_MODE_ERTM:
2273 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2274 case L2CAP_MODE_STREAMING:
2275 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the operating mode: keep the requested ERTM/streaming mode if
 * both sides support it, otherwise fall back to basic mode. */
2281 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2284 case L2CAP_MODE_STREAMING:
2285 case L2CAP_MODE_ERTM:
2286 if (l2cap_mode_supported(mode, remote_feat_mask))
2290 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into 'data': MTU option in
 * basic mode, or an RFC option (plus optional FCS option) describing
 * the ERTM/streaming parameters.  On the first request the mode may be
 * downgraded via l2cap_select_mode() if the peer lacks support. */
2294 static int l2cap_build_conf_req(struct sock *sk, void *data)
2296 struct l2cap_pinfo *pi = l2cap_pi(sk);
2297 struct l2cap_conf_req *req = data;
2298 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2299 void *ptr = req->data;
2301 BT_DBG("sk %p", sk);
/* Mode selection happens only once, before any req/rsp exchange. */
2303 if (pi->num_conf_req || pi->num_conf_rsp)
2307 case L2CAP_MODE_STREAMING:
2308 case L2CAP_MODE_ERTM:
2309 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2310 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2311 l2cap_send_disconn_req(pi->conn, sk);
2314 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2320 case L2CAP_MODE_BASIC:
/* Only send an MTU option when it differs from the default. */
2321 if (pi->imtu != L2CAP_DEFAULT_MTU)
2322 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2325 case L2CAP_MODE_ERTM:
2326 rfc.mode = L2CAP_MODE_ERTM;
2327 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2328 rfc.max_transmit = max_transmit;
2329 rfc.retrans_timeout = 0;
2330 rfc.monitor_timeout = 0;
/* Clamp the PDU size so frame + 10 bytes of overhead fits the
 * HCI connection MTU. */
2331 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2332 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2333 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2335 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2336 sizeof(rfc), (unsigned long) &rfc);
2338 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2341 if (pi->fcs == L2CAP_FCS_NONE ||
2342 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2343 pi->fcs = L2CAP_FCS_NONE;
2344 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2348 case L2CAP_MODE_STREAMING:
2349 rfc.mode = L2CAP_MODE_STREAMING;
2351 rfc.max_transmit = 0;
2352 rfc.retrans_timeout = 0;
2353 rfc.monitor_timeout = 0;
2354 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2355 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2356 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2358 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2359 sizeof(rfc), (unsigned long) &rfc);
2361 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2364 if (pi->fcs == L2CAP_FCS_NONE ||
2365 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2366 pi->fcs = L2CAP_FCS_NONE;
2367 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2372 /* FIXME: Need actual value of the flush timeout */
2373 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2374 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2376 req->dcid = cpu_to_le16(pi->dcid);
2377 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered configuration request (pi->conf_req) and
 * build our response into 'data'.  Unknown non-hint options yield
 * L2CAP_CONF_UNKNOWN; a mode mismatch yields UNACCEPT (or refusal after
 * the second round).  On success, remote ERTM/streaming parameters are
 * recorded and our accepted options echoed back.  Returns the response
 * length, or -ECONNREFUSED. */
2382 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2384 struct l2cap_pinfo *pi = l2cap_pi(sk);
2385 struct l2cap_conf_rsp *rsp = data;
2386 void *ptr = rsp->data;
2387 void *req = pi->conf_req;
2388 int len = pi->conf_len;
2389 int type, hint, olen;
2391 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2392 u16 mtu = L2CAP_DEFAULT_MTU;
2393 u16 result = L2CAP_CONF_SUCCESS;
2395 BT_DBG("sk %p", sk);
2397 while (len >= L2CAP_CONF_OPT_SIZE) {
2398 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; non-hints must be known. */
2400 hint = type & L2CAP_CONF_HINT;
2401 type &= L2CAP_CONF_MASK;
2404 case L2CAP_CONF_MTU:
2408 case L2CAP_CONF_FLUSH_TO:
2412 case L2CAP_CONF_QOS:
2415 case L2CAP_CONF_RFC:
2416 if (olen == sizeof(rfc))
2417 memcpy(&rfc, (void *) val, olen);
2420 case L2CAP_CONF_FCS:
2421 if (val == L2CAP_FCS_NONE)
2422 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2430 result = L2CAP_CONF_UNKNOWN;
2431 *((u8 *) ptr++) = type;
2436 if (pi->num_conf_rsp || pi->num_conf_req)
2440 case L2CAP_MODE_STREAMING:
2441 case L2CAP_MODE_ERTM:
2442 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2443 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2444 return -ECONNREFUSED;
2447 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2452 if (pi->mode != rfc.mode) {
2453 result = L2CAP_CONF_UNACCEPT;
2454 rfc.mode = pi->mode;
/* Give the peer exactly one chance to retry with our mode. */
2456 if (pi->num_conf_rsp == 1)
2457 return -ECONNREFUSED;
2459 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2460 sizeof(rfc), (unsigned long) &rfc);
2464 if (result == L2CAP_CONF_SUCCESS) {
2465 /* Configure output options and let the other side know
2466 * which ones we don't like. */
2468 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2469 result = L2CAP_CONF_UNACCEPT;
2472 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2474 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2477 case L2CAP_MODE_BASIC:
2478 pi->fcs = L2CAP_FCS_NONE;
2479 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2482 case L2CAP_MODE_ERTM:
2483 pi->remote_tx_win = rfc.txwin_size;
2484 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): rfc.max_pdu_size is a wire-order (__le16) field,
 * yet it is compared raw against a host-order value and stored
 * with le16_to_cpu() instead of cpu_to_le16().  This looks
 * inverted and would misbehave on big-endian hosts — confirm
 * against the struct l2cap_conf_rfc definition. */
2485 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2486 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2488 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): same concern — cpu_to_le16() expected when
 * writing host constants into the wire-order rfc fields. */
2490 rfc.retrans_timeout =
2491 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2492 rfc.monitor_timeout =
2493 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2495 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2497 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2498 sizeof(rfc), (unsigned long) &rfc);
2502 case L2CAP_MODE_STREAMING:
/* NOTE(review): same endianness concern as the ERTM branch. */
2503 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2504 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2506 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2508 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2510 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2511 sizeof(rfc), (unsigned long) &rfc);
2516 result = L2CAP_CONF_UNACCEPT;
2518 memset(&rfc, 0, sizeof(rfc));
2519 rfc.mode = pi->mode;
2522 if (result == L2CAP_CONF_SUCCESS)
2523 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2525 rsp->scid = cpu_to_le16(pi->dcid);
2526 rsp->result = cpu_to_le16(result);
2527 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration *response* and build a follow-up
 * request into 'data', adjusting our options (MTU floor, flush timeout,
 * RFC echo) to what the peer will accept.  Refuses if the peer tries to
 * change mode after we locked it (STATE2_DEVICE).  On success, adopts
 * the negotiated ERTM/streaming timing and MPS values. */
2532 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2534 struct l2cap_pinfo *pi = l2cap_pi(sk);
2535 struct l2cap_conf_req *req = data;
2536 void *ptr = req->data;
2539 struct l2cap_conf_rfc rfc;
2541 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2543 while (len >= L2CAP_CONF_OPT_SIZE) {
2544 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2547 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject but
 * counter-offer the minimum. */
2548 if (val < L2CAP_DEFAULT_MIN_MTU) {
2549 *result = L2CAP_CONF_UNACCEPT;
2550 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2553 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2556 case L2CAP_CONF_FLUSH_TO:
2558 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2562 case L2CAP_CONF_RFC:
2563 if (olen == sizeof(rfc))
2564 memcpy(&rfc, (void *)val, olen);
2566 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2567 rfc.mode != pi->mode)
2568 return -ECONNREFUSED;
2570 pi->mode = rfc.mode;
2573 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2574 sizeof(rfc), (unsigned long) &rfc);
2579 if (*result == L2CAP_CONF_SUCCESS) {
2581 case L2CAP_MODE_ERTM:
2582 pi->remote_tx_win = rfc.txwin_size;
2583 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2584 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2585 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2587 case L2CAP_MODE_STREAMING:
2588 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2592 req->dcid = cpu_to_le16(pi->dcid);
2593 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal (option-less) configuration response header with the
 * given result and continuation flags; returns the response length. */
2598 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2600 struct l2cap_conf_rsp *rsp = data;
2601 void *ptr = rsp->data;
2603 BT_DBG("sk %p", sk);
2605 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2606 rsp->result = cpu_to_le16(result);
2607 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful configuration response and
 * adopt the peer-confirmed ERTM/streaming parameters (tx window,
 * timeouts, MPS).  No-op for basic-mode channels. */
2612 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2614 struct l2cap_pinfo *pi = l2cap_pi(sk);
2617 struct l2cap_conf_rfc rfc;
2619 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2621 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2624 while (len >= L2CAP_CONF_OPT_SIZE) {
2625 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2628 case L2CAP_CONF_RFC:
2629 if (olen == sizeof(rfc))
2630 memcpy(&rfc, (void *)val, olen);
2637 case L2CAP_MODE_ERTM:
2638 pi->remote_tx_win = rfc.txwin_size;
2639 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2640 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2641 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2643 case L2CAP_MODE_STREAMING:
2644 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it matches our outstanding
 * information request, treat the feature-mask exchange as done and
 * start any pending channels. */
2648 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2650 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 == "command not understood"; other reasons are ignored. */
2652 if (rej->reason != 0x0000)
2655 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2656 cmd->ident == conn->info_ident) {
2657 del_timer(&conn->info_timer);
2659 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2660 conn->info_ident = 0;
2662 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener on the PSM,
 * enforce link security (except for SDP, PSM 0x0001), check backlog
 * and duplicate CIDs, create and register the child socket, then reply
 * with success / pending / rejection.  When pending with no info yet,
 * kick off the feature-mask information exchange. */
2668 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2670 struct l2cap_chan_list *list = &conn->chan_list;
2671 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2672 struct l2cap_conn_rsp rsp;
2673 struct sock *sk, *parent;
2674 int result, status = L2CAP_CS_NO_INFO;
2676 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2677 __le16 psm = req->psm;
2679 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2681 /* Check if we have socket listening on psm */
2682 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2684 result = L2CAP_CR_BAD_PSM;
2688 /* Check if the ACL is secure enough (if not SDP) */
2689 if (psm != cpu_to_le16(0x0001) &&
2690 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: authentication failure disconnect reason. */
2691 conn->disc_reason = 0x05;
2692 result = L2CAP_CR_SEC_BLOCK;
2696 result = L2CAP_CR_NO_MEM;
2698 /* Check for backlog size */
2699 if (sk_acceptq_is_full(parent)) {
2700 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2704 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2708 write_lock_bh(&list->lock);
2710 /* Check if we already have channel with that dcid */
2711 if (__l2cap_get_chan_by_dcid(list, scid)) {
2712 write_unlock_bh(&list->lock);
2713 sock_set_flag(sk, SOCK_ZAPPED);
2714 l2cap_sock_kill(sk);
2718 hci_conn_hold(conn->hcon);
2720 l2cap_sock_init(sk, parent);
2721 bacpy(&bt_sk(sk)->src, conn->src);
2722 bacpy(&bt_sk(sk)->dst, conn->dst);
2723 l2cap_pi(sk)->psm = psm;
/* Our dcid is the peer's source CID. */
2724 l2cap_pi(sk)->dcid = scid;
2726 __l2cap_chan_add(conn, sk, parent);
2727 dcid = l2cap_pi(sk)->scid;
2729 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2731 l2cap_pi(sk)->ident = cmd->ident;
2733 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2734 if (l2cap_check_security(sk)) {
2735 if (bt_sk(sk)->defer_setup) {
2736 sk->sk_state = BT_CONNECT2;
2737 result = L2CAP_CR_PEND;
2738 status = L2CAP_CS_AUTHOR_PEND;
2739 parent->sk_data_ready(parent, 0);
2741 sk->sk_state = BT_CONFIG;
2742 result = L2CAP_CR_SUCCESS;
2743 status = L2CAP_CS_NO_INFO;
2746 sk->sk_state = BT_CONNECT2;
2747 result = L2CAP_CR_PEND;
2748 status = L2CAP_CS_AUTHEN_PEND;
2751 sk->sk_state = BT_CONNECT2;
2752 result = L2CAP_CR_PEND;
2753 status = L2CAP_CS_NO_INFO;
2756 write_unlock_bh(&list->lock);
2759 bh_unlock_sock(parent);
2762 rsp.scid = cpu_to_le16(scid);
2763 rsp.dcid = cpu_to_le16(dcid);
2764 rsp.result = cpu_to_le16(result);
2765 rsp.status = cpu_to_le16(status);
2766 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2768 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2769 struct l2cap_info_req info;
2770 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2772 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2773 conn->info_ident = l2cap_get_ident(conn);
2775 mod_timer(&conn->info_timer, jiffies +
2776 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2778 l2cap_send_cmd(conn, conn->info_ident,
2779 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle an incoming Connection Response: locate the channel (by scid,
 * or by the command ident while still pending), then on success move to
 * BT_CONFIG and fire our configuration request; on pending just mark
 * the state; otherwise tear the channel down with ECONNREFUSED. */
2785 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2787 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2788 u16 scid, dcid, result, status;
2792 scid = __le16_to_cpu(rsp->scid);
2793 dcid = __le16_to_cpu(rsp->dcid);
2794 result = __le16_to_cpu(rsp->result);
2795 status = __le16_to_cpu(rsp->status);
2797 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2800 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* scid may be 0 while the connection is still pending; fall back
 * to matching by signalling ident. */
2804 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2810 case L2CAP_CR_SUCCESS:
2811 sk->sk_state = BT_CONFIG;
2812 l2cap_pi(sk)->ident = 0;
2813 l2cap_pi(sk)->dcid = dcid;
2814 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2816 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2818 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2819 l2cap_build_conf_req(sk, req), req);
2820 l2cap_pi(sk)->num_conf_req++;
2824 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2828 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming Configuration Request.  Options may arrive split
 * across several requests (continuation flag 0x0001): partial data is
 * accumulated in pi->conf_req (rejected if it would overflow), and only
 * a complete request is parsed and answered.  When both directions are
 * configured, finalise FCS choice, reset sequence state and mark the
 * channel ready; if we haven't sent our own request yet, send it now. */
2836 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2838 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2844 dcid = __le16_to_cpu(req->dcid);
2845 flags = __le16_to_cpu(req->flags);
2847 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2849 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2853 if (sk->sk_state == BT_DISCONN)
2856 /* Reject if config buffer is too small. */
2857 len = cmd_len - sizeof(*req);
2858 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2859 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2860 l2cap_build_conf_rsp(sk, rsp,
2861 L2CAP_CONF_REJECT, flags), rsp);
2866 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2867 l2cap_pi(sk)->conf_len += len;
2869 if (flags & 0x0001) {
2870 /* Incomplete config. Send empty response. */
2871 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2872 l2cap_build_conf_rsp(sk, rsp,
2873 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2877 /* Complete config. */
2878 len = l2cap_parse_conf_req(sk, rsp);
2880 l2cap_send_disconn_req(conn, sk);
2884 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2885 l2cap_pi(sk)->num_conf_rsp++;
2887 /* Reset config buffer. */
2888 l2cap_pi(sk)->conf_len = 0;
2890 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2893 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* CRC16 is the default FCS unless both sides agreed to none. */
2894 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2895 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2896 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2898 sk->sk_state = BT_CONNECTED;
2900 l2cap_pi(sk)->next_tx_seq = 0;
2901 l2cap_pi(sk)->expected_tx_seq = 0;
2902 __skb_queue_head_init(TX_QUEUE(sk));
2903 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2904 l2cap_ertm_init(sk);
2906 l2cap_chan_ready(sk);
2910 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2912 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2913 l2cap_build_conf_req(sk, buf), buf);
2914 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming Configuration Response.  Success adopts the RFC
 * parameters; UNACCEPT re-parses the peer's counter-proposal and sends
 * a new request (bounded by L2CAP_CONF_MAX_CONF_RSP rounds); any other
 * result schedules a disconnect.  When our input side completes and
 * output was already done, finalise FCS and bring the channel up. */
2922 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2924 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2925 u16 scid, flags, result;
/* NOTE(review): cmd->len is a wire-order (__le16) field used here
 * without le16_to_cpu() — harmless on little-endian, wrong on
 * big-endian hosts; confirm against struct l2cap_cmd_hdr. */
2927 int len = cmd->len - sizeof(*rsp);
2929 scid = __le16_to_cpu(rsp->scid);
2930 flags = __le16_to_cpu(rsp->flags);
2931 result = __le16_to_cpu(rsp->result);
2933 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2934 scid, flags, result);
2936 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2941 case L2CAP_CONF_SUCCESS:
2942 l2cap_conf_rfc_get(sk, rsp->data, len);
2945 case L2CAP_CONF_UNACCEPT:
2946 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2949 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2950 l2cap_send_disconn_req(conn, sk);
2954 /* throw out any old stored conf requests */
2955 result = L2CAP_CONF_SUCCESS;
2956 len = l2cap_parse_conf_rsp(sk, rsp->data,
2959 l2cap_send_disconn_req(conn, sk);
2963 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2964 L2CAP_CONF_REQ, len, req);
2965 l2cap_pi(sk)->num_conf_req++;
2966 if (result != L2CAP_CONF_SUCCESS)
2972 sk->sk_state = BT_DISCONN;
2973 sk->sk_err = ECONNRESET;
/* Give the disconnect exchange 5 seconds before forcing closure. */
2974 l2cap_sock_set_timer(sk, HZ * 5);
2975 l2cap_send_disconn_req(conn, sk);
2982 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2984 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2985 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2986 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2987 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2989 sk->sk_state = BT_CONNECTED;
2990 l2cap_pi(sk)->next_tx_seq = 0;
2991 l2cap_pi(sk)->expected_tx_seq = 0;
2992 __skb_queue_head_init(TX_QUEUE(sk));
2993 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2994 l2cap_ertm_init(sk);
2996 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge with a
 * disconnect response, drop pending TX (and ERTM timers/queues), then
 * delete and kill the channel with ECONNRESET. */
3004 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3006 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3007 struct l2cap_disconn_rsp rsp;
3011 scid = __le16_to_cpu(req->scid);
3012 dcid = __le16_to_cpu(req->dcid);
3014 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The request's dcid is OUR source CID. */
3016 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3020 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3021 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3022 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3024 sk->sk_shutdown = SHUTDOWN_MASK;
3026 skb_queue_purge(TX_QUEUE(sk));
3028 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3029 skb_queue_purge(SREJ_QUEUE(sk));
3030 del_timer(&l2cap_pi(sk)->retrans_timer);
3031 del_timer(&l2cap_pi(sk)->monitor_timer);
3032 del_timer(&l2cap_pi(sk)->ack_timer);
3035 l2cap_chan_del(sk, ECONNRESET);
3038 l2cap_sock_kill(sk);
/* Handle an incoming Disconnection Response to our own request: purge
 * TX (and ERTM state), then delete and kill the channel cleanly
 * (error 0 — we initiated the disconnect). */
3042 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3044 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3048 scid = __le16_to_cpu(rsp->scid);
3049 dcid = __le16_to_cpu(rsp->dcid);
3051 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3053 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3057 skb_queue_purge(TX_QUEUE(sk));
3059 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3060 skb_queue_purge(SREJ_QUEUE(sk));
3061 del_timer(&l2cap_pi(sk)->retrans_timer);
3062 del_timer(&l2cap_pi(sk)->monitor_timer);
3063 del_timer(&l2cap_pi(sk)->ack_timer);
3066 l2cap_chan_del(sk, 0);
3069 l2cap_sock_kill(sk);
/* Handle an incoming Information Request: answer FEAT_MASK with our
 * feature mask (ERTM/streaming bits added when enabled), FIXED_CHAN
 * with the fixed-channel bitmap, and anything else with NOTSUPP. */
3073 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3075 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3078 type = __le16_to_cpu(req->type);
3080 BT_DBG("type 0x%4.4x", type);
3082 if (type == L2CAP_IT_FEAT_MASK) {
3084 u32 feat_mask = l2cap_feat_mask;
3085 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3086 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3087 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3089 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3091 put_unaligned_le32(feat_mask, rsp->data);
3092 l2cap_send_cmd(conn, cmd->ident,
3093 L2CAP_INFO_RSP, sizeof(buf), buf);
3094 } else if (type == L2CAP_IT_FIXED_CHAN) {
3096 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3097 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3098 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel map follows the 4-byte info-rsp header. */
3099 memcpy(buf + 4, l2cap_fixed_chan, 8);
3100 l2cap_send_cmd(conn, cmd->ident,
3101 L2CAP_INFO_RSP, sizeof(buf), buf);
3103 struct l2cap_info_rsp rsp;
3104 rsp.type = cpu_to_le16(type);
3105 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3106 l2cap_send_cmd(conn, cmd->ident,
3107 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response: record the peer's feature
 * mask; if it advertises fixed channels, chain a FIXED_CHAN query,
 * otherwise (and after the fixed-channel answer) mark the exchange
 * done and start any channels waiting on it. */
3113 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3115 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3118 type = __le16_to_cpu(rsp->type);
3119 result = __le16_to_cpu(rsp->result);
3121 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3123 del_timer(&conn->info_timer);
3125 if (type == L2CAP_IT_FEAT_MASK) {
3126 conn->feat_mask = get_unaligned_le32(rsp->data);
3128 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3129 struct l2cap_info_req req;
3130 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3132 conn->info_ident = l2cap_get_ident(conn);
3134 l2cap_send_cmd(conn, conn->info_ident,
3135 L2CAP_INFO_REQ, sizeof(req), &req);
3137 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3138 conn->info_ident = 0;
3140 l2cap_conn_start(conn);
3142 } else if (type == L2CAP_IT_FIXED_CHAN) {
3143 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3144 conn->info_ident = 0;
3146 l2cap_conn_start(conn);
/* Parse the signalling channel (CID 0x0001): iterate over the command
 * headers packed into one skb and dispatch each to its handler.  On a
 * handler error, send a Command Reject back to the peer. */
3152 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3154 u8 *data = skb->data;
3156 struct l2cap_cmd_hdr cmd;
/* Mirror raw signalling traffic to any raw sockets first. */
3159 l2cap_raw_recv(conn, skb);
3161 while (len >= L2CAP_CMD_HDR_SIZE) {
3163 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3164 data += L2CAP_CMD_HDR_SIZE;
3165 len -= L2CAP_CMD_HDR_SIZE;
3167 cmd_len = le16_to_cpu(cmd.len);
3169 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Bounds-check the advertised command length; ident 0 is illegal. */
3171 if (cmd_len > len || !cmd.ident) {
3172 BT_DBG("corrupted command");
3177 case L2CAP_COMMAND_REJ:
3178 l2cap_command_rej(conn, &cmd, data);
3181 case L2CAP_CONN_REQ:
3182 err = l2cap_connect_req(conn, &cmd, data);
3185 case L2CAP_CONN_RSP:
3186 err = l2cap_connect_rsp(conn, &cmd, data);
3189 case L2CAP_CONF_REQ:
3190 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3193 case L2CAP_CONF_RSP:
3194 err = l2cap_config_rsp(conn, &cmd, data);
3197 case L2CAP_DISCONN_REQ:
3198 err = l2cap_disconnect_req(conn, &cmd, data);
3201 case L2CAP_DISCONN_RSP:
3202 err = l2cap_disconnect_rsp(conn, &cmd, data);
3205 case L2CAP_ECHO_REQ:
/* Echo request: bounce the payload straight back. */
3206 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3209 case L2CAP_ECHO_RSP:
3212 case L2CAP_INFO_REQ:
3213 err = l2cap_information_req(conn, &cmd, data);
3216 case L2CAP_INFO_RSP:
3217 err = l2cap_information_rsp(conn, &cmd, data);
3221 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3227 struct l2cap_cmd_rej rej;
3228 BT_DBG("error %d", err);
3230 /* FIXME: Map err to a valid reason */
3231 rej.reason = cpu_to_le16(0);
3232 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailing an ERTM/streaming frame.  The skb is
 * trimmed by 2 first; the FCS bytes are then read from just past the new
 * length (still present in the buffer — skb_trim only shrinks skb->len,
 * it does not scrub the data).  CRC covers the L2CAP header + control
 * field (hdr_size bytes before skb->data) plus the remaining payload. */
3242 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3244 u16 our_fcs, rcv_fcs;
3245 int hdr_size = L2CAP_HDR_SIZE + 2;
3247 if (pi->fcs == L2CAP_FCS_CRC16) {
3248 skb_trim(skb, skb->len - 2);
3249 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3250 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3252 if (our_fcs != rcv_fcs)
/* Respond to a poll (P=1) from the peer: send RNR if we are locally busy,
 * otherwise flush pending I-frames, and fall back to an RR if nothing was
 * sent — in every case one frame carries the F-bit back. */
3258 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3260 struct l2cap_pinfo *pi = l2cap_pi(sk);
3263 pi->frames_sent = 0;
3264 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
/* Acknowledge everything received so far in the ReqSeq field. */
3266 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3268 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3269 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3270 l2cap_send_sframe(pi, control);
3271 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3274 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3275 __mod_retrans_timer();
3277 l2cap_ertm_send(sk);
/* Nothing queued and not busy: answer the poll with a plain RR. */
3279 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3280 pi->frames_sent == 0) {
3281 control |= L2CAP_SUPER_RCV_READY;
3282 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq so reassembly can drain it in order.
 * NOTE(review): ordering by raw tx_seq compare — modulo-64 wraparound of
 * the sequence space is not visible here; confirm against full source. */
3286 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3288 struct sk_buff *next_skb;
3290 bt_cb(skb)->tx_seq = tx_seq;
3291 bt_cb(skb)->sar = sar;
3293 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue (peek returned NULL on an omitted branch): append. */
3295 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Walk until we find the first queued frame with a larger tx_seq. */
3300 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3301 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3305 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3308 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3310 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an SDU from its SAR fragments (control-field SAR bits):
 * unsegmented frames are queued directly; START allocates pi->sdu and
 * records the expected length; CONTINUE/END append, with END cloning the
 * completed SDU onto the socket receive queue. */
3313 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3315 struct l2cap_pinfo *pi = l2cap_pi(sk);
3316 struct sk_buff *_skb;
3319 switch (control & L2CAP_CTRL_SAR) {
3320 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while mid-reassembly is a protocol error. */
3321 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3326 err = sock_queue_rcv_skb(sk, skb);
3332 case L2CAP_SDU_START:
3333 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes of a START frame carry the SDU length. */
3338 pi->sdu_len = get_unaligned_le16(skb->data);
3341 if (pi->sdu_len > pi->imtu) {
3346 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3352 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3354 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3355 pi->partial_sdu_len = skb->len;
3359 case L2CAP_SDU_CONTINUE:
3360 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3363 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3365 pi->partial_sdu_len += skb->len;
3366 if (pi->partial_sdu_len > pi->sdu_len)
/* END fragment (case label on an omitted line). */
3374 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3377 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3379 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3380 pi->partial_sdu_len += skb->len;
3382 if (pi->partial_sdu_len > pi->imtu)
3385 if (pi->partial_sdu_len == pi->sdu_len) {
/* NOTE(review): skb_clone() may return NULL under memory pressure;
 * no NULL check is visible before sock_queue_rcv_skb(). */
3386 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3387 err = sock_queue_rcv_skb(sk, _skb);
/* Drain the SREJ queue once a retransmitted frame fills the gap: dequeue
 * consecutive in-order frames, push each through SAR reassembly, and
 * advance buffer_seq_srej modulo the 64-frame sequence space. */
3402 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3404 struct sk_buff *skb;
3407 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3408 if (bt_cb(skb)->tx_seq != tx_seq)
3411 skb = skb_dequeue(SREJ_QUEUE(sk));
3412 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3413 l2cap_sar_reassembly_sdu(sk, skb, control);
3414 l2cap_pi(sk)->buffer_seq_srej =
3415 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Re-send SREJ S-frames for every sequence number still outstanding in
 * the SREJ list; the entry matching tx_seq is dropped (satisfied), the
 * rest are re-queued at the tail after being re-requested. */
3420 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3422 struct l2cap_pinfo *pi = l2cap_pi(sk);
3423 struct srej_list *l, *tmp;
3426 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3427 if (l->tx_seq == tx_seq) {
3432 control = L2CAP_SUPER_SELECT_REJECT;
3433 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3434 l2cap_send_sframe(pi, control);
/* Keep the entry pending until its retransmission arrives. */
3436 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send a SREJ for every missing sequence number between expected_tx_seq
 * and the tx_seq actually received, recording each in the SREJ list.
 * NOTE(review): kzalloc(GFP_ATOMIC) return value is not checked — a
 * failed allocation would be dereferenced on the next line. */
3440 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3442 struct l2cap_pinfo *pi = l2cap_pi(sk);
3443 struct srej_list *new;
3446 while (tx_seq != pi->expected_tx_seq) {
3447 control = L2CAP_SUPER_SELECT_REJECT;
3448 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3449 l2cap_send_sframe(pi, control);
3451 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3452 new->tx_seq = pi->expected_tx_seq++;
3453 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past the frame we just received. */
3455 pi->expected_tx_seq++;
/* Process a received ERTM I-frame: handle the F-bit, ack outstanding
 * frames via ReqSeq, detect out-of-sequence delivery (SREJ machinery),
 * and for in-sequence frames run SAR reassembly and schedule an ack. */
3458 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3460 struct l2cap_pinfo *pi = l2cap_pi(sk);
3461 u8 tx_seq = __get_txseq(rx_control);
3462 u8 req_seq = __get_reqseq(rx_control);
3463 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3466 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit answers our poll: stop the monitor timer, leave WAIT_F. */
3468 if (L2CAP_CTRL_FINAL & rx_control) {
3469 del_timer(&pi->monitor_timer);
3470 if (pi->unacked_frames > 0)
3471 __mod_retrans_timer();
3472 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* ReqSeq acknowledges our transmitted frames. */
3475 pi->expected_ack_seq = req_seq;
3476 l2cap_drop_acked_frames(sk);
3478 if (tx_seq == pi->expected_tx_seq)
/* Out-of-sequence frame while a SREJ exchange is in progress. */
3481 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3482 struct srej_list *first;
3484 first = list_first_entry(SREJ_LIST(sk),
3485 struct srej_list, list);
3486 if (tx_seq == first->tx_seq) {
/* This is the retransmission we asked for first: queue it and
 * try to drain the gap. */
3487 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3488 l2cap_check_srej_gap(sk, tx_seq);
3490 list_del(&first->list);
3493 if (list_empty(SREJ_LIST(sk))) {
3494 pi->buffer_seq = pi->buffer_seq_srej;
3495 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3498 struct srej_list *l;
3499 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3501 list_for_each_entry(l, SREJ_LIST(sk), list) {
3502 if (l->tx_seq == tx_seq) {
3503 l2cap_resend_srejframe(sk, tx_seq);
3507 l2cap_send_srejframe(sk, tx_seq);
/* First gap detected: enter SREJ_SENT state and start queueing. */
3510 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3512 INIT_LIST_HEAD(SREJ_LIST(sk));
3513 pi->buffer_seq_srej = pi->buffer_seq;
3515 __skb_queue_head_init(SREJ_QUEUE(sk));
3516 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3518 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3520 l2cap_send_srejframe(sk, tx_seq);
/* In-sequence path (label on an omitted line). */
3525 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3527 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3528 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3532 if (rx_control & L2CAP_CTRL_FINAL) {
3533 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3534 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* Retransmit from the acked point after an F-bit. */
3536 sk->sk_send_head = TX_QUEUE(sk)->next;
3537 pi->next_tx_seq = pi->expected_ack_seq;
3538 l2cap_ertm_send(sk);
3542 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3544 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Piggyback an ack once enough frames have accumulated. */
3550 pi->num_to_ack = (pi->num_to_ack + 1) % L2CAP_DEFAULT_NUM_TO_ACK;
3551 if (pi->num_to_ack == L2CAP_DEFAULT_NUM_TO_ACK - 1)
/* Process a Receiver Ready S-frame: ack frames via ReqSeq, then react to
 * the P/F bits — answer a poll, resume after an F-bit, or simply clear
 * remote-busy and continue transmitting. */
3557 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3559 struct l2cap_pinfo *pi = l2cap_pi(sk);
3561 pi->expected_ack_seq = __get_reqseq(rx_control);
3562 l2cap_drop_acked_frames(sk);
3564 if (rx_control & L2CAP_CTRL_POLL) {
3565 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3566 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3567 (pi->unacked_frames > 0))
3568 __mod_retrans_timer();
3570 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* Mid-SREJ poll: answer with the tail SREJ carrying the F-bit. */
3571 l2cap_send_srejtail(sk);
3573 l2cap_send_i_or_rr_or_rnr(sk);
3574 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3577 } else if (rx_control & L2CAP_CTRL_FINAL) {
3578 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3580 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3581 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* F-bit closes a poll cycle: retransmit from the acked point. */
3583 sk->sk_send_head = TX_QUEUE(sk)->next;
3584 pi->next_tx_seq = pi->expected_ack_seq;
3585 l2cap_ertm_send(sk);
3589 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3590 (pi->unacked_frames > 0))
3591 __mod_retrans_timer();
3593 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3594 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3597 l2cap_ertm_send(sk);
/* Process a Reject S-frame: the peer asks for retransmission starting at
 * ReqSeq.  Drop acked frames, rewind next_tx_seq, and resend; in WAIT_F
 * state remember the request so a duplicate after the F-bit is ignored. */
3601 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3603 struct l2cap_pinfo *pi = l2cap_pi(sk);
3604 u8 tx_seq = __get_reqseq(rx_control);
3606 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3608 pi->expected_ack_seq = tx_seq;
3609 l2cap_drop_acked_frames(sk);
3611 if (rx_control & L2CAP_CTRL_FINAL) {
3612 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3613 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3615 sk->sk_send_head = TX_QUEUE(sk)->next;
3616 pi->next_tx_seq = pi->expected_ack_seq;
3617 l2cap_ertm_send(sk);
/* Non-final REJ: rewind and retransmit immediately. */
3620 sk->sk_send_head = TX_QUEUE(sk)->next;
3621 pi->next_tx_seq = pi->expected_ack_seq;
3622 l2cap_ertm_send(sk);
3624 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3625 pi->srej_save_reqseq = tx_seq;
3626 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Process a Selective Reject S-frame: retransmit exactly the requested
 * frame.  P-bit variants also ack and keep sending; F-bit variants clear
 * a previously-recorded SREJ unless it matches the saved request. */
3630 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3632 struct l2cap_pinfo *pi = l2cap_pi(sk);
3633 u8 tx_seq = __get_reqseq(rx_control);
3635 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3637 if (rx_control & L2CAP_CTRL_POLL) {
3638 pi->expected_ack_seq = tx_seq;
3639 l2cap_drop_acked_frames(sk);
3640 l2cap_retransmit_frame(sk, tx_seq);
3641 l2cap_ertm_send(sk);
3642 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3643 pi->srej_save_reqseq = tx_seq;
3644 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3646 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Ignore the duplicate if we already acted on this SREJ. */
3647 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3648 pi->srej_save_reqseq == tx_seq)
3649 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3651 l2cap_retransmit_frame(sk, tx_seq);
3653 l2cap_retransmit_frame(sk, tx_seq);
3654 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3655 pi->srej_save_reqseq = tx_seq;
3656 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Process a Receiver Not Ready S-frame: mark the peer busy, ack frames
 * via ReqSeq, stop retransmitting, and if polled answer with an F-bit. */
3661 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3663 struct l2cap_pinfo *pi = l2cap_pi(sk);
3664 u8 tx_seq = __get_reqseq(rx_control);
3666 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3667 pi->expected_ack_seq = tx_seq;
3668 l2cap_drop_acked_frames(sk);
3670 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer can't receive: no point running the retransmission timer. */
3671 del_timer(&pi->retrans_timer);
3672 if (rx_control & L2CAP_CTRL_POLL) {
3673 u16 control = L2CAP_CTRL_FINAL;
3674 l2cap_send_rr_or_rnr(pi, control);
/* Mid-SREJ: answer a poll with the tail SREJ, else a plain RR. */
3679 if (rx_control & L2CAP_CTRL_POLL)
3680 l2cap_send_srejtail(sk);
3682 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame (RR/REJ/SREJ/RNR) to its handler after the
 * common F-bit processing (stop monitor timer, leave WAIT_F state). */
3685 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3687 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3689 if (L2CAP_CTRL_FINAL & rx_control) {
3690 del_timer(&l2cap_pi(sk)->monitor_timer);
3691 if (l2cap_pi(sk)->unacked_frames > 0)
3692 __mod_retrans_timer();
3693 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3696 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3697 case L2CAP_SUPER_RCV_READY:
3698 l2cap_data_channel_rrframe(sk, rx_control);
3701 case L2CAP_SUPER_REJECT:
3702 l2cap_data_channel_rejframe(sk, rx_control);
3705 case L2CAP_SUPER_SELECT_REJECT:
3706 l2cap_data_channel_srejframe(sk, rx_control);
3709 case L2CAP_SUPER_RCV_NOT_READY:
3710 l2cap_data_channel_rnrframe(sk, rx_control);
/* Entry point for data arriving on a connection-oriented channel: look
 * up the socket by source CID and demultiplex by channel mode (basic,
 * ERTM, streaming), validating control field and FCS where applicable. */
3718 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3721 struct l2cap_pinfo *pi;
3725 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3727 BT_DBG("unknown cid 0x%4.4x", cid);
3733 BT_DBG("sk %p, len %d", sk, skb->len);
3735 if (sk->sk_state != BT_CONNECTED)
3739 case L2CAP_MODE_BASIC:
3740 /* If socket recv buffers overflows we drop data here
3741 * which is *bad* because L2CAP has to be reliable.
3742 * But we don't have any other choice. L2CAP doesn't
3743 * provide flow control mechanism. */
3745 if (pi->imtu < skb->len)
3748 if (!sock_queue_rcv_skb(sk, skb))
3752 case L2CAP_MODE_ERTM:
/* ERTM frames start with a 2-byte control field (and end with an
 * optional CRC16 FCS, stripped by l2cap_check_fcs). */
3753 control = get_unaligned_le16(skb->data);
3757 if (__is_sar_start(control))
3760 if (pi->fcs == L2CAP_FCS_CRC16)
3764 * We can just drop the corrupted I-frame here.
3765 * Receiver will miss it and start proper recovery
3766 * procedures and ask retransmission.
3771 if (l2cap_check_fcs(pi, skb))
3774 if (__is_iframe(control)) {
3778 l2cap_data_channel_iframe(sk, control, skb);
3783 l2cap_data_channel_sframe(sk, control, skb);
3788 case L2CAP_MODE_STREAMING:
3789 control = get_unaligned_le16(skb->data);
3793 if (__is_sar_start(control))
3796 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode: no S-frames, bounded by MPS, min 4 bytes. */
3799 if (len > pi->mps || len < 4 || __is_sframe(control))
3802 if (l2cap_check_fcs(pi, skb))
3805 tx_seq = __get_txseq(control);
/* Streaming tolerates loss: just resync expected_tx_seq. */
3807 if (pi->expected_tx_seq == tx_seq)
3808 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3810 pi->expected_tx_seq = (tx_seq + 1) % 64;
3812 l2cap_sar_reassembly_sdu(sk, skb, control);
3817 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 0x0002) frame to the socket bound to the
 * given PSM, subject to state and incoming-MTU checks. */
3831 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3835 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3839 BT_DBG("sk %p, len %d", sk, skb->len);
3841 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3844 if (l2cap_pi(sk)->imtu < skb->len)
3847 if (!sock_queue_rcv_skb(sk, skb))
/* Route one complete L2CAP frame by destination CID: signalling channel,
 * connectionless channel, or a connection-oriented data channel. */
3859 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3861 struct l2cap_hdr *lh = (void *) skb->data;
3865 skb_pull(skb, L2CAP_HDR_SIZE);
3866 cid = __le16_to_cpu(lh->cid);
3867 len = __le16_to_cpu(lh->len);
/* Header length must match the payload we actually received. */
3869 if (len != skb->len) {
3874 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3877 case L2CAP_CID_SIGNALING:
3878 l2cap_sig_channel(conn, skb);
3881 case L2CAP_CID_CONN_LESS:
/* Connectionless payload is prefixed by a 2-byte PSM. */
3882 psm = get_unaligned_le16(skb->data);
3884 l2cap_conless_channel(conn, psm, skb);
3888 l2cap_data_channel(conn, cid, skb);
3893 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being set up.  Scan the
 * listening L2CAP sockets and return the link-mode flags (accept/master)
 * to apply — exact-address matches take precedence over wildcard binds. */
3895 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3897 int exact = 0, lm1 = 0, lm2 = 0;
3898 register struct sock *sk;
3899 struct hlist_node *node;
3901 if (type != ACL_LINK)
3904 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3906 /* Find listening sockets and check their link_mode */
3907 read_lock(&l2cap_sk_list.lock);
3908 sk_for_each(sk, node, &l2cap_sk_list.head) {
3909 if (sk->sk_state != BT_LISTEN)
/* Socket bound to this adapter's own address: exact match. */
3912 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3913 lm1 |= HCI_LM_ACCEPT;
3914 if (l2cap_pi(sk)->role_switch)
3915 lm1 |= HCI_LM_MASTER;
3917 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3918 lm2 |= HCI_LM_ACCEPT;
3919 if (l2cap_pi(sk)->role_switch)
3920 lm2 |= HCI_LM_MASTER;
3923 read_unlock(&l2cap_sk_list.lock);
3925 return exact ? lm1 : lm2;
/* HCI callback: ACL connection setup finished.  On success create/ready
 * the L2CAP connection, otherwise tear it down with the mapped errno. */
3928 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3930 struct l2cap_conn *conn;
3932 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3934 if (hcon->type != ACL_LINK)
3938 conn = l2cap_conn_add(hcon, status);
3940 l2cap_conn_ready(conn);
3942 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the HCI reason code to use when this ACL link is
 * disconnected, taken from the L2CAP connection's recorded disc_reason. */
3947 static int l2cap_disconn_ind(struct hci_conn *hcon)
3949 struct l2cap_conn *conn = hcon->l2cap_data;
3951 BT_DBG("hcon %p", hcon);
3953 if (hcon->type != ACL_LINK || !conn)
3956 return conn->disc_reason;
/* HCI callback: ACL link went down — destroy the L2CAP connection and
 * propagate the reason (mapped to an errno) to all its channels. */
3959 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3961 BT_DBG("hcon %p reason %d", hcon, reason);
3963 if (hcon->type != ACL_LINK)
3966 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a SEQPACKET channel: losing encryption
 * gives a MEDIUM-security channel a 5 s grace timer and closes a
 * HIGH-security channel outright; regaining it cancels the grace timer. */
3971 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3973 if (sk->sk_type != SOCK_SEQPACKET)
3976 if (encrypt == 0x00) {
3977 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
3978 l2cap_sock_clear_timer(sk);
3979 l2cap_sock_set_timer(sk, HZ * 5);
3980 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
3981 __l2cap_sock_close(sk, ECONNREFUSED);
3983 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
3984 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption procedure finished.  Walk every
 * channel on the connection and advance its state machine — established
 * channels get an encryption check, BT_CONNECT channels send the pending
 * Connection Request, and BT_CONNECT2 channels answer the peer's request
 * with success or a security block depending on 'status'. */
3988 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
3990 struct l2cap_chan_list *l;
3991 struct l2cap_conn *conn = hcon->l2cap_data;
3997 l = &conn->chan_list;
3999 BT_DBG("conn %p", conn);
4001 read_lock(&l->lock);
4003 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip channels not waiting on this security procedure. */
4006 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4011 if (!status && (sk->sk_state == BT_CONNECTED ||
4012 sk->sk_state == BT_CONFIG)) {
4013 l2cap_check_encryption(sk, encrypt);
4018 if (sk->sk_state == BT_CONNECT) {
/* Security done for an outgoing channel: send Connect Request. */
4020 struct l2cap_conn_req req;
4021 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4022 req.psm = l2cap_pi(sk)->psm;
4024 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4026 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4027 L2CAP_CONN_REQ, sizeof(req), &req);
4029 l2cap_sock_clear_timer(sk);
4030 l2cap_sock_set_timer(sk, HZ / 10);
4032 } else if (sk->sk_state == BT_CONNECT2) {
4033 struct l2cap_conn_rsp rsp;
4037 sk->sk_state = BT_CONFIG;
4038 result = L2CAP_CR_SUCCESS;
/* Security failed: schedule disconnect and reject the peer. */
4040 sk->sk_state = BT_DISCONN;
4041 l2cap_sock_set_timer(sk, HZ / 10);
4042 result = L2CAP_CR_SEC_BLOCK;
4045 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4046 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4047 rsp.result = cpu_to_le16(result);
4048 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4049 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4050 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4056 read_unlock(&l->lock);
/* HCI callback: ACL data arrived.  Reassemble fragmented L2CAP frames
 * across ACL packets: an ACL_START fragment establishes the expected
 * total length (from the L2CAP header) and allocates conn->rx_skb;
 * continuation fragments are appended until rx_len reaches zero, then
 * the complete frame is handed to l2cap_recv_frame(). */
4061 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4063 struct l2cap_conn *conn = hcon->l2cap_data;
4065 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4068 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4070 if (flags & ACL_START) {
4071 struct l2cap_hdr *hdr;
/* A start frame while reassembly is in progress means the previous
 * frame was truncated: drop it and flag the link unreliable. */
4075 BT_ERR("Unexpected start frame (len %d)", skb->len);
4076 kfree_skb(conn->rx_skb);
4077 conn->rx_skb = NULL;
4079 l2cap_conn_unreliable(conn, ECOMM);
4083 BT_ERR("Frame is too short (len %d)", skb->len);
4084 l2cap_conn_unreliable(conn, ECOMM);
4088 hdr = (struct l2cap_hdr *) skb->data;
4089 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4091 if (len == skb->len) {
4092 /* Complete frame received */
4093 l2cap_recv_frame(conn, skb);
4097 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4099 if (skb->len > len) {
4100 BT_ERR("Frame is too long (len %d, expected len %d)",
4102 l2cap_conn_unreliable(conn, ECOMM);
4106 /* Allocate skb for the complete frame (with header) */
4107 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4111 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4113 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4115 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4117 if (!conn->rx_len) {
4118 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4119 l2cap_conn_unreliable(conn, ECOMM);
4123 if (skb->len > conn->rx_len) {
4124 BT_ERR("Fragment is too long (len %d, expected %d)",
4125 skb->len, conn->rx_len);
4126 kfree_skb(conn->rx_skb);
4127 conn->rx_skb = NULL;
4129 l2cap_conn_unreliable(conn, ECOMM);
4133 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4135 conn->rx_len -= skb->len;
4137 if (!conn->rx_len) {
4138 /* Complete frame received */
4139 l2cap_recv_frame(conn, conn->rx_skb);
4140 conn->rx_skb = NULL;
/* debugfs seq_file show handler: dump one line per L2CAP socket with
 * addresses, state, PSM, CIDs, MTUs and security level. */
4149 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4152 struct hlist_node *node;
4154 read_lock_bh(&l2cap_sk_list.lock);
4156 sk_for_each(sk, node, &l2cap_sk_list.head) {
4157 struct l2cap_pinfo *pi = l2cap_pi(sk);
4159 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4160 batostr(&bt_sk(sk)->src),
4161 batostr(&bt_sk(sk)->dst),
4162 sk->sk_state, __le16_to_cpu(pi->psm),
4164 pi->imtu, pi->omtu, pi->sec_level);
4167 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open handler: bind the seq_file single-show callback. */
4172 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4174 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (read-only seq_file). */
4177 static const struct file_operations l2cap_debugfs_fops = {
4178 .open = l2cap_debugfs_open,
4180 .llseek = seq_lseek,
4181 .release = single_release,
/* Handle of the debugfs entry, kept for removal at module exit. */
4184 static struct dentry *l2cap_debugfs;
/* proto_ops vector wiring BSD socket calls to the L2CAP implementation;
 * unsupported operations use the sock_no_* stubs. */
4186 static const struct proto_ops l2cap_sock_ops = {
4187 .family = PF_BLUETOOTH,
4188 .owner = THIS_MODULE,
4189 .release = l2cap_sock_release,
4190 .bind = l2cap_sock_bind,
4191 .connect = l2cap_sock_connect,
4192 .listen = l2cap_sock_listen,
4193 .accept = l2cap_sock_accept,
4194 .getname = l2cap_sock_getname,
4195 .sendmsg = l2cap_sock_sendmsg,
4196 .recvmsg = l2cap_sock_recvmsg,
4197 .poll = bt_sock_poll,
4198 .ioctl = bt_sock_ioctl,
4199 .mmap = sock_no_mmap,
4200 .socketpair = sock_no_socketpair,
4201 .shutdown = l2cap_sock_shutdown,
4202 .setsockopt = l2cap_sock_setsockopt,
4203 .getsockopt = l2cap_sock_getsockopt
/* Socket-family registration: creates L2CAP sockets under PF_BLUETOOTH. */
4206 static const struct net_proto_family l2cap_sock_family_ops = {
4207 .family = PF_BLUETOOTH,
4208 .owner = THIS_MODULE,
4209 .create = l2cap_sock_create,
/* HCI protocol hooks: how the HCI core delivers connection, security and
 * ACL data events up into L2CAP. */
4212 static struct hci_proto l2cap_hci_proto = {
4214 .id = HCI_PROTO_L2CAP,
4215 .connect_ind = l2cap_connect_ind,
4216 .connect_cfm = l2cap_connect_cfm,
4217 .disconn_ind = l2cap_disconn_ind,
4218 .disconn_cfm = l2cap_disconn_cfm,
4219 .security_cfm = l2cap_security_cfm,
4220 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the Bluetooth socket family and the
 * HCI protocol hooks (unwinding in reverse on failure), then create the
 * debugfs entry — a debugfs failure is logged but not fatal. */
4223 static int __init l2cap_init(void)
4227 err = proto_register(&l2cap_proto, 0);
4231 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4233 BT_ERR("L2CAP socket registration failed");
4237 err = hci_register_proto(&l2cap_hci_proto);
4239 BT_ERR("L2CAP protocol registration failed");
4240 bt_sock_unregister(BTPROTO_L2CAP);
4245 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4246 bt_debugfs, NULL, &l2cap_debugfs_fops);
4248 BT_ERR("Failed to create L2CAP debug file");
4251 BT_INFO("L2CAP ver %s", VERSION);
4252 BT_INFO("L2CAP socket layer initialized");
/* Error path label (on an omitted line) falls through here. */
4257 proto_unregister(&l2cap_proto);
/* Module exit: undo l2cap_init() in reverse order. */
4261 static void __exit l2cap_exit(void)
4263 debugfs_remove(l2cap_debugfs);
4265 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4266 BT_ERR("L2CAP socket unregistration failed");
4268 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4269 BT_ERR("L2CAP protocol unregistration failed");
4271 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol lets dependent modules trigger
 * automatic loading of the L2CAP module. */
4274 void l2cap_load(void)
4276 /* Dummy function to trigger automatic L2CAP module loading by
4277 * other modules that use L2CAP sockets but don't use any other
4278 * symbols from it. */
4281 EXPORT_SYMBOL(l2cap_load);
4283 module_init(l2cap_init);
4284 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared 'static int' near the top of the
 * file but registered here as a 'bool' module_param, and max_transmit is
 * 'static int' registered as 'uint' — modern kernels reject such type
 * mismatches at build time; confirm the declarations match. */
4286 module_param(enable_ertm, bool, 0644);
4287 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4289 module_param(max_transmit, uint, 0644);
4290 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4292 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4293 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4294 MODULE_VERSION(VERSION);
4295 MODULE_LICENSE("GPL");
/* Alias so socket(AF_BLUETOOTH, ..., BTPROTO_L2CAP) autoloads module. */
4296 MODULE_ALIAS("bt-proto-0");