2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
/* Module tunables: ERTM support is off by default; max_transmit and
 * tx_window seed the per-socket ERTM defaults in l2cap_sock_init(). */
58 static int enable_ertm = 0;
59 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
60 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
/* Feature mask reported in information responses; fixed-channel map
 * advertises only channel 1 (signalling, bit 0x02). */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for the socket teardown paths and the
 * signalling-PDU builder used by l2cap_send_cmd(). */
71 static void __l2cap_sock_close(struct sock *sk, int reason);
72 static void l2cap_sock_close(struct sock *sk);
73 static void l2cap_sock_kill(struct sock *sk);
75 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
76 u8 code, u8 ident, u16 dlen, void *data);
78 /* ---- L2CAP timers ---- */
/* sk_timer callback: the connection attempt or shutdown timed out.
 * Picks an errno based on the socket state, then force-closes. */
79 static void l2cap_sock_timeout(unsigned long arg)
81 struct sock *sk = (struct sock *) arg;
84 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A timeout while connected/configuring, or while connecting with a
 * non-SDP security level, is reported as ECONNREFUSED. */
88 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
89 reason = ECONNREFUSED;
90 else if (sk->sk_state == BT_CONNECT &&
91 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
92 reason = ECONNREFUSED;
96 __l2cap_sock_close(sk, reason);
/* Arm sk_timer to fire 'timeout' jiffies from now. */
104 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
106 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
107 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending sk_timer, if any. */
110 static void l2cap_sock_clear_timer(struct sock *sk)
112 BT_DBG("sock %p state %d", sk, sk->sk_state);
113 sk_stop_timer(sk, &sk->sk_timer);
116 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list for a matching destination CID.
 * Callers must hold the chan_list lock (double-underscore convention). */
117 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
120 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
121 if (l2cap_pi(s)->dcid == cid)
/* Same walk, keyed on the source (local) CID. Lock held by caller. */
127 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
130 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
131 if (l2cap_pi(s)->scid == cid)
137 /* Find channel with given SCID.
138 * Returns locked socket */
139 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
143 s = __l2cap_get_chan_by_scid(l, cid)
146 read_unlock(&l->lock);
/* Lookup by pending signalling-command identifier. Lock held by caller. */
150 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
154 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around the ident lookup; returns a locked socket. */
160 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
164 s = __l2cap_get_chan_by_ident(l, ident);
167 read_unlock(&l->lock);
/* Allocate the first free dynamic CID in [L2CAP_CID_DYN_START,
 * L2CAP_CID_DYN_END) for a new connection-oriented channel. */
171 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
173 u16 cid = L2CAP_CID_DYN_START;
175 for (; cid < L2CAP_CID_DYN_END; cid++) {
176 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the connection's channel list.
 * Caller holds the chan_list write lock. */
183 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
188 l2cap_pi(l->head)->prev_c = sk;
190 l2cap_pi(sk)->next_c = l->head;
191 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the doubly linked channel list, taking the
 * write lock itself (bh-safe variant). */
195 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
197 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
199 write_lock_bh(&l->lock);
204 l2cap_pi(next)->prev_c = prev;
206 l2cap_pi(prev)->next_c = next;
207 write_unlock_bh(&l->lock);
/* Attach sk to conn: assign CIDs by socket type, link it into the
 * channel list and, for incoming channels, queue on the parent's
 * accept queue. Caller holds the chan_list write lock. */
212 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
214 struct l2cap_chan_list *l = &conn->chan_list;
216 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
217 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disc reason. */
219 conn->disc_reason = 0x13;
221 l2cap_pi(sk)->conn = conn;
223 if (sk->sk_type == SOCK_SEQPACKET) {
224 /* Alloc CID for connection-oriented socket */
225 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
226 } else if (sk->sk_type == SOCK_DGRAM) {
227 /* Connectionless socket */
228 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
229 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
230 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
232 /* Raw socket can send/recv signalling messages only */
233 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
234 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
235 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
238 __l2cap_chan_link(l, sk);
241 bt_accept_enqueue(parent, sk);
245 * Must be called on the locked socket. */
/* Detach sk from its connection and mark it closed with 'err';
 * drops the hci_conn reference taken when the channel was added
 * and wakes the accept()ing parent, if any. */
246 static void l2cap_chan_del(struct sock *sk, int err)
248 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
249 struct sock *parent = bt_sk(sk)->parent;
251 l2cap_sock_clear_timer(sk);
253 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
256 /* Unlink from channel list */
257 l2cap_chan_unlink(&conn->chan_list, sk);
258 l2cap_pi(sk)->conn = NULL;
259 hci_conn_put(conn->hcon);
262 sk->sk_state = BT_CLOSED;
263 sock_set_flag(sk, SOCK_ZAPPED);
/* Not-yet-accepted child: pull it off the parent's accept queue
 * and notify the parent so a blocked accept() can re-check. */
269 bt_accept_unlink(sk);
270 parent->sk_data_ready(parent, 0);
272 sk->sk_state_change(sk);
275 /* Service level security */
/* Map the socket's security level to an HCI authentication type and
 * ask the ACL link to enforce it. PSM 0x0001 (SDP) never bonds. */
276 static inline int l2cap_check_security(struct sock *sk)
278 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
281 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
282 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
283 auth_type = HCI_AT_NO_BONDING_MITM;
285 auth_type = HCI_AT_NO_BONDING;
/* SDP traffic is downgraded from LOW to the dedicated SDP level. */
287 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
288 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
290 switch (l2cap_pi(sk)->sec_level) {
291 case BT_SECURITY_HIGH:
292 auth_type = HCI_AT_GENERAL_BONDING_MITM;
294 case BT_SECURITY_MEDIUM:
295 auth_type = HCI_AT_GENERAL_BONDING;
298 auth_type = HCI_AT_NO_BONDING;
303 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection. */
307 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
311 /* Get next available identificator.
312 * 1 - 128 are used by kernel.
313 * 129 - 199 are reserved.
314 * 200 - 254 are used by utilities like l2ping, etc.
317 spin_lock_bh(&conn->lock);
/* Wrap back to the start of the kernel range after 128. */
319 if (++conn->tx_ident > 128)
324 spin_unlock_bh(&conn->lock);
/* Build a signalling PDU and push it on the ACL link.
 * l2cap_build_cmd() allocates the skb; failure is handled by the
 * (not shown) NULL check before hci_send_acl(). */
329 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
331 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
333 BT_DBG("code 0x%2.2x", code);
338 return hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame (supervisory frame) carrying
 * 'control'. Consumes the pending F-bit / P-bit state and appends an
 * FCS when the channel uses CRC16. */
341 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
344 struct l2cap_hdr *lh;
345 struct l2cap_conn *conn = pi->conn;
/* Basic header plus the 2-byte control field; +2 more for FCS below. */
346 int count, hlen = L2CAP_HDR_SIZE + 2;
348 if (pi->fcs == L2CAP_FCS_CRC16)
351 BT_DBG("pi %p, control 0x%2.2x", pi, control);
353 count = min_t(unsigned int, conn->mtu, hlen);
354 control |= L2CAP_CTRL_FRAME_TYPE;
/* One-shot flags: consume the F and P bits requested by the state
 * machine and clear them so they are sent exactly once. */
356 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
357 control |= L2CAP_CTRL_FINAL;
358 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
361 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
362 control |= L2CAP_CTRL_POLL;
363 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
366 skb = bt_skb_alloc(count, GFP_ATOMIC);
370 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
371 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
372 lh->cid = cpu_to_le16(pi->dcid);
373 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before it (header + control). */
375 if (pi->fcs == L2CAP_FCS_CRC16) {
376 u16 fcs = crc16(0, (u8 *)lh, count - 2);
377 put_unaligned_le16(fcs, skb_put(skb, 2));
380 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR when we can receive, RNR while locally busy; always piggyback
 * the current buffer_seq as the ReqSeq acknowledgement. */
383 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
385 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
386 control |= L2CAP_SUPER_RCV_NOT_READY;
388 control |= L2CAP_SUPER_RCV_READY;
390 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
392 return l2cap_send_sframe(pi, control);
/* Kick off channel establishment: if the remote feature mask is already
 * known, send a connect request (after the security check passes);
 * otherwise issue an information request first and wait for its reply. */
395 static void l2cap_do_start(struct sock *sk)
397 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
399 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight - the conn-start path will
 * retry once the info response (or timeout) arrives. */
400 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
403 if (l2cap_check_security(sk)) {
404 struct l2cap_conn_req req;
405 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
406 req.psm = l2cap_pi(sk)->psm;
408 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
410 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
411 L2CAP_CONN_REQ, sizeof(req), &req);
/* First channel on this link: query the remote feature mask. */
414 struct l2cap_info_req req;
415 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
417 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
418 conn->info_ident = l2cap_get_ident(conn);
420 mod_timer(&conn->info_timer, jiffies +
421 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
423 l2cap_send_cmd(conn, conn->info_ident,
424 L2CAP_INFO_REQ, sizeof(req), &req);
/* Issue a disconnect request for the channel's CID pair. */
428 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
430 struct l2cap_disconn_req req;
432 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
433 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
434 l2cap_send_cmd(conn, l2cap_get_ident(conn),
435 L2CAP_DISCONN_REQ, sizeof(req), &req);
438 /* ---- L2CAP connections ---- */
/* Walk every channel on the link and advance its state machine:
 * BT_CONNECT channels (outgoing) get a connect request, BT_CONNECT2
 * channels (incoming, awaiting our answer) get a connect response
 * whose result depends on security and defer_setup. */
439 static void l2cap_conn_start(struct l2cap_conn *conn)
441 struct l2cap_chan_list *l = &conn->chan_list;
444 BT_DBG("conn %p", conn);
448 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in setup. */
451 if (sk->sk_type != SOCK_SEQPACKET) {
456 if (sk->sk_state == BT_CONNECT) {
457 if (l2cap_check_security(sk)) {
458 struct l2cap_conn_req req;
459 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
460 req.psm = l2cap_pi(sk)->psm;
462 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
464 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
465 L2CAP_CONN_REQ, sizeof(req), &req);
467 } else if (sk->sk_state == BT_CONNECT2) {
468 struct l2cap_conn_rsp rsp;
/* Response carries our view swapped: their scid = our dcid. */
469 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
470 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
472 if (l2cap_check_security(sk)) {
473 if (bt_sk(sk)->defer_setup) {
/* Userspace wants to accept explicitly: answer
 * "pending / authorization pending" and wake the
 * listener so it can decide. */
474 struct sock *parent = bt_sk(sk)->parent;
475 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
476 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
477 parent->sk_data_ready(parent, 0);
480 sk->sk_state = BT_CONFIG;
481 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
482 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security procedure still running: keep the peer waiting. */
485 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
486 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
489 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
490 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
496 read_unlock(&l->lock);
/* ACL link is up: non-SEQPACKET channels become connected immediately;
 * SEQPACKET channels still in BT_CONNECT continue via l2cap_do_start
 * (call not visible in this excerpt). */
499 static void l2cap_conn_ready(struct l2cap_conn *conn)
501 struct l2cap_chan_list *l = &conn->chan_list;
504 BT_DBG("conn %p", conn);
508 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
511 if (sk->sk_type != SOCK_SEQPACKET) {
512 l2cap_sock_clear_timer(sk);
513 sk->sk_state = BT_CONNECTED;
514 sk->sk_state_change(sk);
515 } else if (sk->sk_state == BT_CONNECT)
521 read_unlock(&l->lock);
524 /* Notify sockets that we cannot guaranty reliability anymore */
525 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
527 struct l2cap_chan_list *l = &conn->chan_list;
530 BT_DBG("conn %p", conn);
/* Only sockets that requested forced reliability are told. */
534 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
535 if (l2cap_pi(sk)->force_reliable)
539 read_unlock(&l->lock);
/* Info-request timer expired: give up on the feature exchange and
 * proceed with channel setup using defaults. */
542 static void l2cap_info_timeout(unsigned long arg)
544 struct l2cap_conn *conn = (void *) arg;
546 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
547 conn->info_ident = 0;
549 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link, initialising its locks, MTU, addresses and info timer. */
552 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
554 struct l2cap_conn *conn = hcon->l2cap_data;
559 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
563 hcon->l2cap_data = conn;
566 BT_DBG("hcon %p conn %p", hcon, conn);
568 conn->mtu = hcon->hdev->acl_mtu;
569 conn->src = &hcon->hdev->bdaddr;
570 conn->dst = &hcon->dst;
574 spin_lock_init(&conn->lock);
575 rwlock_init(&conn->chan_list.lock);
577 setup_timer(&conn->info_timer, l2cap_info_timeout,
578 (unsigned long) conn);
/* Default disconnect reason: remote user terminated connection. */
580 conn->disc_reason = 0x13;
/* Tear down the connection: free any partial reassembly skb, delete
 * every remaining channel with 'err', cancel the info timer and
 * detach from the hci_conn. */
585 static void l2cap_conn_del(struct hci_conn *hcon, int err)
587 struct l2cap_conn *conn = hcon->l2cap_data;
593 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
595 kfree_skb(conn->rx_skb);
598 while ((sk = conn->chan_list.head)) {
600 l2cap_chan_del(sk, err);
605 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
606 del_timer_sync(&conn->info_timer);
608 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
612 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
614 struct l2cap_chan_list *l = &conn->chan_list;
615 write_lock_bh(&l->lock);
616 __l2cap_chan_add(conn, sk, parent);
617 write_unlock_bh(&l->lock);
620 /* ---- Socket interface ---- */
/* Exact-match lookup: a socket bound to this PSM (sport) and source
 * address. Caller holds l2cap_sk_list.lock. */
621 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
624 struct hlist_node *node;
625 sk_for_each(sk, node, &l2cap_sk_list.head)
626 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
633 /* Find socket with psm and source bdaddr.
634 * Returns closest match.
/* Prefers an exact source-address match; falls back to a socket bound
 * to BDADDR_ANY (sk1). 'state' of 0 matches any socket state. */
636 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
638 struct sock *sk = NULL, *sk1 = NULL;
639 struct hlist_node *node;
641 sk_for_each(sk, node, &l2cap_sk_list.head) {
642 if (state && sk->sk_state != state)
645 if (l2cap_pi(sk)->psm == psm) {
647 if (!bacmp(&bt_sk(sk)->src, src))
651 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke on an exact match. */
655 return node ? sk : sk1;
658 /* Find socket with given address (psm, src).
659 * Returns locked socket */
660 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
663 read_lock(&l2cap_sk_list.lock);
664 s = __l2cap_get_sock_by_psm(state, psm, src);
667 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: release any queued skbs. */
671 static void l2cap_sock_destruct(struct sock *sk)
675 skb_queue_purge(&sk->sk_receive_queue);
676 skb_queue_purge(&sk->sk_write_queue);
/* Close every pending (not yet accepted) child of a listening socket,
 * then zap the listener itself. */
679 static void l2cap_sock_cleanup_listen(struct sock *parent)
683 BT_DBG("parent %p", parent);
685 /* Close not yet accepted channels */
686 while ((sk = bt_accept_dequeue(parent, NULL)))
687 l2cap_sock_close(sk);
689 parent->sk_state = BT_CLOSED;
690 sock_set_flag(parent, SOCK_ZAPPED);
693 /* Kill socket (only if zapped and orphan)
694 * Must be called on unlocked socket.
696 static void l2cap_sock_kill(struct sock *sk)
/* Only fully detached sockets may be freed. */
698 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
701 BT_DBG("sk %p state %d", sk, sk->sk_state);
703 /* Kill poor orphan */
704 bt_sock_unlink(&l2cap_sk_list, sk);
705 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners clean their accept queue; connected
 * SEQPACKET channels send a disconnect request and wait in BT_DISCONN;
 * half-open incoming channels answer the pending connect request with a
 * rejection; everything else is deleted outright. Caller holds the
 * socket lock. */
709 static void __l2cap_sock_close(struct sock *sk, int reason)
711 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
713 switch (sk->sk_state) {
715 l2cap_sock_cleanup_listen(sk);
720 if (sk->sk_type == SOCK_SEQPACKET) {
721 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Graceful teardown: request disconnect, give the peer
 * sk_sndtimeo jiffies to answer before the timer fires. */
723 sk->sk_state = BT_DISCONN;
724 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
725 l2cap_send_disconn_req(conn, sk);
727 l2cap_chan_del(sk, reason);
/* BT_CONNECT2 with defer_setup: we still owe the peer a connect
 * response - reject it before deleting the channel. */
731 if (sk->sk_type == SOCK_SEQPACKET) {
732 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
733 struct l2cap_conn_rsp rsp;
736 if (bt_sk(sk)->defer_setup)
737 result = L2CAP_CR_SEC_BLOCK;
739 result = L2CAP_CR_BAD_PSM;
741 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
742 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
743 rsp.result = cpu_to_le16(result);
744 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
745 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
746 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
748 l2cap_chan_del(sk, reason);
753 l2cap_chan_del(sk, reason);
757 sock_set_flag(sk, SOCK_ZAPPED);
762 /* Must be called on unlocked socket. */
/* Public close path: stop the timer, then close with ECONNRESET. */
763 static void l2cap_sock_close(struct sock *sk)
765 l2cap_sock_clear_timer(sk);
767 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise a fresh socket's L2CAP state: children inherit all
 * channel parameters from their listening parent; stand-alone sockets
 * get module defaults. Also sets up the ERTM TX/SREJ queues. */
772 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
774 struct l2cap_pinfo *pi = l2cap_pi(sk);
779 sk->sk_type = parent->sk_type;
780 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
782 pi->imtu = l2cap_pi(parent)->imtu;
783 pi->omtu = l2cap_pi(parent)->omtu;
784 pi->mode = l2cap_pi(parent)->mode;
785 pi->fcs = l2cap_pi(parent)->fcs;
786 pi->max_tx = l2cap_pi(parent)->max_tx;
787 pi->tx_win = l2cap_pi(parent)->tx_win;
788 pi->sec_level = l2cap_pi(parent)->sec_level;
789 pi->role_switch = l2cap_pi(parent)->role_switch;
790 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: module-level defaults. */
792 pi->imtu = L2CAP_DEFAULT_MTU;
794 pi->mode = L2CAP_MODE_BASIC;
795 pi->max_tx = max_transmit;
796 pi->fcs = L2CAP_FCS_CRC16;
797 pi->tx_win = tx_window;
798 pi->sec_level = BT_SECURITY_LOW;
800 pi->force_reliable = 0;
803 /* Default config options */
805 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
806 skb_queue_head_init(TX_QUEUE(sk));
807 skb_queue_head_init(SREJ_QUEUE(sk));
808 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor registered with the Bluetooth socket layer. */
811 static struct proto l2cap_proto = {
813 .owner = THIS_MODULE,
814 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and pre-wire a struct sock for PF_BLUETOOTH/L2CAP:
 * destructor, connect timeout, state timer, and global-list linkage. */
817 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
821 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
825 sock_init_data(sock, sk);
826 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
828 sk->sk_destruct = l2cap_sock_destruct;
829 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
831 sock_reset_flag(sk, SOCK_ZAPPED);
833 sk->sk_protocol = proto;
834 sk->sk_state = BT_OPEN;
836 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
838 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) entry: validate the socket type, require CAP_NET_RAW for
 * user-created raw sockets, then allocate and initialise. */
842 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
847 BT_DBG("sock %p", sock);
849 sock->state = SS_UNCONNECTED;
851 if (sock->type != SOCK_SEQPACKET &&
852 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
853 return -ESOCKTNOSUPPORT;
855 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
858 sock->ops = &l2cap_sock_ops;
860 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
864 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the sockaddr_l2 defensively (truncate to our size),
 * enforce the privileged-PSM rule, reject duplicate (psm, bdaddr)
 * bindings, then record the source address and PSM. */
868 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
870 struct sock *sk = sock->sk;
871 struct sockaddr_l2 la;
876 if (!addr || addr->sa_family != AF_BLUETOOTH)
879 memset(&la, 0, sizeof(la));
880 len = min_t(unsigned int, sizeof(la), alen);
881 memcpy(&la, addr, len);
888 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved; require CAP_NET_BIND_SERVICE. */
893 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
894 !capable(CAP_NET_BIND_SERVICE)) {
899 write_lock_bh(&l2cap_sk_list.lock);
901 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
904 /* Save source address */
905 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
906 l2cap_pi(sk)->psm = la.l2_psm;
907 l2cap_pi(sk)->sport = la.l2_psm;
908 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) default to SDP security level. */
910 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
911 __le16_to_cpu(la.l2_psm) == 0x0003)
912 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
915 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to the destination, pick an HCI auth type from the
 * socket type / PSM / security level, create the ACL link and attach
 * this channel to it. If the link is already up, non-SEQPACKET sockets
 * become connected immediately. */
922 static int l2cap_do_connect(struct sock *sk)
924 bdaddr_t *src = &bt_sk(sk)->src;
925 bdaddr_t *dst = &bt_sk(sk)->dst;
926 struct l2cap_conn *conn;
927 struct hci_conn *hcon;
928 struct hci_dev *hdev;
932 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
935 hdev = hci_get_route(dst, src);
937 return -EHOSTUNREACH;
939 hci_dev_lock_bh(hdev);
/* Raw sockets (e.g. dedicated-bonding tools) map security levels
 * to dedicated-bonding auth types. */
943 if (sk->sk_type == SOCK_RAW) {
944 switch (l2cap_pi(sk)->sec_level) {
945 case BT_SECURITY_HIGH:
946 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
948 case BT_SECURITY_MEDIUM:
949 auth_type = HCI_AT_DEDICATED_BONDING;
952 auth_type = HCI_AT_NO_BONDING;
/* SDP never bonds; LOW is downgraded to the SDP level. */
955 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
956 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
957 auth_type = HCI_AT_NO_BONDING_MITM;
959 auth_type = HCI_AT_NO_BONDING;
961 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
962 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
964 switch (l2cap_pi(sk)->sec_level) {
965 case BT_SECURITY_HIGH:
966 auth_type = HCI_AT_GENERAL_BONDING_MITM;
968 case BT_SECURITY_MEDIUM:
969 auth_type = HCI_AT_GENERAL_BONDING;
972 auth_type = HCI_AT_NO_BONDING;
977 hcon = hci_connect(hdev, ACL_LINK, dst,
978 l2cap_pi(sk)->sec_level, auth_type);
982 conn = l2cap_conn_add(hcon, 0);
990 /* Update source addr of the socket */
991 bacpy(src, conn->src);
993 l2cap_chan_add(conn, sk, NULL);
995 sk->sk_state = BT_CONNECT;
996 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
998 if (hcon->state == BT_CONNECTED) {
999 if (sk->sk_type != SOCK_SEQPACKET) {
1000 l2cap_sock_clear_timer(sk);
1001 sk->sk_state = BT_CONNECTED;
1007 hci_dev_unlock_bh(hdev);
/* connect(2): validate address and mode, record (bdaddr, psm), start
 * the connection and optionally block until BT_CONNECTED. */
1012 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1014 struct sock *sk = sock->sk;
1015 struct sockaddr_l2 la;
1018 BT_DBG("sk %p", sk);
1020 if (!addr || alen < sizeof(addr->sa_family) ||
1021 addr->sa_family != AF_BLUETOOTH)
1024 memset(&la, 0, sizeof(la));
1025 len = min_t(unsigned int, sizeof(la), alen);
1026 memcpy(&la, addr, len);
/* Connection-oriented sockets must name a PSM. */
1033 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
/* ERTM/streaming modes are only valid when enable_ertm is set
 * (check not visible in this excerpt). */
1038 switch (l2cap_pi(sk)->mode) {
1039 case L2CAP_MODE_BASIC:
1041 case L2CAP_MODE_ERTM:
1042 case L2CAP_MODE_STREAMING:
1051 switch (sk->sk_state) {
1055 /* Already connecting */
1059 /* Already connected */
1072 /* Set destination address and psm */
1073 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1074 l2cap_pi(sk)->psm = la.l2_psm;
1076 err = l2cap_do_connect(sk);
/* Block (honouring O_NONBLOCK) until the channel is established. */
1081 err = bt_sock_wait_state(sk, BT_CONNECTED,
1082 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET sockets may listen; if no PSM was
 * bound, auto-allocate an odd dynamic PSM in [0x1001, 0x1100). */
1088 static int l2cap_sock_listen(struct socket *sock, int backlog)
1090 struct sock *sk = sock->sk;
1093 BT_DBG("sk %p backlog %d", sk, backlog);
1097 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1102 switch (l2cap_pi(sk)->mode) {
1103 case L2CAP_MODE_BASIC:
1105 case L2CAP_MODE_ERTM:
1106 case L2CAP_MODE_STREAMING:
1115 if (!l2cap_pi(sk)->psm) {
1116 bdaddr_t *src = &bt_sk(sk)->src;
1121 write_lock_bh(&l2cap_sk_list.lock);
/* Valid PSMs are odd; step by 2 through the dynamic range. */
1123 for (psm = 0x1001; psm < 0x1100; psm += 2)
1124 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1125 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1126 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1131 write_unlock_bh(&l2cap_sk_list.lock);
1137 sk->sk_max_ack_backlog = backlog;
1138 sk->sk_ack_backlog = 0;
1139 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (wake-one, interruptible) on the listener until a
 * child appears on the accept queue, re-checking the listen state
 * after each wakeup since the socket lock is dropped while sleeping. */
1146 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1148 DECLARE_WAITQUEUE(wait, current);
1149 struct sock *sk = sock->sk, *nsk;
1153 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1155 if (sk->sk_state != BT_LISTEN) {
1160 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1162 BT_DBG("sk %p timeo %ld", sk, timeo);
1164 /* Wait for an incoming connection. (wake-one). */
1165 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1166 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1167 set_current_state(TASK_INTERRUPTIBLE);
1174 timeo = schedule_timeout(timeo);
1175 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
/* Listener may have been closed while we slept. */
1177 if (sk->sk_state != BT_LISTEN) {
1182 if (signal_pending(current)) {
1183 err = sock_intr_errno(timeo);
1187 set_current_state(TASK_RUNNING);
1188 remove_wait_queue(sk_sleep(sk), &wait);
1193 newsock->state = SS_CONNECTED;
1195 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer
 * (dst, dcid) or the local (src, scid/sport) identity. */
1202 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1204 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1205 struct sock *sk = sock->sk;
1207 BT_DBG("sock %p, sk %p", sock, sk);
1209 addr->sa_family = AF_BLUETOOTH;
1210 *len = sizeof(struct sockaddr_l2);
1213 la->l2_psm = l2cap_pi(sk)->psm;
1214 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1215 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1217 la->l2_psm = l2cap_pi(sk)->sport;
1218 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1219 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: the peer failed to answer our poll. Give up and
 * disconnect after remote_max_tx retries; otherwise poll again (RR/RNR
 * with the P bit set) and re-arm the monitor timer. */
1225 static void l2cap_monitor_timeout(unsigned long arg)
1227 struct sock *sk = (void *) arg;
1231 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1232 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1237 l2cap_pi(sk)->retry_count++;
1238 __mod_monitor_timer();
1240 control = L2CAP_CTRL_POLL;
1241 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* ERTM retransmission timer: no ack arrived in time. Enter the WAIT_F
 * state and poll the peer, switching over to the monitor timer. */
1245 static void l2cap_retrans_timeout(unsigned long arg)
1247 struct sock *sk = (void *) arg;
1251 l2cap_pi(sk)->retry_count = 1;
1252 __mod_monitor_timer();
1254 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1256 control = L2CAP_CTRL_POLL;
1257 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
/* Drop frames from the head of the TX queue that the peer has
 * acknowledged (everything before expected_ack_seq); stop the
 * retransmission timer once nothing is outstanding. */
1261 static void l2cap_drop_acked_frames(struct sock *sk)
1263 struct sk_buff *skb;
1265 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1266 l2cap_pi(sk)->unacked_frames) {
1267 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1270 skb = skb_dequeue(TX_QUEUE(sk));
1273 l2cap_pi(sk)->unacked_frames--;
1276 if (!l2cap_pi(sk)->unacked_frames)
1277 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one frame to the ACL layer. */
1282 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1284 struct l2cap_pinfo *pi = l2cap_pi(sk);
1287 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1289 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode: transmit every queued frame in order. Each frame is
 * cloned so the control field (TxSeq) and FCS can be patched in; sent
 * frames are dequeued immediately (no retransmission in streaming). */
1296 static int l2cap_streaming_send(struct sock *sk)
1298 struct sk_buff *skb, *tx_skb;
1299 struct l2cap_pinfo *pi = l2cap_pi(sk);
1303 while ((skb = sk->sk_send_head)) {
1304 tx_skb = skb_clone(skb, GFP_ATOMIC);
1306 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1307 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1308 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS covers the frame minus its own trailing 2 bytes. */
1310 if (pi->fcs == L2CAP_FCS_CRC16) {
1311 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1312 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1315 err = l2cap_do_send(sk, tx_skb);
1317 l2cap_send_disconn_req(pi->conn, sk);
/* TxSeq is modulo-64 (6-bit sequence number). */
1321 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1323 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1324 sk->sk_send_head = NULL;
1326 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1328 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the queued I-frame whose TxSeq matches 'tx_seq' (SREJ
 * recovery). Disconnects if the frame has already been retried
 * remote_max_tx times; otherwise clones it, refreshes ReqSeq/TxSeq
 * and FCS in the clone, and resends. */
1334 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1336 struct l2cap_pinfo *pi = l2cap_pi(sk);
1337 struct sk_buff *skb, *tx_skb;
1341 skb = skb_peek(TX_QUEUE(sk));
/* Linear scan of the TX queue for the requested sequence number. */
1343 if (bt_cb(skb)->tx_seq != tx_seq) {
1344 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1346 skb = skb_queue_next(TX_QUEUE(sk), skb);
1350 if (pi->remote_max_tx &&
1351 bt_cb(skb)->retries == pi->remote_max_tx) {
1352 l2cap_send_disconn_req(pi->conn, sk);
1356 tx_skb = skb_clone(skb, GFP_ATOMIC);
/* Retry count lives on the original skb, not the clone. */
1357 bt_cb(skb)->retries++;
1358 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1359 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1360 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1361 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1363 if (pi->fcs == L2CAP_FCS_CRC16) {
1364 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1365 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1368 err = l2cap_do_send(sk, tx_skb);
1370 l2cap_send_disconn_req(pi->conn, sk);
/* ERTM transmit path: send queued I-frames while the TX window has
 * room and the peer is not busy. Sent frames stay on the queue for
 * possible retransmission; sk_send_head tracks the next unsent one. */
1378 static int l2cap_ertm_send(struct sock *sk)
1380 struct sk_buff *skb, *tx_skb;
1381 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Nothing may be sent while we are waiting for an F-bit reply. */
1385 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1388 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1389 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1391 if (pi->remote_max_tx &&
1392 bt_cb(skb)->retries == pi->remote_max_tx) {
1393 l2cap_send_disconn_req(pi->conn, sk);
1397 tx_skb = skb_clone(skb, GFP_ATOMIC);
1399 bt_cb(skb)->retries++;
1401 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1402 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1403 control |= L2CAP_CTRL_FINAL;
1404 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1406 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1407 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1408 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): the FCS is computed over the clone's length but
 * read from / written into skb->data (the original queued skb)
 * rather than tx_skb->data. Compare l2cap_retransmit_frame() and
 * l2cap_streaming_send(), which both use tx_skb->data. Since
 * skb_clone() shares the data buffer this may happen to work, but
 * it looks like a bug - upstream later changed this to tx_skb. */
1411 if (pi->fcs == L2CAP_FCS_CRC16) {
1412 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1413 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1416 err = l2cap_do_send(sk, tx_skb);
1418 l2cap_send_disconn_req(pi->conn, sk);
1421 __mod_retrans_timer();
1423 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1424 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1426 pi->unacked_frames++;
1429 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1430 sk->sk_send_head = NULL;
1432 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Acknowledge received frames: if locally busy send RNR; otherwise try
 * to piggyback the ack on pending I-frames via l2cap_ertm_send(), and
 * fall back to an explicit RR when nothing was sent. */
1440 static int l2cap_send_ack(struct l2cap_pinfo *pi)
1442 struct sock *sk = (struct sock *)pi;
1445 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1447 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1448 control |= L2CAP_SUPER_RCV_NOT_READY;
1449 return l2cap_send_sframe(pi, control);
1450 } else if (l2cap_ertm_send(sk) == 0) {
1451 control |= L2CAP_SUPER_RCV_READY;
1452 return l2cap_send_sframe(pi, control);
/* Send a SREJ S-frame (with F bit) for the last entry on the SREJ
 * list - the tail of the missing-frame bookkeeping. */
1457 static int l2cap_send_srejtail(struct sock *sk)
1459 struct srej_list *tail;
1462 control = L2CAP_SUPER_SELECT_REJECT;
1463 control |= L2CAP_CTRL_FINAL;
1465 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1466 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1468 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy 'len' bytes of user iovec data into skb: the first 'count'
 * bytes into the linear area, the remainder into a chain of fragment
 * skbs each at most conn->mtu bytes. */
1473 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1475 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1476 struct sk_buff **frag;
1479 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1486 /* Continuation fragments (no L2CAP header) */
1487 frag = &skb_shinfo(skb)->frag_list;
1489 count = min_t(unsigned int, conn->mtu, len);
1491 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1494 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1500 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * then the user payload. */
1506 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1508 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1509 struct sk_buff *skb;
/* hlen = header + PSM field. */
1510 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1511 struct l2cap_hdr *lh;
1513 BT_DBG("sk %p len %d", sk, (int)len);
1515 count = min_t(unsigned int, (conn->mtu - hlen), len);
1516 skb = bt_skb_send_alloc(sk, count + hlen,
1517 msg->msg_flags & MSG_DONTWAIT, &err);
1519 return ERR_PTR(-ENOMEM);
1521 /* Create L2CAP header */
1522 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1523 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1524 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1525 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1527 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1528 if (unlikely(err < 0)) {
1530 return ERR_PTR(err);
/* Build a basic-mode B-frame: plain L2CAP header + payload. */
1535 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1537 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1538 struct sk_buff *skb;
1539 int err, count, hlen = L2CAP_HDR_SIZE;
1540 struct l2cap_hdr *lh;
1542 BT_DBG("sk %p len %d", sk, (int)len);
1544 count = min_t(unsigned int, (conn->mtu - hlen), len);
1545 skb = bt_skb_send_alloc(sk, count + hlen,
1546 msg->msg_flags & MSG_DONTWAIT, &err);
1548 return ERR_PTR(-ENOMEM);
1550 /* Create L2CAP header */
1551 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1552 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1553 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1555 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1556 if (unlikely(err < 0)) {
1558 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame: L2CAP header, 2-byte control field,
 * optional 2-byte SDU length (only on SDU_START, when sdulen != 0),
 * payload, and a zero FCS placeholder (patched at transmit time). */
1563 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1565 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1566 struct sk_buff *skb;
1567 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1568 struct l2cap_hdr *lh;
1570 BT_DBG("sk %p len %d", sk, (int)len);
1573 return ERR_PTR(-ENOTCONN);
/* FCS adds 2 more bytes to the frame length. */
1578 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1581 count = min_t(unsigned int, (conn->mtu - hlen), len);
1582 skb = bt_skb_send_alloc(sk, count + hlen,
1583 msg->msg_flags & MSG_DONTWAIT, &err);
1585 return ERR_PTR(-ENOMEM);
1587 /* Create L2CAP header */
1588 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1589 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1590 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1591 put_unaligned_le16(control, skb_put(skb, 2));
1593 put_unaligned_le16(sdulen, skb_put(skb, 2));
1595 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1596 if (unlikely(err < 0)) {
1598 return ERR_PTR(err);
1601 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1602 put_unaligned_le16(0, skb_put(skb, 2));
1604 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame (carrying
 * the total SDU length), CONTINUE frames, and a final END frame.  Frames
 * are built on a private queue so a mid-stream failure can purge cleanly,
 * then spliced onto the socket's TX queue in one shot. */
1608 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1610 struct l2cap_pinfo *pi = l2cap_pi(sk);
1611 struct sk_buff *skb;
1612 struct sk_buff_head sar_queue;
1616 __skb_queue_head_init(&sar_queue);
1617 control = L2CAP_SDU_START;
/* START frame: payload is one MPS; 'len' (total SDU size) rides as sdulen */
1618 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1620 return PTR_ERR(skb);
1622 __skb_queue_tail(&sar_queue, skb);
1623 len -= pi->remote_mps;
1624 size += pi->remote_mps;
1630 if (len > pi->remote_mps) {
1631 control |= L2CAP_SDU_CONTINUE;
1632 buflen = pi->remote_mps;
1634 control |= L2CAP_SDU_END;
1638 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Failure mid-SDU: drop every frame built so far, send nothing partial */
1640 skb_queue_purge(&sar_queue);
1641 return PTR_ERR(skb);
1644 __skb_queue_tail(&sar_queue, skb);
1649 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1650 if (sk->sk_send_head == NULL)
1651 sk->sk_send_head = sar_queue.next;
/* sendmsg() handler.  Dispatches on socket type and channel mode:
 * SOCK_DGRAM -> connectionless PDU; basic mode -> single B-frame (bounded
 * by the outgoing MTU); ERTM/streaming -> one unsegmented I-frame if the
 * SDU fits in the remote MPS, otherwise SAR segmentation, then kick the
 * mode-specific transmit engine. */
1656 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1658 struct sock *sk = sock->sk;
1659 struct l2cap_pinfo *pi = l2cap_pi(sk);
1660 struct sk_buff *skb;
1664 BT_DBG("sock %p, sk %p", sock, sk);
1666 err = sock_error(sk);
/* Out-of-band data is not supported on L2CAP sockets */
1670 if (msg->msg_flags & MSG_OOB)
1675 if (sk->sk_state != BT_CONNECTED) {
1680 /* Connectionless channel */
1681 if (sk->sk_type == SOCK_DGRAM) {
1682 skb = l2cap_create_connless_pdu(sk, msg, len);
1686 err = l2cap_do_send(sk, skb);
1691 case L2CAP_MODE_BASIC:
1692 /* Check outgoing MTU */
1693 if (len > pi->omtu) {
1698 /* Create a basic PDU */
1699 skb = l2cap_create_basic_pdu(sk, msg, len);
1705 err = l2cap_do_send(sk, skb);
1710 case L2CAP_MODE_ERTM:
1711 case L2CAP_MODE_STREAMING:
1712 /* Entire SDU fits into one PDU */
1713 if (len <= pi->remote_mps) {
1714 control = L2CAP_SDU_UNSEGMENTED;
1715 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1720 __skb_queue_tail(TX_QUEUE(sk), skb);
1721 if (sk->sk_send_head == NULL)
1722 sk->sk_send_head = skb;
1724 /* Segment SDU into multiples PDUs */
1725 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Queued frames are pushed by the engine matching the negotiated mode */
1730 if (pi->mode == L2CAP_MODE_STREAMING)
1731 err = l2cap_streaming_send(sk);
1733 err = l2cap_ertm_send(sk);
1740 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() handler.  If the channel was accepted with deferred setup
 * pending (BT_CONNECT2), the first read completes the connection by sending
 * the delayed CONN_RSP success; actual data delivery is then delegated to
 * the generic bt_sock_recvmsg(). */
1749 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1751 struct sock *sk = sock->sk;
1755 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1756 struct l2cap_conn_rsp rsp;
1758 sk->sk_state = BT_CONFIG;
/* In the response our dcid/scid are the peer's scid/dcid respectively */
1760 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1761 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1762 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1763 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* ident was stashed from the original CONN_REQ in l2cap_connect_req() */
1764 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1765 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1773 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS (channel parameters) and the
 * link-mode bitmask mapped onto the newer security levels. */
1776 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1778 struct sock *sk = sock->sk;
1779 struct l2cap_options opts;
1783 BT_DBG("sk %p", sk);
/* Pre-load current values so a short copy_from_user keeps the rest */
1789 opts.imtu = l2cap_pi(sk)->imtu;
1790 opts.omtu = l2cap_pi(sk)->omtu;
1791 opts.flush_to = l2cap_pi(sk)->flush_to;
1792 opts.mode = l2cap_pi(sk)->mode;
1793 opts.fcs = l2cap_pi(sk)->fcs;
1794 opts.max_tx = l2cap_pi(sk)->max_tx;
1795 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1797 len = min_t(unsigned int, sizeof(opts), optlen);
1798 if (copy_from_user((char *) &opts, optval, len)) {
1803 l2cap_pi(sk)->imtu = opts.imtu;
1804 l2cap_pi(sk)->omtu = opts.omtu;
1805 l2cap_pi(sk)->mode = opts.mode;
1806 l2cap_pi(sk)->fcs = opts.fcs;
1807 l2cap_pi(sk)->max_tx = opts.max_tx;
/* tx_win is stored as u8; userspace field is wider, hence the narrowing */
1808 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1812 if (get_user(opt, (u32 __user *) optval)) {
/* Translate legacy link-mode bits to security levels; the strongest
 * requested bit wins because later assignments overwrite earlier ones */
1817 if (opt & L2CAP_LM_AUTH)
1818 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1819 if (opt & L2CAP_LM_ENCRYPT)
1820 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1821 if (opt & L2CAP_LM_SECURE)
1822 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1824 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1825 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() entry point.  SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY (seqpacket/raw only) and
 * BT_DEFER_SETUP (bound/listening sockets only). */
1839 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1841 struct sock *sk = sock->sk;
1840 struct bt_security sec;
1844 BT_DBG("sk %p", sk);
1846 if (level == SOL_L2CAP)
1847 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1849 if (level != SOL_BLUETOOTH)
1850 return -ENOPROTOOPT;
1856 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
/* Default for a short user buffer that omits the level field */
1861 sec.level = BT_SECURITY_LOW;
1863 len = min_t(unsigned int, sizeof(sec), optlen);
1864 if (copy_from_user((char *) &sec, optval, len)) {
1869 if (sec.level < BT_SECURITY_LOW ||
1870 sec.level > BT_SECURITY_HIGH) {
1875 l2cap_pi(sk)->sec_level = sec.level;
1878 case BT_DEFER_SETUP:
1879 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1884 if (get_user(opt, (u32 __user *) optval)) {
1889 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS, the reconstructed legacy
 * link-mode bitmask, and L2CAP_CONNINFO (handle + remote device class). */
1901 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1903 struct sock *sk = sock->sk;
1904 struct l2cap_options opts;
1905 struct l2cap_conninfo cinfo;
1909 BT_DBG("sk %p", sk);
1911 if (get_user(len, optlen))
1918 opts.imtu = l2cap_pi(sk)->imtu;
1919 opts.omtu = l2cap_pi(sk)->omtu;
1920 opts.flush_to = l2cap_pi(sk)->flush_to;
1921 opts.mode = l2cap_pi(sk)->mode;
1922 opts.fcs = l2cap_pi(sk)->fcs;
1923 opts.max_tx = l2cap_pi(sk)->max_tx;
1924 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1926 len = min_t(unsigned int, len, sizeof(opts));
1927 if (copy_to_user(optval, (char *) &opts, len))
/* Map the security level back onto the cumulative legacy LM bits */
1933 switch (l2cap_pi(sk)->sec_level) {
1934 case BT_SECURITY_LOW:
1935 opt = L2CAP_LM_AUTH;
1937 case BT_SECURITY_MEDIUM:
1938 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1940 case BT_SECURITY_HIGH:
1941 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1949 if (l2cap_pi(sk)->role_switch)
1950 opt |= L2CAP_LM_MASTER;
1952 if (l2cap_pi(sk)->force_reliable)
1953 opt |= L2CAP_LM_RELIABLE;
1955 if (put_user(opt, (u32 __user *) optval))
1959 case L2CAP_CONNINFO:
/* CONNINFO is valid once connected, or during deferred setup */
1960 if (sk->sk_state != BT_CONNECTED &&
1961 !(sk->sk_state == BT_CONNECT2 &&
1962 bt_sk(sk)->defer_setup)) {
1967 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1968 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1970 len = min_t(unsigned int, len, sizeof(cinfo));
1971 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() entry point.  SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH reports BT_SECURITY and BT_DEFER_SETUP. */
1985 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1987 struct sock *sk = sock->sk;
1988 struct bt_security sec;
1991 BT_DBG("sk %p", sk);
1993 if (level == SOL_L2CAP)
1994 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
1996 if (level != SOL_BLUETOOTH)
1997 return -ENOPROTOOPT;
1999 if (get_user(len, optlen))
2006 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
2011 sec.level = l2cap_pi(sk)->sec_level;
2013 len = min_t(unsigned int, len, sizeof(sec));
2014 if (copy_to_user(optval, (char *) &sec, len))
2019 case BT_DEFER_SETUP:
2020 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2025 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() handler: close the channel once (sk_shutdown guards against
 * repeats) and, with SO_LINGER set, wait for BT_CLOSED up to lingertime. */
2039 static int l2cap_sock_shutdown(struct socket *sock, int how)
2041 struct sock *sk = sock->sk;
2044 BT_DBG("sock %p, sk %p", sock, sk);
2050 if (!sk->sk_shutdown) {
2051 sk->sk_shutdown = SHUTDOWN_MASK;
2052 l2cap_sock_clear_timer(sk);
2053 __l2cap_sock_close(sk, 0);
2055 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2056 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() handler: full shutdown of both directions, then kill the sock */
2063 static int l2cap_sock_release(struct socket *sock)
2065 struct sock *sk = sock->sk;
2068 BT_DBG("sock %p, sk %p", sock, sk);
2073 err = l2cap_sock_shutdown(sock, 2);
2076 l2cap_sock_kill(sk);
/* Configuration finished on both sides: mark the channel connected and
 * wake whichever sleeper applies (connect() for outgoing channels,
 * accept() on the parent for incoming ones). */
2080 static void l2cap_chan_ready(struct sock *sk)
2082 struct sock *parent = bt_sk(sk)->parent;
2084 BT_DBG("sk %p, parent %p", sk, parent);
2086 l2cap_pi(sk)->conf_state = 0;
2087 l2cap_sock_clear_timer(sk);
2090 /* Outgoing channel.
2091 * Wake up socket sleeping on connect.
2093 sk->sk_state = BT_CONNECTED;
2094 sk->sk_state_change(sk);
2096 /* Incoming channel.
2097 * Wake up socket sleeping on accept.
2099 parent->sk_data_ready(parent, 0);
2103 /* Copy frame to all raw sockets on that connection */
2104 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2106 struct l2cap_chan_list *l = &conn->chan_list;
2107 struct sk_buff *nskb;
2110 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock; clone the skb
 * per raw socket so each receiver owns an independent copy. */
2112 read_lock(&l->lock);
2113 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2114 if (sk->sk_type != SOCK_RAW)
2117 /* Don't send frame to the socket it came from */
2120 nskb = skb_clone(skb, GFP_ATOMIC);
/* sock_queue_rcv_skb() failure (e.g. rcvbuf full): clone is dropped */
2124 if (sock_queue_rcv_skb(sk, nskb))
2127 read_unlock(&l->lock);
2130 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID 0x0001),
 * command header, then 'dlen' bytes of payload; data beyond the link MTU
 * continues in frag_list fragments without further headers. */
2131 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2132 u8 code, u8 ident, u16 dlen, void *data)
2134 struct sk_buff *skb, **frag;
2135 struct l2cap_cmd_hdr *cmd;
2136 struct l2cap_hdr *lh;
2139 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2140 conn, code, ident, dlen);
2142 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2143 count = min_t(unsigned int, conn->mtu, len);
2145 skb = bt_skb_alloc(count, GFP_ATOMIC);
2149 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2150 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2151 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2153 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
/* cmd->len is stored little-endian on the wire */
2156 cmd->len = cpu_to_le16(dlen);
2159 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2160 memcpy(skb_put(skb, count), data, count);
2166 /* Continuation fragments (no L2CAP header) */
2167 frag = &skb_shinfo(skb)->frag_list;
2169 count = min_t(unsigned int, conn->mtu, len);
2171 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2175 memcpy(skb_put(*frag, count), data, count);
2180 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: returns its total encoded size
 * and yields type/olen/val.  1/2/4-byte values are converted to host
 * order; larger options are returned as a pointer cast into *val. */
2190 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2192 struct l2cap_conf_opt *opt = *ptr;
2195 len = L2CAP_CONF_OPT_SIZE + opt->len;
2203 *val = *((u8 *) opt->val);
2207 *val = __le16_to_cpu(*((__le16 *) opt->val));
2211 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer, caller memcpy's it out */
2215 *val = (unsigned long) opt->val;
2219 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr and advance it.  1/2/4-byte
 * values are written little-endian; anything else is copied verbatim from
 * the buffer 'val' points at (val doubles as a pointer in that case). */
2223 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2225 struct l2cap_conf_opt *opt = *ptr;
2227 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2234 *((u8 *) opt->val) = val;
2238 *((__le16 *) opt->val) = cpu_to_le16(val);
2242 *((__le32 *) opt->val) = cpu_to_le32(val);
2246 memcpy(opt->val, (void *) val, len);
2250 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: flush a pending acknowledgement to the peer */
2253 static void l2cap_ack_timeout(unsigned long arg)
2255 struct sock *sk = (void *) arg;
2258 l2cap_send_ack(l2cap_pi(sk));
/* Reset ERTM per-channel state: zero the sequence/ack bookkeeping, arm the
 * retransmission, monitor and ack timers, and init the SREJ queue. */
2262 static inline void l2cap_ertm_init(struct sock *sk)
2264 l2cap_pi(sk)->expected_ack_seq = 0;
2265 l2cap_pi(sk)->unacked_frames = 0;
2266 l2cap_pi(sk)->buffer_seq = 0;
2267 l2cap_pi(sk)->num_acked = 0;
2268 l2cap_pi(sk)->frames_sent = 0;
2270 setup_timer(&l2cap_pi(sk)->retrans_timer,
2271 l2cap_retrans_timeout, (unsigned long) sk);
2272 setup_timer(&l2cap_pi(sk)->monitor_timer,
2273 l2cap_monitor_timeout, (unsigned long) sk);
2274 setup_timer(&l2cap_pi(sk)->ack_timer,
2275 l2cap_ack_timeout, (unsigned long) sk);
2277 __skb_queue_head_init(SREJ_QUEUE(sk));
/* Non-zero iff 'mode' is advertised both locally and in the peer's
 * feature mask (ERTM/streaming gated on the enable_ertm module param). */
2280 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2282 u32 local_feat_mask = l2cap_feat_mask;
2284 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2287 case L2CAP_MODE_ERTM:
2288 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2289 case L2CAP_MODE_STREAMING:
2290 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Keep the requested ERTM/streaming mode if the remote supports it,
 * otherwise fall back to basic mode. */
2296 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2299 case L2CAP_MODE_STREAMING:
2300 case L2CAP_MODE_ERTM:
2301 if (l2cap_mode_supported(mode, remote_feat_mask))
2305 return L2CAP_MODE_BASIC;
/* Build our outgoing CONF_REQ into 'data': MTU option for basic mode, or
 * an RFC option (plus optional FCS=none) for ERTM/streaming.  On the first
 * request the mode may be downgraded via l2cap_select_mode(). */
2309 static int l2cap_build_conf_req(struct sock *sk, void *data)
2311 struct l2cap_pinfo *pi = l2cap_pi(sk);
2312 struct l2cap_conf_req *req = data;
2313 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2314 void *ptr = req->data;
2316 BT_DBG("sk %p", sk);
/* Mode (re)selection only before any config exchange has happened */
2318 if (pi->num_conf_req || pi->num_conf_rsp)
2322 case L2CAP_MODE_STREAMING:
2323 case L2CAP_MODE_ERTM:
2324 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2325 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2326 l2cap_send_disconn_req(pi->conn, sk);
2329 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2335 case L2CAP_MODE_BASIC:
/* Only send an MTU option when it differs from the spec default */
2336 if (pi->imtu != L2CAP_DEFAULT_MTU)
2337 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2340 case L2CAP_MODE_ERTM:
2341 rfc.mode = L2CAP_MODE_ERTM;
2342 rfc.txwin_size = pi->tx_win;
2343 rfc.max_transmit = pi->max_tx;
/* Timeouts are set by the acceptor; 0 here per the spec */
2344 rfc.retrans_timeout = 0;
2345 rfc.monitor_timeout = 0;
2346 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* MPS must leave room for control/FCS overhead within the link MTU */
2347 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2348 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2350 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2351 sizeof(rfc), (unsigned long) &rfc);
2353 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2356 if (pi->fcs == L2CAP_FCS_NONE ||
2357 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2358 pi->fcs = L2CAP_FCS_NONE;
2359 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2363 case L2CAP_MODE_STREAMING:
2364 rfc.mode = L2CAP_MODE_STREAMING;
2366 rfc.max_transmit = 0;
2367 rfc.retrans_timeout = 0;
2368 rfc.monitor_timeout = 0;
2369 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2370 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2371 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2373 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2374 sizeof(rfc), (unsigned long) &rfc);
2376 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2379 if (pi->fcs == L2CAP_FCS_NONE ||
2380 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2381 pi->fcs = L2CAP_FCS_NONE;
2382 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2387 /* FIXME: Need actual value of the flush timeout */
2388 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2389 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2391 req->dcid = cpu_to_le16(pi->dcid);
2392 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated CONF_REQ options (pi->conf_req/conf_len)
 * and build our CONF_RSP into 'data'.  Handles MTU, RFC and FCS options,
 * negotiates the channel mode, and records the remote's ERTM parameters.
 * Returns the response length, or -ECONNREFUSED when the modes cannot be
 * reconciled.
 *
 * Fixes vs. the original: values *stored into* the little-endian RFC
 * option fields must use cpu_to_le16(), not le16_to_cpu() (identical on
 * LE hosts, wrong on big-endian), and the stored le16 max_pdu_size must
 * be converted before comparing against the host-order MTU. */
2397 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2399 struct l2cap_pinfo *pi = l2cap_pi(sk);
2400 struct l2cap_conf_rsp *rsp = data;
2401 void *ptr = rsp->data;
2402 void *req = pi->conf_req;
2403 int len = pi->conf_len;
2404 int type, hint, olen;
2406 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2407 u16 mtu = L2CAP_DEFAULT_MTU;
2408 u16 result = L2CAP_CONF_SUCCESS;
2410 BT_DBG("sk %p", sk);
2412 while (len >= L2CAP_CONF_OPT_SIZE) {
2413 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2415 hint = type & L2CAP_CONF_HINT;
2416 type &= L2CAP_CONF_MASK;
2419 case L2CAP_CONF_MTU:
2423 case L2CAP_CONF_FLUSH_TO:
2427 case L2CAP_CONF_QOS:
2430 case L2CAP_CONF_RFC:
2431 if (olen == sizeof(rfc))
2432 memcpy(&rfc, (void *) val, olen);
2435 case L2CAP_CONF_FCS:
2436 if (val == L2CAP_FCS_NONE)
2437 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: reject and echo its type back */
2445 result = L2CAP_CONF_UNKNOWN;
2446 *((u8 *) ptr++) = type;
2451 if (pi->num_conf_rsp || pi->num_conf_req)
2455 case L2CAP_MODE_STREAMING:
2456 case L2CAP_MODE_ERTM:
2457 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2458 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2459 return -ECONNREFUSED;
2462 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2467 if (pi->mode != rfc.mode) {
2468 result = L2CAP_CONF_UNACCEPT;
2469 rfc.mode = pi->mode;
/* Second disagreement on the mode: give up on the channel */
2471 if (pi->num_conf_rsp == 1)
2472 return -ECONNREFUSED;
2474 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2475 sizeof(rfc), (unsigned long) &rfc);
2479 if (result == L2CAP_CONF_SUCCESS) {
2480 /* Configure output options and let the other side know
2481 * which ones we don't like. */
2483 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2484 result = L2CAP_CONF_UNACCEPT;
2487 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2489 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2492 case L2CAP_MODE_BASIC:
2493 pi->fcs = L2CAP_FCS_NONE;
2494 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2497 case L2CAP_MODE_ERTM:
2498 pi->remote_tx_win = rfc.txwin_size;
2499 pi->remote_max_tx = rfc.max_transmit;
/* rfc.max_pdu_size is wire-order (__le16): convert before comparing,
 * and store the clamped host value back with cpu_to_le16() */
2500 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2501 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2503 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* As acceptor we fill in the timeout values we will use */
2505 rfc.retrans_timeout =
2506 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2507 rfc.monitor_timeout =
2508 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2510 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2512 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2513 sizeof(rfc), (unsigned long) &rfc);
2517 case L2CAP_MODE_STREAMING:
2518 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2519 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2521 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2523 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2525 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2526 sizeof(rfc), (unsigned long) &rfc);
2531 result = L2CAP_CONF_UNACCEPT;
2533 memset(&rfc, 0, sizeof(rfc));
2534 rfc.mode = pi->mode;
2537 if (result == L2CAP_CONF_SUCCESS)
2538 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2540 rsp->scid = cpu_to_le16(pi->dcid);
2541 rsp->result = cpu_to_le16(result);
2542 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's CONF_RSP options and build our adjusted follow-up
 * CONF_REQ into 'data'.  MTU rejections are clamped to the minimum, the
 * RFC option is echoed back, and on overall success the negotiated ERTM/
 * streaming parameters are committed to the channel state.  *result may
 * be downgraded to L2CAP_CONF_UNACCEPT; returns -ECONNREFUSED when a
 * state-2 device sees a conflicting mode.
 *
 * Fix vs. the original: 'rfc' was declared uninitialized, so when the
 * response carries no (or a short) RFC option its fields were read as
 * stack garbage — both when echoed back and when committed below.
 * Initialize it as the sibling parsers in this file do. */
2547 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2549 struct l2cap_pinfo *pi = l2cap_pi(sk);
2550 struct l2cap_conf_req *req = data;
2551 void *ptr = req->data;
2554 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2556 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2558 while (len >= L2CAP_CONF_OPT_SIZE) {
2559 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2562 case L2CAP_CONF_MTU:
2563 if (val < L2CAP_DEFAULT_MIN_MTU) {
2564 *result = L2CAP_CONF_UNACCEPT;
2565 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2568 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2571 case L2CAP_CONF_FLUSH_TO:
2573 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2577 case L2CAP_CONF_RFC:
2578 if (olen == sizeof(rfc))
2579 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not let the peer change the agreed mode */
2581 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2582 rfc.mode != pi->mode)
2583 return -ECONNREFUSED;
2585 pi->mode = rfc.mode;
2588 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2589 sizeof(rfc), (unsigned long) &rfc);
/* Commit negotiated parameters only if nothing was rejected */
2594 if (*result == L2CAP_CONF_SUCCESS) {
2596 case L2CAP_MODE_ERTM:
2597 pi->remote_tx_win = rfc.txwin_size;
2598 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2599 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2600 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2602 case L2CAP_MODE_STREAMING:
2603 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2607 req->dcid = cpu_to_le16(pi->dcid);
2608 req->flags = cpu_to_le16(0x0000);
/* Build a minimal CONF_RSP (scid, result, flags, no options) into 'data' */
2613 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2615 struct l2cap_conf_rsp *rsp = data;
2616 void *ptr = rsp->data;
2618 BT_DBG("sk %p", sk);
2620 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2621 rsp->result = cpu_to_le16(result);
2622 rsp->flags = cpu_to_le16(flags);
/* On a successful CONF_RSP, extract the RFC option (if any) and commit
 * the final ERTM/streaming timing and MPS parameters to the channel.
 *
 * Fix vs. the original: 'rfc' was declared uninitialized, so a success
 * response without an RFC option committed stack garbage as the channel's
 * retransmission/monitor timeouts and MPS.  Initialize it with the spec
 * defaults (mirrors the upstream kernel fix). */
2627 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2629 struct l2cap_pinfo *pi = l2cap_pi(sk);
2632 struct l2cap_conf_rfc rfc = {
	.mode = L2CAP_MODE_BASIC,
	.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
	.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
	.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE),
};
2634 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
/* Only ERTM/streaming channels carry RFC parameters worth committing */
2636 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2639 while (len >= L2CAP_CONF_OPT_SIZE) {
2640 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2643 case L2CAP_CONF_RFC:
2644 if (olen == sizeof(rfc))
2645 memcpy(&rfc, (void *)val, olen);
2652 case L2CAP_MODE_ERTM:
2653 pi->remote_tx_win = rfc.txwin_size;
2654 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2655 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2656 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2658 case L2CAP_MODE_STREAMING:
2659 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle COMMAND_REJ.  A "command not understood" reject matching our
 * outstanding INFO_REQ means the peer lacks the info request: finish
 * the feature exchange and start pending channels anyway. */
2663 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2665 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2667 if (rej->reason != 0x0000)
2670 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2671 cmd->ident == conn->info_ident) {
2672 del_timer(&conn->info_timer);
2674 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2675 conn->info_ident = 0;
2677 l2cap_conn_start(conn);
/* Handle an incoming CONN_REQ: find a listener for the PSM, enforce link
 * security (except for SDP), allocate and register a child socket, then
 * answer with success/pending/refusal.  If the feature exchange with this
 * peer has not completed yet, the response is pending and an INFO_REQ is
 * (re)started. */
2683 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2685 struct l2cap_chan_list *list = &conn->chan_list;
2686 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2687 struct l2cap_conn_rsp rsp;
2688 struct sock *sk, *parent;
2689 int result, status = L2CAP_CS_NO_INFO;
2691 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2692 __le16 psm = req->psm;
2694 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2696 /* Check if we have socket listening on psm */
2697 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2699 result = L2CAP_CR_BAD_PSM;
2703 /* Check if the ACL is secure enough (if not SDP) */
2704 if (psm != cpu_to_le16(0x0001) &&
2705 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = authentication failure, reported at disconnect time */
2706 conn->disc_reason = 0x05;
2707 result = L2CAP_CR_SEC_BLOCK;
2711 result = L2CAP_CR_NO_MEM;
2713 /* Check for backlog size */
2714 if (sk_acceptq_is_full(parent)) {
2715 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2719 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2723 write_lock_bh(&list->lock);
2725 /* Check if we already have channel with that dcid */
2726 if (__l2cap_get_chan_by_dcid(list, scid)) {
2727 write_unlock_bh(&list->lock);
2728 sock_set_flag(sk, SOCK_ZAPPED);
2729 l2cap_sock_kill(sk);
2733 hci_conn_hold(conn->hcon);
2735 l2cap_sock_init(sk, parent);
2736 bacpy(&bt_sk(sk)->src, conn->src);
2737 bacpy(&bt_sk(sk)->dst, conn->dst);
2738 l2cap_pi(sk)->psm = psm;
/* Our dcid is the peer's source CID; our scid is allocated by chan_add */
2739 l2cap_pi(sk)->dcid = scid;
2741 __l2cap_chan_add(conn, sk, parent);
2742 dcid = l2cap_pi(sk)->scid;
2744 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Save the request ident so a deferred response can reuse it later */
2746 l2cap_pi(sk)->ident = cmd->ident;
2748 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2749 if (l2cap_check_security(sk)) {
2750 if (bt_sk(sk)->defer_setup) {
2751 sk->sk_state = BT_CONNECT2;
2752 result = L2CAP_CR_PEND;
2753 status = L2CAP_CS_AUTHOR_PEND;
2754 parent->sk_data_ready(parent, 0);
2756 sk->sk_state = BT_CONFIG;
2757 result = L2CAP_CR_SUCCESS;
2758 status = L2CAP_CS_NO_INFO;
2761 sk->sk_state = BT_CONNECT2;
2762 result = L2CAP_CR_PEND;
2763 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange still outstanding: answer pending for now */
2766 sk->sk_state = BT_CONNECT2;
2767 result = L2CAP_CR_PEND;
2768 status = L2CAP_CS_NO_INFO;
2771 write_unlock_bh(&list->lock);
2774 bh_unlock_sock(parent);
2777 rsp.scid = cpu_to_le16(scid);
2778 rsp.dcid = cpu_to_le16(dcid);
2779 rsp.result = cpu_to_le16(result);
2780 rsp.status = cpu_to_le16(status);
2781 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2783 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2784 struct l2cap_info_req info;
2785 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2787 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2788 conn->info_ident = l2cap_get_ident(conn);
2790 mod_timer(&conn->info_timer, jiffies +
2791 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2793 l2cap_send_cmd(conn, conn->info_ident,
2794 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle CONN_RSP for an outgoing connection: on success move to BT_CONFIG
 * and fire our first CONF_REQ; on pending just mark the channel; anything
 * else tears the channel down with ECONNREFUSED. */
2800 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2802 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2803 u16 scid, dcid, result, status;
2807 scid = __le16_to_cpu(rsp->scid);
2808 dcid = __le16_to_cpu(rsp->dcid);
2809 result = __le16_to_cpu(rsp->result);
2810 status = __le16_to_cpu(rsp->status);
2812 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid may be 0 in a pending/refused response; fall back to the ident */
2815 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2819 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2825 case L2CAP_CR_SUCCESS:
2826 sk->sk_state = BT_CONFIG;
2827 l2cap_pi(sk)->ident = 0;
2828 l2cap_pi(sk)->dcid = dcid;
2829 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2831 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2833 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2834 l2cap_build_conf_req(sk, req), req);
2835 l2cap_pi(sk)->num_conf_req++;
2839 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2843 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle CONF_REQ: accumulate option data across continuation fragments
 * (flag bit 0), then parse the complete request, answer with our CONF_RSP,
 * and — once both directions are configured — finalize FCS, reset the
 * sequence state and bring the channel up. */
2851 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2853 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2859 dcid = __le16_to_cpu(req->dcid);
2860 flags = __le16_to_cpu(req->flags);
2862 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2864 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2868 if (sk->sk_state == BT_DISCONN)
2871 /* Reject if config buffer is too small. */
2872 len = cmd_len - sizeof(*req);
2873 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2874 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2875 l2cap_build_conf_rsp(sk, rsp,
2876 L2CAP_CONF_REJECT, flags), rsp);
/* Stash this fragment's options; parsing waits for the final fragment */
2881 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2882 l2cap_pi(sk)->conf_len += len;
2884 if (flags & 0x0001) {
2885 /* Incomplete config. Send empty response. */
2886 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2887 l2cap_build_conf_rsp(sk, rsp,
2888 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2892 /* Complete config. */
2893 len = l2cap_parse_conf_req(sk, rsp);
2895 l2cap_send_disconn_req(conn, sk);
2899 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2900 l2cap_pi(sk)->num_conf_rsp++;
2902 /* Reset config buffer. */
2903 l2cap_pi(sk)->conf_len = 0;
2905 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2908 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* Default to CRC16 unless both sides agreed on no FCS */
2909 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2910 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2911 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2913 sk->sk_state = BT_CONNECTED;
2915 l2cap_pi(sk)->next_tx_seq = 0;
2916 l2cap_pi(sk)->expected_tx_seq = 0;
2917 __skb_queue_head_init(TX_QUEUE(sk));
2918 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2919 l2cap_ertm_init(sk);
2921 l2cap_chan_ready(sk);
/* Peer configured first: now send our own CONF_REQ if not yet sent */
2925 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2927 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2928 l2cap_build_conf_req(sk, buf), buf);
2929 l2cap_pi(sk)->num_conf_req++;
/* Handle CONF_RSP: on success commit RFC parameters; on unacceptable,
 * re-negotiate with an adjusted CONF_REQ (bounded by MAX_CONF_RSP tries);
 * otherwise disconnect.  Once both directions are done, finalize FCS,
 * reset the sequence state and bring the channel up.
 *
 * Fix vs. the original: cmd->len is a wire-order __le16 (it is written
 * with cpu_to_le16() in l2cap_build_cmd and the dispatcher converts it
 * with le16_to_cpu() as well), so using it raw in host arithmetic
 * miscomputes the option length on big-endian hosts. */
2937 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2939 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2940 u16 scid, flags, result;
2942 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
2944 scid = __le16_to_cpu(rsp->scid);
2945 flags = __le16_to_cpu(rsp->flags);
2946 result = __le16_to_cpu(rsp->result);
2948 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2949 scid, flags, result);
2951 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2956 case L2CAP_CONF_SUCCESS:
2957 l2cap_conf_rfc_get(sk, rsp->data, len);
2960 case L2CAP_CONF_UNACCEPT:
2961 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Options must fit in the local request buffer or we give up */
2964 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2965 l2cap_send_disconn_req(conn, sk);
2969 /* throw out any old stored conf requests */
2970 result = L2CAP_CONF_SUCCESS;
2971 len = l2cap_parse_conf_rsp(sk, rsp->data,
2974 l2cap_send_disconn_req(conn, sk);
2978 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2979 L2CAP_CONF_REQ, len, req);
2980 l2cap_pi(sk)->num_conf_req++;
2981 if (result != L2CAP_CONF_SUCCESS)
/* Rejected or too many retries: schedule disconnect with a grace timer */
2987 sk->sk_state = BT_DISCONN;
2988 sk->sk_err = ECONNRESET;
2989 l2cap_sock_set_timer(sk, HZ * 5);
2990 l2cap_send_disconn_req(conn, sk);
2997 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
2999 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3000 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3001 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3002 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3004 sk->sk_state = BT_CONNECTED;
3005 l2cap_pi(sk)->next_tx_seq = 0;
3006 l2cap_pi(sk)->expected_tx_seq = 0;
3007 __skb_queue_head_init(TX_QUEUE(sk));
3008 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3009 l2cap_ertm_init(sk);
3011 l2cap_chan_ready(sk);
/* Handle DISCONN_REQ: acknowledge with DISCONN_RSP, drop queued TX data,
 * stop ERTM timers, detach the channel with ECONNRESET and kill the sock. */
3019 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3021 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3022 struct l2cap_disconn_rsp rsp;
3026 scid = __le16_to_cpu(req->scid);
3027 dcid = __le16_to_cpu(req->dcid);
3029 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid: look the channel up by it */
3031 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3035 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3036 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3037 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3039 sk->sk_shutdown = SHUTDOWN_MASK;
3041 skb_queue_purge(TX_QUEUE(sk));
3043 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3044 skb_queue_purge(SREJ_QUEUE(sk));
3045 del_timer(&l2cap_pi(sk)->retrans_timer);
3046 del_timer(&l2cap_pi(sk)->monitor_timer);
3047 del_timer(&l2cap_pi(sk)->ack_timer);
3050 l2cap_chan_del(sk, ECONNRESET);
3053 l2cap_sock_kill(sk);
/* Handle DISCONN_RSP to our own disconnect: drop queued data, stop ERTM
 * timers and complete the teardown (no error — we initiated it). */
3057 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3059 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3063 scid = __le16_to_cpu(rsp->scid);
3064 dcid = __le16_to_cpu(rsp->dcid);
3066 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3068 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3072 skb_queue_purge(TX_QUEUE(sk));
3074 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3075 skb_queue_purge(SREJ_QUEUE(sk));
3076 del_timer(&l2cap_pi(sk)->retrans_timer);
3077 del_timer(&l2cap_pi(sk)->monitor_timer);
3078 del_timer(&l2cap_pi(sk)->ack_timer);
3081 l2cap_chan_del(sk, 0);
3084 l2cap_sock_kill(sk);
/* Handle INFO_REQ: answer feature-mask and fixed-channel queries with
 * success responses; anything else is reported as not supported. */
3088 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3090 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3093 type = __le16_to_cpu(req->type);
3095 BT_DBG("type 0x%4.4x", type);
3097 if (type == L2CAP_IT_FEAT_MASK) {
3099 u32 feat_mask = l2cap_feat_mask;
3100 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3101 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3102 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming only when enabled by the module parameter */
3104 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3106 put_unaligned_le32(feat_mask, rsp->data);
3107 l2cap_send_cmd(conn, cmd->ident,
3108 L2CAP_INFO_RSP, sizeof(buf), buf);
3109 } else if (type == L2CAP_IT_FIXED_CHAN) {
3111 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3112 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3113 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap follows the 4-byte response header */
3114 memcpy(buf + 4, l2cap_fixed_chan, 8);
3115 l2cap_send_cmd(conn, cmd->ident,
3116 L2CAP_INFO_RSP, sizeof(buf), buf);
3118 struct l2cap_info_rsp rsp;
3119 rsp.type = cpu_to_le16(type);
3120 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3121 l2cap_send_cmd(conn, cmd->ident,
3122 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle INFO_RSP: record the peer's feature mask and, if it supports
 * fixed channels, chain a fixed-channel query; otherwise the feature
 * exchange is finished and pending channels are started. */
3128 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3130 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3133 type = __le16_to_cpu(rsp->type);
3134 result = __le16_to_cpu(rsp->result);
3136 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3138 del_timer(&conn->info_timer);
3140 if (type == L2CAP_IT_FEAT_MASK) {
3141 conn->feat_mask = get_unaligned_le32(rsp->data);
3143 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3144 struct l2cap_info_req req;
3145 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3147 conn->info_ident = l2cap_get_ident(conn);
3149 l2cap_send_cmd(conn, conn->info_ident,
3150 L2CAP_INFO_REQ, sizeof(req), &req);
3152 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3153 conn->info_ident = 0;
3155 l2cap_conn_start(conn);
3157 } else if (type == L2CAP_IT_FIXED_CHAN) {
3158 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3159 conn->info_ident = 0;
3161 l2cap_conn_start(conn);
/* Parse and dispatch all signalling commands in one C-frame received on
 * the signalling CID.  Unknown or failing commands are answered with a
 * Command Reject.
 */
3167 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3169 u8 *data = skb->data;
3171 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic. */
3174 l2cap_raw_recv(conn, skb);
/* A C-frame may carry several commands back to back; walk them all. */
3176 while (len >= L2CAP_CMD_HDR_SIZE) {
3178 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3179 data += L2CAP_CMD_HDR_SIZE;
3180 len -= L2CAP_CMD_HDR_SIZE;
3182 cmd_len = le16_to_cpu(cmd.len);
3184 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command claiming more payload than remains, or ident 0 (reserved),
 * means the frame is corrupt — stop parsing. */
3186 if (cmd_len > len || !cmd.ident) {
3187 BT_DBG("corrupted command");
3192 case L2CAP_COMMAND_REJ:
3193 l2cap_command_rej(conn, &cmd, data);
3196 case L2CAP_CONN_REQ:
3197 err = l2cap_connect_req(conn, &cmd, data);
3200 case L2CAP_CONN_RSP:
3201 err = l2cap_connect_rsp(conn, &cmd, data);
3204 case L2CAP_CONF_REQ:
3205 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3208 case L2CAP_CONF_RSP:
3209 err = l2cap_config_rsp(conn, &cmd, data);
3212 case L2CAP_DISCONN_REQ:
3213 err = l2cap_disconnect_req(conn, &cmd, data);
3216 case L2CAP_DISCONN_RSP:
3217 err = l2cap_disconnect_rsp(conn, &cmd, data);
3220 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back. */
3221 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3224 case L2CAP_ECHO_RSP:
3227 case L2CAP_INFO_REQ:
3228 err = l2cap_information_req(conn, &cmd, data);
3231 case L2CAP_INFO_RSP:
3232 err = l2cap_information_rsp(conn, &cmd, data);
3236 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Any handler failure is reported to the peer as a Command Reject. */
3242 struct l2cap_cmd_rej rej;
3243 BT_DBG("error %d", err);
3245 /* FIXME: Map err to a valid reason */
3246 rej.reason = cpu_to_le16(0);
3247 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the 2-byte FCS trailer of an ERTM/streaming frame.
 * After skb_trim() the FCS bytes still sit in the buffer at
 * skb->data + skb->len, so they can be read before being discarded.
 * Returns 0 on match (or when FCS is disabled); non-zero otherwise.
 */
3257 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3259 u16 our_fcs, rcv_fcs;
/* CRC covers the basic L2CAP header plus the 2-byte control field,
 * which precede skb->data — assumes both were already pulled; confirm. */
3260 int hdr_size = L2CAP_HDR_SIZE + 2;
3262 if (pi->fcs == L2CAP_FCS_CRC16) {
3263 skb_trim(skb, skb->len - 2);
3264 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3265 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3267 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer: send an RNR if we are locally
 * busy, otherwise flush pending I-frames, and fall back to an RR if
 * nothing at all was transmitted — the peer must get an F-bit reply.
 */
3273 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3275 struct l2cap_pinfo *pi = l2cap_pi(sk);
3278 pi->frames_sent = 0;
3279 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3281 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3283 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Locally busy: the F-bit ride goes on an RNR frame. */
3284 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3285 l2cap_send_sframe(pi, control);
3286 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3289 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3290 __mod_retrans_timer();
3292 l2cap_ertm_send(sk);
/* No I-frame carried the F-bit; send a bare RR so the poll is answered. */
3294 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3295 pi->frames_sent == 0) {
3296 control |= L2CAP_SUPER_RCV_READY;
3297 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ reassembly queue,
 * keeping the queue ordered by tx_seq.
 * NOTE(review): the ordering comparison is a plain '>' on mod-64
 * sequence numbers, so it does not handle wraparound — verify whether
 * callers guarantee the queued window never spans the 63->0 boundary.
 */
3301 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3303 struct sk_buff *next_skb;
3305 bt_cb(skb)->tx_seq = tx_seq;
3306 bt_cb(skb)->sar = sar;
3308 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: trivial append. */
3310 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Walk until we find the first entry with a larger sequence number. */
3315 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3316 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3320 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3323 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
/* Largest seq so far: append at the tail. */
3325 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble SDUs from segmented I-frames according to the SAR bits of
 * the control field.  Unsegmented frames go straight to the socket;
 * START allocates the staging skb, CONTINUE appends, END delivers.
 * Returns 0 on success or a negative error.
 */
3328 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3330 struct l2cap_pinfo *pi = l2cap_pi(sk);
3331 struct sk_buff *_skb;
3334 switch (control & L2CAP_CTRL_SAR) {
3335 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while reassembly is in progress is a protocol
 * violation (handling elided here). */
3336 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3341 err = sock_queue_rcv_skb(sk, skb);
3347 case L2CAP_SDU_START:
3348 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes of a START frame carry the total SDU length. */
3353 pi->sdu_len = get_unaligned_le16(skb->data);
3356 if (pi->sdu_len > pi->imtu) {
/* NOTE(review): allocation result appears unchecked in this excerpt —
 * confirm the elided lines bail out when bt_skb_alloc() returns NULL. */
3361 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3367 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3369 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3370 pi->partial_sdu_len = skb->len;
3374 case L2CAP_SDU_CONTINUE:
3375 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3378 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3380 pi->partial_sdu_len += skb->len;
/* More data than the START frame announced: drop the SDU. */
3381 if (pi->partial_sdu_len > pi->sdu_len)
/* L2CAP_SDU_END (label elided): final fragment — deliver if complete. */
3389 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3392 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3394 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3395 pi->partial_sdu_len += skb->len;
3397 if (pi->partial_sdu_len > pi->imtu)
3400 if (pi->partial_sdu_len == pi->sdu_len) {
3401 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3402 err = sock_queue_rcv_skb(sk, _skb);
/* After a selectively-rejected frame arrives, drain the SREJ queue of
 * every frame that is now in sequence, pushing each one through SDU
 * reassembly and advancing buffer_seq_srej (mod 64) as we go.
 */
3417 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3419 struct sk_buff *skb;
3422 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first gap: frames beyond it are still missing. */
3423 if (bt_cb(skb)->tx_seq != tx_seq)
3426 skb = skb_dequeue(SREJ_QUEUE(sk));
3427 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3428 l2cap_sar_reassembly_sdu(sk, skb, control);
3429 l2cap_pi(sk)->buffer_seq_srej =
3430 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Re-issue SREJ S-frames for every sequence number still outstanding on
 * SREJ_LIST, rotating each entry to the tail so the list keeps its
 * order.  The entry matching tx_seq is satisfied (handling elided).
 */
3435 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3437 struct l2cap_pinfo *pi = l2cap_pi(sk);
3438 struct srej_list *l, *tmp;
3441 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3442 if (l->tx_seq == tx_seq) {
3447 control = L2CAP_SUPER_SELECT_REJECT;
3448 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3449 l2cap_send_sframe(pi, control);
/* Move the still-missing entry to the tail, preserving request order. */
3451 list_add_tail(&l->list, SREJ_LIST(sk));
3455 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3457 struct l2cap_pinfo *pi = l2cap_pi(sk);
3458 struct srej_list *new;
3461 while (tx_seq != pi->expected_tx_seq) {
3462 control = L2CAP_SUPER_SELECT_REJECT;
3463 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3464 l2cap_send_sframe(pi, control);
3466 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3467 new->tx_seq = pi->expected_tx_seq++;
3468 list_add_tail(&new->list, SREJ_LIST(sk));
3470 pi->expected_tx_seq++;
/* Core ERTM receive path for I-frames: acknowledge via req_seq, detect
 * out-of-sequence frames and run the SREJ machinery, deliver in-order
 * frames through SDU reassembly, and ack every num_to_ack frames.
 */
3473 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3475 struct l2cap_pinfo *pi = l2cap_pi(sk);
3476 u8 tx_seq = __get_txseq(rx_control);
3477 u8 req_seq = __get_reqseq(rx_control);
3478 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
/* Ack roughly every sixth of the TX window to bound ack traffic. */
3479 int num_to_ack = (pi->tx_win/6) + 1;
3482 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit answers our earlier poll: stop the monitor timer. */
3484 if (L2CAP_CTRL_FINAL & rx_control) {
3485 del_timer(&pi->monitor_timer);
3486 if (pi->unacked_frames > 0)
3487 __mod_retrans_timer();
3488 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* Piggybacked acknowledgement: drop everything the peer has received. */
3491 pi->expected_ack_seq = req_seq;
3492 l2cap_drop_acked_frames(sk);
3494 if (tx_seq == pi->expected_tx_seq)
/* Out-of-sequence frame. */
3497 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3498 struct srej_list *first;
3500 first = list_first_entry(SREJ_LIST(sk),
3501 struct srej_list, list);
3502 if (tx_seq == first->tx_seq) {
/* This is the frame we asked for first: queue it and drain the gap. */
3503 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3504 l2cap_check_srej_gap(sk, tx_seq);
3506 list_del(&first->list);
3509 if (list_empty(SREJ_LIST(sk))) {
/* All requested retransmissions arrived: leave SREJ recovery. */
3510 pi->buffer_seq = pi->buffer_seq_srej;
3511 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3515 struct srej_list *l;
3516 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3518 list_for_each_entry(l, SREJ_LIST(sk), list) {
3519 if (l->tx_seq == tx_seq) {
/* A duplicate of something already requested: re-issue the SREJs. */
3520 l2cap_resend_srejframe(sk, tx_seq);
3524 l2cap_send_srejframe(sk, tx_seq);
/* First out-of-sequence frame: enter SREJ recovery from scratch. */
3527 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3529 INIT_LIST_HEAD(SREJ_LIST(sk));
3530 pi->buffer_seq_srej = pi->buffer_seq;
3532 __skb_queue_head_init(SREJ_QUEUE(sk));
3533 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3535 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3537 l2cap_send_srejframe(sk, tx_seq);
/* In-sequence frame (expected_tx_seq path). */
3542 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3544 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
/* While in SREJ recovery, buffer even the in-order frames. */
3545 bt_cb(skb)->tx_seq = tx_seq;
3546 bt_cb(skb)->sar = sar;
3547 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3551 if (rx_control & L2CAP_CTRL_FINAL) {
3552 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3553 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* F-bit after a REJ: rewind to the acked point and retransmit. */
3555 if (!skb_queue_empty(TX_QUEUE(sk)))
3556 sk->sk_send_head = TX_QUEUE(sk)->next;
3557 pi->next_tx_seq = pi->expected_ack_seq;
3558 l2cap_ertm_send(sk);
3562 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3564 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
3570 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3571 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready S-frame: process the acknowledgement, then
 * react to the P/F bits — a poll demands an F-bit reply, an F-bit may
 * complete a retransmission cycle, otherwise just resume sending.
 */
3577 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3579 struct l2cap_pinfo *pi = l2cap_pi(sk);
3581 pi->expected_ack_seq = __get_reqseq(rx_control);
3582 l2cap_drop_acked_frames(sk);
3584 if (rx_control & L2CAP_CTRL_POLL) {
3585 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3586 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3587 (pi->unacked_frames > 0))
3588 __mod_retrans_timer();
3590 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* In SREJ recovery the poll is answered from the SREJ tail. */
3591 l2cap_send_srejtail(sk);
3593 l2cap_send_i_or_rr_or_rnr(sk);
3594 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3597 } else if (rx_control & L2CAP_CTRL_FINAL) {
3598 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3600 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3601 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* F-bit closes the REJ exchange: rewind and retransmit from the ack. */
3603 if (!skb_queue_empty(TX_QUEUE(sk)))
3604 sk->sk_send_head = TX_QUEUE(sk)->next;
3605 pi->next_tx_seq = pi->expected_ack_seq;
3606 l2cap_ertm_send(sk);
/* Plain RR: peer is no longer busy; keep the retransmit clock running
 * while frames remain unacknowledged, then push more data. */
3610 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3611 (pi->unacked_frames > 0))
3612 __mod_retrans_timer();
3614 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3615 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3618 l2cap_ertm_send(sk);
/* Handle a Reject S-frame: everything before req_seq is acknowledged,
 * everything from req_seq onward must be retransmitted.
 */
3622 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3624 struct l2cap_pinfo *pi = l2cap_pi(sk);
3625 u8 tx_seq = __get_reqseq(rx_control);
3627 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3629 pi->expected_ack_seq = tx_seq;
3630 l2cap_drop_acked_frames(sk);
3632 if (rx_control & L2CAP_CTRL_FINAL) {
3633 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3634 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
/* REJ with F-bit: rewind the send queue to the rejected frame. */
3636 if (!skb_queue_empty(TX_QUEUE(sk)))
3637 sk->sk_send_head = TX_QUEUE(sk)->next;
3638 pi->next_tx_seq = pi->expected_ack_seq;
3639 l2cap_ertm_send(sk);
/* REJ without F-bit: retransmit immediately... */
3642 if (!skb_queue_empty(TX_QUEUE(sk)))
3643 sk->sk_send_head = TX_QUEUE(sk)->next;
3644 pi->next_tx_seq = pi->expected_ack_seq;
3645 l2cap_ertm_send(sk);
/* ...and, if we are mid-poll, remember the REJ so the eventual F-bit
 * response does not trigger a second retransmission. */
3647 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3648 pi->srej_save_reqseq = tx_seq;
3649 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Select-Reject S-frame: retransmit exactly the requested
 * frame, with P/F-bit bookkeeping mirroring the REJ handler.
 */
3653 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3655 struct l2cap_pinfo *pi = l2cap_pi(sk);
3656 u8 tx_seq = __get_reqseq(rx_control);
3658 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3660 if (rx_control & L2CAP_CTRL_POLL) {
/* SREJ with P-bit also acknowledges everything before tx_seq. */
3661 pi->expected_ack_seq = tx_seq;
3662 l2cap_drop_acked_frames(sk);
3663 l2cap_retransmit_frame(sk, tx_seq);
3664 l2cap_ertm_send(sk);
3665 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3666 pi->srej_save_reqseq = tx_seq;
3667 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3669 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit for an SREJ we already acted on: just clear the flag, do not
 * retransmit the same frame twice. */
3670 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3671 pi->srej_save_reqseq == tx_seq)
3672 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3674 l2cap_retransmit_frame(sk, tx_seq);
3676 l2cap_retransmit_frame(sk, tx_seq);
3677 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3678 pi->srej_save_reqseq = tx_seq;
3679 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready S-frame: mark the peer busy, take the
 * piggybacked ack, stop retransmitting, and answer any poll.
 */
3684 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3686 struct l2cap_pinfo *pi = l2cap_pi(sk);
3687 u8 tx_seq = __get_reqseq(rx_control);
3689 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3690 pi->expected_ack_seq = tx_seq;
3691 l2cap_drop_acked_frames(sk);
3693 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer can't receive: retransmitting now would be wasted effort. */
3694 del_timer(&pi->retrans_timer);
3695 if (rx_control & L2CAP_CTRL_POLL) {
3696 u16 control = L2CAP_CTRL_FINAL;
3697 l2cap_send_rr_or_rnr(pi, control);
/* In SREJ recovery a poll is answered from the SREJ tail instead. */
3702 if (rx_control & L2CAP_CTRL_POLL)
3703 l2cap_send_srejtail(sk);
3705 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler after the
 * common F-bit processing (stop the monitor timer, leave WAIT_F).
 */
3708 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3710 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3712 if (L2CAP_CTRL_FINAL & rx_control) {
3713 del_timer(&l2cap_pi(sk)->monitor_timer);
3714 if (l2cap_pi(sk)->unacked_frames > 0)
3715 __mod_retrans_timer();
3716 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3719 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3720 case L2CAP_SUPER_RCV_READY:
3721 l2cap_data_channel_rrframe(sk, rx_control);
3724 case L2CAP_SUPER_REJECT:
3725 l2cap_data_channel_rejframe(sk, rx_control);
3728 case L2CAP_SUPER_SELECT_REJECT:
3729 l2cap_data_channel_srejframe(sk, rx_control);
3732 case L2CAP_SUPER_RCV_NOT_READY:
3733 l2cap_data_channel_rnrframe(sk, rx_control);
/* Deliver a data frame to the channel identified by cid, demultiplexing
 * on the channel mode: basic (direct delivery), ERTM (control-field
 * parsing, FCS check, I/S-frame dispatch) or streaming (no recovery,
 * just resynchronize expected_tx_seq on loss).
 */
3741 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3744 struct l2cap_pinfo *pi;
3748 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3750 BT_DBG("unknown cid 0x%4.4x", cid);
3756 BT_DBG("sk %p, len %d", sk, skb->len);
3758 if (sk->sk_state != BT_CONNECTED)
3762 case L2CAP_MODE_BASIC:
3763 /* If socket recv buffers overflows we drop data here
3764 * which is *bad* because L2CAP has to be reliable.
3765 * But we don't have any other choice. L2CAP doesn't
3766 * provide flow control mechanism. */
3768 if (pi->imtu < skb->len)
3771 if (!sock_queue_rcv_skb(sk, skb))
3775 case L2CAP_MODE_ERTM:
/* First two payload bytes are the ERTM control field. */
3776 control = get_unaligned_le16(skb->data)
3780 if (__is_sar_start(control))
3783 if (pi->fcs == L2CAP_FCS_CRC16)
3787 * We can just drop the corrupted I-frame here.
3788 * Receiver will miss it and start proper recovery
3789 * procedures and ask retransmission.
3794 if (l2cap_check_fcs(pi, skb))
3797 if (__is_iframe(control)) {
3801 l2cap_data_channel_iframe(sk, control, skb);
3806 l2cap_data_channel_sframe(sk, control, skb);
3811 case L2CAP_MODE_STREAMING:
3812 control = get_unaligned_le16(skb->data);
3816 if (__is_sar_start(control))
3819 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming frames must fit the MPS, carry at least ctrl+FCS, and
 * S-frames are not defined for this mode. */
3822 if (len > pi->mps || len < 4 || __is_sframe(control))
3825 if (l2cap_check_fcs(pi, skb))
3828 tx_seq = __get_txseq(control);
/* Streaming mode: lost frames are skipped, never retransmitted. */
3830 if (pi->expected_tx_seq == tx_seq)
3831 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3833 pi->expected_tx_seq = (tx_seq + 1) % 64;
3835 l2cap_sar_reassembly_sdu(sk, skb, control);
3840 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (G-frame) payload to the socket bound to the
 * given PSM, if one exists and the frame fits its incoming MTU.
 */
3854 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3858 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3862 BT_DBG("sk %p, len %d", sk, skb->len);
3864 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3867 if (l2cap_pi(sk)->imtu < skb->len)
3870 if (!sock_queue_rcv_skb(sk, skb))
/* Route one complete L2CAP frame by channel id: signalling commands,
 * connectionless data, or a connection-oriented data channel.
 */
3882 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3884 struct l2cap_hdr *lh = (void *) skb->data;
3888 skb_pull(skb, L2CAP_HDR_SIZE);
3889 cid = __le16_to_cpu(lh->cid);
3890 len = __le16_to_cpu(lh->len);
/* Header length must match what actually arrived; drop otherwise. */
3892 if (len != skb->len) {
3897 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3900 case L2CAP_CID_SIGNALING:
3901 l2cap_sig_channel(conn, skb);
3904 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first two payload bytes. */
3905 psm = get_unaligned_le16(skb->data);
3907 l2cap_conless_channel(conn, psm, skb);
3911 l2cap_data_channel(conn, cid, skb);
3916 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection,
 * based on listening L2CAP sockets bound to this adapter's address
 * (exact match preferred over BDADDR_ANY wildcards).
 */
3918 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3920 int exact = 0, lm1 = 0, lm2 = 0;
3921 register struct sock *sk;
3922 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
3924 if (type != ACL_LINK)
3927 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3929 /* Find listening sockets and check their link_mode */
3930 read_lock(&l2cap_sk_list.lock);
3931 sk_for_each(sk, node, &l2cap_sk_list.head) {
3932 if (sk->sk_state != BT_LISTEN)
3935 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3936 lm1 |= HCI_LM_ACCEPT;
3937 if (l2cap_pi(sk)->role_switch)
3938 lm1 |= HCI_LM_MASTER;
/* NOTE(review): 'exact' is presumably set in an elided line here. */
3940 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3941 lm2 |= HCI_LM_ACCEPT;
3942 if (l2cap_pi(sk)->role_switch)
3943 lm2 |= HCI_LM_MASTER;
3946 read_unlock(&l2cap_sk_list.lock);
3948 return exact ? lm1 : lm2;
/* HCI callback: an outgoing/incoming ACL connection completed.  On
 * success attach an l2cap_conn and kick off pending channels; on
 * failure tear everything down with the mapped errno.
 */
3951 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3953 struct l2cap_conn *conn;
3955 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3957 if (hcon->type != ACL_LINK)
3961 conn = l2cap_conn_add(hcon, status);
3963 l2cap_conn_ready(conn);
3965 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report which disconnect reason L2CAP wants for this
 * link (stored on the connection by the signalling code).
 */
3970 static int l2cap_disconn_ind(struct hci_conn *hcon)
3972 struct l2cap_conn *conn = hcon->l2cap_data;
3974 BT_DBG("hcon %p", hcon);
3976 if (hcon->type != ACL_LINK || !conn)
3979 return conn->disc_reason;
/* HCI callback: the ACL link went down — destroy the whole l2cap_conn
 * and error out every channel with the mapped reason.
 */
3982 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3984 BT_DBG("hcon %p reason %d", hcon, reason);
3986 if (hcon->type != ACL_LINK)
3989 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel: losing
 * encryption kills high-security channels outright and puts
 * medium-security ones on a 5 s grace timer (re-encryption may follow,
 * e.g. around role switches).
 */
3994 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
3996 if (sk->sk_type != SOCK_SEQPACKET)
3999 if (encrypt == 0x00) {
4000 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4001 l2cap_sock_clear_timer(sk);
4002 l2cap_sock_set_timer(sk, HZ * 5);
4003 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4004 __l2cap_sock_close(sk, ECONNREFUSED);
/* Encryption (re)enabled: cancel any pending grace timer. */
4006 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4007 l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption procedure finished.  Walk
 * every channel on the connection and either continue its setup
 * (Connect Request for outgoing, Connect Response for incoming) or
 * apply the encryption-change policy to established channels.
 */
4011 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4013 struct l2cap_chan_list *l;
4014 struct l2cap_conn *conn = hcon->l2cap_data;
4020 l = &conn->chan_list;
4022 BT_DBG("conn %p", conn);
4024 read_lock(&l->lock);
4026 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Channels still waiting on a pending connect are skipped here. */
4029 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4034 if (!status && (sk->sk_state == BT_CONNECTED ||
4035 sk->sk_state == BT_CONFIG)) {
4036 l2cap_check_encryption(sk, encrypt);
4041 if (sk->sk_state == BT_CONNECT) {
/* Security now satisfied: send the deferred Connect Request. */
4043 struct l2cap_conn_req req;
4044 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4045 req.psm = l2cap_pi(sk)->psm;
4047 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4049 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4050 L2CAP_CONN_REQ, sizeof(req), &req);
4052 l2cap_sock_clear_timer(sk);
4053 l2cap_sock_set_timer(sk, HZ / 10);
4055 } else if (sk->sk_state == BT_CONNECT2) {
/* Incoming connection was held for security: answer it now, either
 * proceeding to configuration or rejecting with a security block. */
4056 struct l2cap_conn_rsp rsp;
4060 sk->sk_state = BT_CONFIG;
4061 result = L2CAP_CR_SUCCESS;
4063 sk->sk_state = BT_DISCONN;
4064 l2cap_sock_set_timer(sk, HZ / 10);
4065 result = L2CAP_CR_SEC_BLOCK;
4068 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4069 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4070 rsp.result = cpu_to_le16(result);
4071 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4072 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4073 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4079 read_unlock(&l->lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * ACL_START fragments carry the L2CAP header (and thus the total
 * length); continuation fragments are appended to conn->rx_skb until
 * rx_len reaches zero, then the frame is dispatched.
 */
4084 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4086 struct l2cap_conn *conn = hcon->l2cap_data;
4088 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4091 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4093 if (flags & ACL_START) {
4094 struct l2cap_hdr *hdr;
/* A new start while a partial frame is pending means we lost
 * fragments: drop the stale partial frame. */
4098 BT_ERR("Unexpected start frame (len %d)", skb->len);
4099 kfree_skb(conn->rx_skb);
4100 conn->rx_skb = NULL;
4102 l2cap_conn_unreliable(conn, ECOMM);
4106 BT_ERR("Frame is too short (len %d)", skb->len);
4107 l2cap_conn_unreliable(conn, ECOMM);
4111 hdr = (struct l2cap_hdr *) skb->data;
4112 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4114 if (len == skb->len) {
4115 /* Complete frame received */
4116 l2cap_recv_frame(conn, skb);
4120 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4122 if (skb->len > len) {
4123 BT_ERR("Frame is too long (len %d, expected len %d)",
4125 l2cap_conn_unreliable(conn, ECOMM);
4129 /* Allocate skb for the complete frame (with header) */
4130 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4134 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4136 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4138 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4140 if (!conn->rx_len) {
4141 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4142 l2cap_conn_unreliable(conn, ECOMM);
4146 if (skb->len > conn->rx_len) {
4147 BT_ERR("Fragment is too long (len %d, expected %d)",
4148 skb->len, conn->rx_len);
4149 kfree_skb(conn->rx_skb);
4150 conn->rx_skb = NULL;
4152 l2cap_conn_unreliable(conn, ECOMM);
4156 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4158 conn->rx_len -= skb->len;
4160 if (!conn->rx_len) {
4161 /* Complete frame received */
4162 l2cap_recv_frame(conn, conn->rx_skb);
4163 conn->rx_skb = NULL;
/* debugfs: dump one line per L2CAP socket (addresses, state, psm,
 * cids, mtus, security level).
 */
4172 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4175 struct hlist_node *node;
4177 read_lock_bh(&l2cap_sk_list.lock);
4179 sk_for_each(sk, node, &l2cap_sk_list.head) {
4180 struct l2cap_pinfo *pi = l2cap_pi(sk);
4182 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4183 batostr(&bt_sk(sk)->src),
4184 batostr(&bt_sk(sk)->dst),
4185 sk->sk_state, __le16_to_cpu(pi->psm),
4187 pi->imtu, pi->omtu, pi->sec_level);
4190 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open hook: wire the seq_file single-record show routine. */
4195 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4197 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for /sys/kernel/debug/bluetooth/l2cap. */
4200 static const struct file_operations l2cap_debugfs_fops = {
4201 .open = l2cap_debugfs_open,
4203 .llseek = seq_lseek,
4204 .release = single_release,
/* Handle of the debugfs entry, created in l2cap_init(). */
4207 static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.
 * mmap/socketpair are unsupported; poll and ioctl use the generic
 * Bluetooth implementations.
 */
4209 static const struct proto_ops l2cap_sock_ops = {
4210 .family = PF_BLUETOOTH,
4211 .owner = THIS_MODULE,
4212 .release = l2cap_sock_release,
4213 .bind = l2cap_sock_bind,
4214 .connect = l2cap_sock_connect,
4215 .listen = l2cap_sock_listen,
4216 .accept = l2cap_sock_accept,
4217 .getname = l2cap_sock_getname,
4218 .sendmsg = l2cap_sock_sendmsg,
4219 .recvmsg = l2cap_sock_recvmsg,
4220 .poll = bt_sock_poll,
4221 .ioctl = bt_sock_ioctl,
4222 .mmap = sock_no_mmap,
4223 .socketpair = sock_no_socketpair,
4224 .shutdown = l2cap_sock_shutdown,
4225 .setsockopt = l2cap_sock_setsockopt,
4226 .getsockopt = l2cap_sock_getsockopt
/* Protocol-family hook so socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP)
 * reaches l2cap_sock_create().
 */
4229 static const struct net_proto_family l2cap_sock_family_ops = {
4230 .family = PF_BLUETOOTH,
4231 .owner = THIS_MODULE,
4232 .create = l2cap_sock_create,
/* Registration record for the HCI layer: these callbacks deliver
 * link events and ACL data into L2CAP.
 */
4235 static struct hci_proto l2cap_hci_proto = {
4237 .id = HCI_PROTO_L2CAP,
4238 .connect_ind = l2cap_connect_ind,
4239 .connect_cfm = l2cap_connect_cfm,
4240 .disconn_ind = l2cap_disconn_ind,
4241 .disconn_cfm = l2cap_disconn_cfm,
4242 .security_cfm = l2cap_security_cfm,
4243 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the BT socket family and the HCI
 * protocol (unwinding in reverse on failure), then create the debugfs
 * entry — debugfs failure is logged but not fatal.
 */
4246 static int __init l2cap_init(void)
4250 err = proto_register(&l2cap_proto, 0);
4254 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4256 BT_ERR("L2CAP socket registration failed");
4260 err = hci_register_proto(&l2cap_hci_proto);
4262 BT_ERR("L2CAP protocol registration failed");
/* Unwind the socket registration before bailing out. */
4263 bt_sock_unregister(BTPROTO_L2CAP);
4268 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4269 bt_debugfs, NULL, &l2cap_debugfs_fops);
4271 BT_ERR("Failed to create L2CAP debug file");
4274 BT_INFO("L2CAP ver %s", VERSION);
4275 BT_INFO("L2CAP socket layer initialized");
/* Error path label (elided): undo proto_register(). */
4280 proto_unregister(&l2cap_proto);
/* Module exit: tear down in reverse order of l2cap_init(). */
4284 static void __exit l2cap_exit(void)
4286 debugfs_remove(l2cap_debugfs);
4288 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4289 BT_ERR("L2CAP socket unregistration failed");
4291 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4292 BT_ERR("L2CAP protocol unregistration failed");
4294 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol lets dependent modules
 * (RFCOMM, BNEP, ...) pull in the L2CAP module automatically.
 */
4297 void l2cap_load(void)
4299 /* Dummy function to trigger automatic L2CAP module loading by
4300 * other modules that use L2CAP sockets but don't use any other
4301 * symbols from it. */
4304 EXPORT_SYMBOL(l2cap_load);
4306 module_init(l2cap_init);
4307 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared 'static int' above but exported
 * here with the 'bool' param type — harmless on this kernel but worth
 * aligning the declaration with the param type. */
4309 module_param(enable_ertm, bool, 0644);
4310 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4312 module_param(max_transmit, uint, 0644);
4313 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4315 module_param(tx_window, uint, 0644);
4316 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4318 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4319 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4320 MODULE_VERSION(VERSION);
4321 MODULE_LICENSE("GPL");
4322 MODULE_ALIAS("bt-proto-0");