2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core and sockets. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Driver version string reported for this L2CAP implementation. */
58 #define VERSION "2.15"
/* Module parameter: non-zero disables ERTM/streaming mode negotiation. */
60 static int disable_ertm = 0;
/* Locally supported L2CAP feature mask advertised in info responses. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap; 0x02 presumably marks the signalling channel — confirm against spec. */
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Socket operations table, defined later in this file. */
65 static const struct proto_ops l2cap_sock_ops;
/* Workqueue servicing deferred "local busy" processing. */
67 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
69 static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines used before their definitions. */
73 static void l2cap_busy_work(struct work_struct *work);
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static int l2cap_build_conf_req(struct sock *sk, void *data);
80 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
83 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
85 /* ---- L2CAP timers ---- */
/*
 * sk_timer expiry handler: picks an errno describing why the socket
 * timed out and tears the connection down via __l2cap_sock_close().
 */
86 static void l2cap_sock_timeout(unsigned long arg)
88 struct sock *sk = (struct sock *) arg;
91 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* Connected/configuring or a post-SDP connect attempt -> ECONNREFUSED. */
95 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
96 reason = ECONNREFUSED;
97 else if (sk->sk_state == BT_CONNECT &&
98 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
99 reason = ECONNREFUSED;
103 __l2cap_sock_close(sk, reason);
/* Arm the per-socket timer "timeout" jiffies from now. */
111 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
113 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
114 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel the per-socket timer. */
117 static void l2cap_sock_clear_timer(struct sock *sk)
119 BT_DBG("sock %p state %d", sk, sk->sk_state);
120 sk_stop_timer(sk, &sk->sk_timer);
123 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list; caller must hold l->lock. */
124 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
127 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
128 if (l2cap_pi(s)->dcid == cid)
/* Same walk keyed on the source (local) CID; caller holds l->lock. */
134 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
137 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
138 if (l2cap_pi(s)->scid == cid)
144 /* Find channel with given SCID.
145 * Returns locked socket */
146 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
150 s = __l2cap_get_chan_by_scid(l, cid)
153 read_unlock(&l->lock);
/* Lookup by pending signalling command identifier; caller holds l->lock. */
157 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
160 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
161 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
167 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
171 s = __l2cap_get_chan_by_ident(l, ident);
174 read_unlock(&l->lock);
/* Pick the first unused CID in the dynamic range; caller holds l->lock. */
178 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
180 u16 cid = L2CAP_CID_DYN_START;
182 for (; cid < L2CAP_CID_DYN_END; cid++) {
183 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the channel list; caller holds the list lock. */
190 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
195 l2cap_pi(l->head)->prev_c = sk;
197 l2cap_pi(sk)->next_c = l->head;
198 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the doubly-linked channel list under the list lock. */
202 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
204 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
206 write_lock_bh(&l->lock);
211 l2cap_pi(next)->prev_c = prev;
213 l2cap_pi(prev)->next_c = next;
214 write_unlock_bh(&l->lock);
/*
 * Attach socket sk to connection conn and assign its CIDs by socket
 * type; links it into the channel list and, for an incoming channel,
 * queues it on the parent's accept queue. Caller holds the list lock.
 */
219 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
221 struct l2cap_chan_list *l = &conn->chan_list;
223 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
224 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: presumably HCI "remote user terminated" default disconnect reason — confirm. */
226 conn->disc_reason = 0x13;
228 l2cap_pi(sk)->conn = conn;
230 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
231 /* Alloc CID for connection-oriented socket */
232 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
233 } else if (sk->sk_type == SOCK_DGRAM) {
234 /* Connectionless socket */
235 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
236 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
239 /* Raw socket can send/recv signalling messages only */
240 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
241 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
242 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
245 __l2cap_chan_link(l, sk);
248 bt_accept_enqueue(parent, sk);
252 * Must be called on the locked socket. */
/*
 * Detach the channel from its connection, mark the socket closed and
 * flush all ERTM state (timers, TX/SREJ/busy queues, SREJ list).
 */
253 static void l2cap_chan_del(struct sock *sk, int err)
255 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
256 struct sock *parent = bt_sk(sk)->parent;
258 l2cap_sock_clear_timer(sk);
260 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
263 /* Unlink from channel list */
264 l2cap_chan_unlink(&conn->chan_list, sk);
265 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL reference taken when the channel was added. */
266 hci_conn_put(conn->hcon);
269 sk->sk_state = BT_CLOSED;
270 sock_set_flag(sk, SOCK_ZAPPED);
/* Orphaned accept-queue child: unlink and wake the listening parent. */
276 bt_accept_unlink(sk);
277 parent->sk_data_ready(parent, 0);
279 sk->sk_state_change(sk);
281 skb_queue_purge(TX_QUEUE(sk));
283 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
284 struct srej_list *l, *tmp;
286 del_timer(&l2cap_pi(sk)->retrans_timer);
287 del_timer(&l2cap_pi(sk)->monitor_timer);
288 del_timer(&l2cap_pi(sk)->ack_timer);
290 skb_queue_purge(SREJ_QUEUE(sk));
291 skb_queue_purge(BUSY_QUEUE(sk));
/* Free any outstanding SREJ bookkeeping entries. */
293 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
300 /* Service level security */
/*
 * Map the channel's PSM and requested security level to an HCI
 * authentication type, then ask the HCI layer to enforce it.
 */
301 static inline int l2cap_check_security(struct sock *sk)
303 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* PSM 0x0001 is SDP: no bonding required, MITM only for "high". */
306 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
308 auth_type = HCI_AT_NO_BONDING_MITM;
310 auth_type = HCI_AT_NO_BONDING;
/* SDP traffic is downgraded to its own pseudo security level. */
312 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
313 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
315 switch (l2cap_pi(sk)->sec_level) {
316 case BT_SECURITY_HIGH:
317 auth_type = HCI_AT_GENERAL_BONDING_MITM;
319 case BT_SECURITY_MEDIUM:
320 auth_type = HCI_AT_GENERAL_BONDING;
323 auth_type = HCI_AT_NO_BONDING;
328 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/*
 * Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range (see comment below).
 */
332 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
336 /* Get next available identificator.
337 * 1 - 128 are used by kernel.
338 * 129 - 199 are reserved.
339 * 200 - 254 are used by utilities like l2ping, etc.
342 spin_lock_bh(&conn->lock);
344 if (++conn->tx_ident > 128)
349 spin_unlock_bh(&conn->lock);
/* Build a signalling command skb and push it out on the ACL link. */
354 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
356 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
358 BT_DBG("code 0x%2.2x", code);
363 hci_send_acl(conn->hcon, skb, 0);
/*
 * Build and transmit an ERTM S-frame carrying "control", appending the
 * pending F-bit/P-bit and an optional CRC16 FCS. No-op unless connected.
 */
366 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
369 struct l2cap_hdr *lh;
370 struct l2cap_conn *conn = pi->conn;
371 struct sock *sk = (struct sock *)pi;
/* Header + 16-bit control field; FCS adds 2 more below. */
372 int count, hlen = L2CAP_HDR_SIZE + 2;
374 if (sk->sk_state != BT_CONNECTED)
377 if (pi->fcs == L2CAP_FCS_CRC16)
380 BT_DBG("pi %p, control 0x%2.2x", pi, control);
382 count = min_t(unsigned int, conn->mtu, hlen);
383 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume a pending final bit, if any. */
385 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
386 control |= L2CAP_CTRL_FINAL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Consume a pending poll bit, if any. */
390 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
391 control |= L2CAP_CTRL_POLL;
392 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
395 skb = bt_skb_alloc(count, GFP_ATOMIC);
399 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
400 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
401 lh->cid = cpu_to_le16(pi->dcid);
402 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the trailing 2 FCS bytes. */
404 if (pi->fcs == L2CAP_FCS_CRC16) {
405 u16 fcs = crc16(0, (u8 *)lh, count - 2);
406 put_unaligned_le16(fcs, skb_put(skb, 2));
409 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RNR if locally busy (recording that we did), otherwise RR. */
412 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
414 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
415 control |= L2CAP_SUPER_RCV_NOT_READY;
416 pi->conn_state |= L2CAP_CONN_RNR_SENT;
418 control |= L2CAP_SUPER_RCV_READY;
420 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
422 l2cap_send_sframe(pi, control);
/* True when no connect request is outstanding for this channel. */
425 static inline int __l2cap_no_conn_pending(struct sock *sk)
427 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/*
 * Kick off channel establishment: if the remote feature mask is already
 * known, send a connect request (subject to security); otherwise issue
 * an information request first and arm the info timer.
 */
430 static void l2cap_do_start(struct sock *sk)
432 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
434 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight: wait for it to finish. */
435 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
438 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
439 struct l2cap_conn_req req;
440 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
441 req.psm = l2cap_pi(sk)->psm;
443 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
444 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
446 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
447 L2CAP_CONN_REQ, sizeof(req), &req);
/* First user of this ACL link: query the remote feature mask. */
450 struct l2cap_info_req req;
451 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
453 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
454 conn->info_ident = l2cap_get_ident(conn);
456 mod_timer(&conn->info_timer, jiffies +
457 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
459 l2cap_send_cmd(conn, conn->info_ident,
460 L2CAP_INFO_REQ, sizeof(req), &req);
/*
 * Check that both ends support the requested channel mode: the result
 * is non-zero only if the mode's feature bit is set in the remote mask
 * and in our (ERTM/streaming-augmented) local mask.
 */
464 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
466 u32 local_feat_mask = l2cap_feat_mask;
468 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
471 case L2CAP_MODE_ERTM:
472 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
473 case L2CAP_MODE_STREAMING:
474 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Abort the channel: purge pending TX, stop ERTM timers, send an
 * L2CAP disconnect request and move the socket to BT_DISCONN.
 */
480 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
482 struct l2cap_disconn_req req;
487 skb_queue_purge(TX_QUEUE(sk));
489 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
490 del_timer(&l2cap_pi(sk)->retrans_timer);
491 del_timer(&l2cap_pi(sk)->monitor_timer);
492 del_timer(&l2cap_pi(sk)->ack_timer);
495 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
496 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
497 l2cap_send_cmd(conn, l2cap_get_ident(conn),
498 L2CAP_DISCONN_REQ, sizeof(req), &req);
500 sk->sk_state = BT_DISCONN;
504 /* ---- L2CAP connections ---- */
/*
 * Drive every channel on the connection forward once the link is ready:
 * BT_CONNECT channels get a connect request (or are queued for closing
 * if their mode is unsupported), BT_CONNECT2 channels get a connect
 * response and, when appropriate, the first configure request.
 */
505 static void l2cap_conn_start(struct l2cap_conn *conn)
507 struct l2cap_chan_list *l = &conn->chan_list;
/* Channels that must die are collected and closed after the list lock is dropped. */
508 struct sock_del_list del, *tmp1, *tmp2;
511 BT_DBG("conn %p", conn);
513 INIT_LIST_HEAD(&del.list);
517 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in signalling here. */
520 if (sk->sk_type != SOCK_SEQPACKET &&
521 sk->sk_type != SOCK_STREAM) {
526 if (sk->sk_state == BT_CONNECT) {
527 struct l2cap_conn_req req;
529 if (!l2cap_check_security(sk) ||
530 !__l2cap_no_conn_pending(sk)) {
/* Mode mandated by the app but unsupported by the peer: schedule close. */
535 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
537 && l2cap_pi(sk)->conf_state &
538 L2CAP_CONF_STATE2_DEVICE) {
539 tmp1 = kzalloc(sizeof(struct sock_del_list),
542 list_add_tail(&tmp1->list, &del.list);
547 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
548 req.psm = l2cap_pi(sk)->psm;
550 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
551 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
553 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
554 L2CAP_CONN_REQ, sizeof(req), &req);
556 } else if (sk->sk_state == BT_CONNECT2) {
557 struct l2cap_conn_rsp rsp;
559 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
560 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
562 if (l2cap_check_security(sk)) {
/* Deferred setup: report "pending/authorization" and wake the listener. */
563 if (bt_sk(sk)->defer_setup) {
564 struct sock *parent = bt_sk(sk)->parent;
565 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
566 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
567 parent->sk_data_ready(parent, 0);
570 sk->sk_state = BT_CONFIG;
571 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
572 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
575 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
576 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
579 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
580 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Skip config if one was already sent or the connect did not succeed. */
582 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
583 rsp.result != L2CAP_CR_SUCCESS) {
588 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
589 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
590 l2cap_build_conf_req(sk, buf), buf);
591 l2cap_pi(sk)->num_conf_req++;
597 read_unlock(&l->lock);
/* Now close the sockets collected above, outside the channel-list lock. */
599 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
600 bh_lock_sock(tmp1->sk);
601 __l2cap_sock_close(tmp1->sk, ECONNRESET);
602 bh_unlock_sock(tmp1->sk);
603 list_del(&tmp1->list);
/*
 * ACL link is up: mark raw/datagram sockets connected immediately and
 * start the L2CAP handshake for connection-oriented ones.
 */
608 static void l2cap_conn_ready(struct l2cap_conn *conn)
610 struct l2cap_chan_list *l = &conn->chan_list;
613 BT_DBG("conn %p", conn);
617 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
620 if (sk->sk_type != SOCK_SEQPACKET &&
621 sk->sk_type != SOCK_STREAM) {
622 l2cap_sock_clear_timer(sk);
623 sk->sk_state = BT_CONNECTED;
624 sk->sk_state_change(sk);
625 } else if (sk->sk_state == BT_CONNECT)
631 read_unlock(&l->lock);
634 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate err to every channel that asked for reliable delivery. */
635 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
637 struct l2cap_chan_list *l = &conn->chan_list;
640 BT_DBG("conn %p", conn);
644 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
645 if (l2cap_pi(sk)->force_reliable)
649 read_unlock(&l->lock);
/* Info-request timer expiry: give up on the feature exchange and proceed. */
652 static void l2cap_info_timeout(unsigned long arg)
654 struct l2cap_conn *conn = (void *) arg;
656 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
657 conn->info_ident = 0;
659 l2cap_conn_start(conn);
/*
 * Return the L2CAP connection object for hcon, allocating and
 * initializing one (MTU, addresses, locks, info timer) on first use.
 */
662 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
664 struct l2cap_conn *conn = hcon->l2cap_data;
669 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
673 hcon->l2cap_data = conn;
676 BT_DBG("hcon %p conn %p", hcon, conn);
678 conn->mtu = hcon->hdev->acl_mtu;
679 conn->src = &hcon->hdev->bdaddr;
680 conn->dst = &hcon->dst;
684 spin_lock_init(&conn->lock);
685 rwlock_init(&conn->chan_list.lock);
687 setup_timer(&conn->info_timer, l2cap_info_timeout,
688 (unsigned long) conn);
/* Default disconnect reason; presumably HCI 0x13 "remote user terminated". */
690 conn->disc_reason = 0x13;
/*
 * Tear down the connection: free any partial reassembly skb, delete
 * every channel with the given error, stop the info timer and detach
 * from the hci_conn.
 */
695 static void l2cap_conn_del(struct hci_conn *hcon, int err)
697 struct l2cap_conn *conn = hcon->l2cap_data;
703 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
705 kfree_skb(conn->rx_skb);
708 while ((sk = conn->chan_list.head)) {
710 l2cap_chan_del(sk, err);
715 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
716 del_timer_sync(&conn->info_timer);
718 hcon->l2cap_data = NULL;
/* Locked wrapper: add a channel under the channel-list write lock. */
722 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
724 struct l2cap_chan_list *l = &conn->chan_list;
725 write_lock_bh(&l->lock);
726 __l2cap_chan_add(conn, sk, parent);
727 write_unlock_bh(&l->lock);
730 /* ---- Socket interface ---- */
/* Exact match on bound (sport, src address); caller holds the list lock. */
731 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
734 struct hlist_node *node;
735 sk_for_each(sk, node, &l2cap_sk_list.head)
736 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
743 /* Find socket with psm and source bdaddr.
744 * Returns closest match.
/* Prefers an exact source-address match; falls back to a BDADDR_ANY listener. */
746 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
748 struct sock *sk = NULL, *sk1 = NULL;
749 struct hlist_node *node;
751 sk_for_each(sk, node, &l2cap_sk_list.head) {
752 if (state && sk->sk_state != state)
755 if (l2cap_pi(sk)->psm == psm) {
757 if (!bacmp(&bt_sk(sk)->src, src))
761 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke on an exact match. */
765 return node ? sk : sk1;
768 /* Find socket with given address (psm, src).
769 * Returns locked socket */
770 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
773 read_lock(&l2cap_sk_list.lock);
774 s = __l2cap_get_sock_by_psm(state, psm, src);
777 read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: drop anything still queued for RX/TX. */
781 static void l2cap_sock_destruct(struct sock *sk)
785 skb_queue_purge(&sk->sk_receive_queue);
786 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child, then zap the listener itself. */
789 static void l2cap_sock_cleanup_listen(struct sock *parent)
793 BT_DBG("parent %p", parent);
795 /* Close not yet accepted channels */
796 while ((sk = bt_accept_dequeue(parent, NULL)))
797 l2cap_sock_close(sk);
799 parent->sk_state = BT_CLOSED;
800 sock_set_flag(parent, SOCK_ZAPPED);
803 /* Kill socket (only if zapped and orphan)
804 * Must be called on unlocked socket.
806 static void l2cap_sock_kill(struct sock *sk)
808 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
811 BT_DBG("sk %p state %d", sk, sk->sk_state);
813 /* Kill poor orphan */
814 bt_sock_unlink(&l2cap_sk_list, sk);
815 sock_set_flag(sk, SOCK_DEAD);
/*
 * State-dependent close: listeners clean their accept queue, connected
 * channels send a disconnect request, half-open incoming channels send
 * a negative connect response, everything else is just deleted/zapped.
 * Caller holds the socket lock.
 */
819 static void __l2cap_sock_close(struct sock *sk, int reason)
821 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
823 switch (sk->sk_state) {
825 l2cap_sock_cleanup_listen(sk);
/* Connected/configuring connection-oriented channel: graceful disconnect. */
830 if (sk->sk_type == SOCK_SEQPACKET ||
831 sk->sk_type == SOCK_STREAM) {
832 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
834 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
835 l2cap_send_disconn_req(conn, sk, reason);
837 l2cap_chan_del(sk, reason);
/* BT_CONNECT2: reject the pending incoming connection. */
841 if (sk->sk_type == SOCK_SEQPACKET ||
842 sk->sk_type == SOCK_STREAM) {
843 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
844 struct l2cap_conn_rsp rsp;
847 if (bt_sk(sk)->defer_setup)
848 result = L2CAP_CR_SEC_BLOCK;
850 result = L2CAP_CR_BAD_PSM;
852 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
853 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
854 rsp.result = cpu_to_le16(result);
855 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
856 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
857 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
859 l2cap_chan_del(sk, reason);
864 l2cap_chan_del(sk, reason);
868 sock_set_flag(sk, SOCK_ZAPPED);
873 /* Must be called on unlocked socket. */
/* Stop the socket timer and force-close with ECONNRESET. */
874 static void l2cap_sock_close(struct sock *sk)
876 l2cap_sock_clear_timer(sk);
878 __l2cap_sock_close(sk, ECONNRESET);
/*
 * Initialize a fresh L2CAP socket. With a parent (incoming accept),
 * channel parameters are inherited; otherwise sane defaults are set,
 * with ERTM preselected for SOCK_STREAM unless disabled.
 */
883 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
885 struct l2cap_pinfo *pi = l2cap_pi(sk);
890 sk->sk_type = parent->sk_type;
891 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
893 pi->imtu = l2cap_pi(parent)->imtu;
894 pi->omtu = l2cap_pi(parent)->omtu;
895 pi->conf_state = l2cap_pi(parent)->conf_state;
896 pi->mode = l2cap_pi(parent)->mode;
897 pi->fcs = l2cap_pi(parent)->fcs;
898 pi->max_tx = l2cap_pi(parent)->max_tx;
899 pi->tx_win = l2cap_pi(parent)->tx_win;
900 pi->sec_level = l2cap_pi(parent)->sec_level;
901 pi->role_switch = l2cap_pi(parent)->role_switch;
902 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: outbound/unbound socket defaults follow. */
904 pi->imtu = L2CAP_DEFAULT_MTU;
906 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
907 pi->mode = L2CAP_MODE_ERTM;
/* STATE2_DEVICE: the selected mode is mandatory, not negotiable. */
908 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
910 pi->mode = L2CAP_MODE_BASIC;
912 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
913 pi->fcs = L2CAP_FCS_CRC16;
914 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
915 pi->sec_level = BT_SECURITY_LOW;
917 pi->force_reliable = 0;
920 /* Default config options */
922 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
923 skb_queue_head_init(TX_QUEUE(sk));
924 skb_queue_head_init(SREJ_QUEUE(sk));
925 skb_queue_head_init(BUSY_QUEUE(sk));
926 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sock allocations for struct l2cap_pinfo. */
929 static struct proto l2cap_proto = {
931 .owner = THIS_MODULE,
932 .obj_size = sizeof(struct l2cap_pinfo)
/*
 * Allocate and minimally initialize a new L2CAP sock: destructor,
 * connect timeout, timer and global-list registration.
 */
935 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
939 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
943 sock_init_data(sock, sk);
944 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
946 sk->sk_destruct = l2cap_sock_destruct;
947 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
949 sock_reset_flag(sk, SOCK_ZAPPED);
951 sk->sk_protocol = proto;
952 sk->sk_state = BT_OPEN;
954 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
956 bt_sock_link(&l2cap_sk_list, sk);
/*
 * socket(2) backend: validate the socket type, require CAP_NET_RAW for
 * user raw sockets, then allocate and initialize the sock.
 */
960 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
965 BT_DBG("sock %p", sock);
967 sock->state = SS_UNCONNECTED;
969 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
970 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
971 return -ESOCKTNOSUPPORT;
973 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
976 sock->ops = &l2cap_sock_ops;
978 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
982 l2cap_sock_init(sk, NULL);
/*
 * bind(2): copy in the sockaddr, enforce CAP_NET_BIND_SERVICE for
 * reserved PSMs (< 0x1001), reject duplicate (psm, bdaddr) bindings and
 * record the source address/PSM. SDP/RFCOMM PSMs default to SDP security.
 */
986 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
988 struct sock *sk = sock->sk;
989 struct sockaddr_l2 la;
994 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy only as much as the caller supplied, zero-filling the rest. */
997 memset(&la, 0, sizeof(la));
998 len = min_t(unsigned int, sizeof(la), alen);
999 memcpy(&la, addr, len);
1006 if (sk->sk_state != BT_OPEN) {
1011 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
1012 !capable(CAP_NET_BIND_SERVICE)) {
1017 write_lock_bh(&l2cap_sk_list.lock);
1019 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1022 /* Save source address */
1023 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1024 l2cap_pi(sk)->psm = la.l2_psm;
1025 l2cap_pi(sk)->sport = la.l2_psm;
1026 sk->sk_state = BT_BOUND;
/* PSM 0x0001 (SDP) and 0x0003 (RFCOMM) need no link-level security. */
1028 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1029 __le16_to_cpu(la.l2_psm) == 0x0003)
1030 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1033 write_unlock_bh(&l2cap_sk_list.lock);
/*
 * Establish the underlying ACL link for an outgoing connect: resolve
 * the route, derive the HCI auth type from socket type/PSM/security
 * level, create the hci_conn + l2cap_conn, attach the channel and arm
 * the connect timer. If the ACL is already up, proceed immediately.
 */
1040 static int l2cap_do_connect(struct sock *sk)
1042 bdaddr_t *src = &bt_sk(sk)->src;
1043 bdaddr_t *dst = &bt_sk(sk)->dst;
1044 struct l2cap_conn *conn;
1045 struct hci_conn *hcon;
1046 struct hci_dev *hdev;
1050 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1053 hdev = hci_get_route(dst, src);
1055 return -EHOSTUNREACH;
1057 hci_dev_lock_bh(hdev);
/* Raw sockets: dedicated bonding semantics. */
1061 if (sk->sk_type == SOCK_RAW) {
1062 switch (l2cap_pi(sk)->sec_level) {
1063 case BT_SECURITY_HIGH:
1064 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1066 case BT_SECURITY_MEDIUM:
1067 auth_type = HCI_AT_DEDICATED_BONDING;
1070 auth_type = HCI_AT_NO_BONDING;
/* PSM 0x0001 (SDP): no bonding; mirrors l2cap_check_security(). */
1073 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1074 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1075 auth_type = HCI_AT_NO_BONDING_MITM;
1077 auth_type = HCI_AT_NO_BONDING;
1079 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1080 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1082 switch (l2cap_pi(sk)->sec_level) {
1083 case BT_SECURITY_HIGH:
1084 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1086 case BT_SECURITY_MEDIUM:
1087 auth_type = HCI_AT_GENERAL_BONDING;
1090 auth_type = HCI_AT_NO_BONDING;
1095 hcon = hci_connect(hdev, ACL_LINK, dst,
1096 l2cap_pi(sk)->sec_level, auth_type);
1100 conn = l2cap_conn_add(hcon, 0);
1108 /* Update source addr of the socket */
1109 bacpy(src, conn->src);
1111 l2cap_chan_add(conn, sk, NULL);
1113 sk->sk_state = BT_CONNECT;
1114 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* ACL already connected: raw/dgram sockets complete right away. */
1116 if (hcon->state == BT_CONNECTED) {
1117 if (sk->sk_type != SOCK_SEQPACKET &&
1118 sk->sk_type != SOCK_STREAM) {
1119 l2cap_sock_clear_timer(sk);
1120 sk->sk_state = BT_CONNECTED;
1126 hci_dev_unlock_bh(hdev);
/*
 * connect(2): validate the address and channel mode, record the
 * destination, start the connect and optionally wait (subject to
 * O_NONBLOCK) for BT_CONNECTED.
 */
1131 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1133 struct sock *sk = sock->sk;
1134 struct sockaddr_l2 la;
1137 BT_DBG("sk %p", sk);
1139 if (!addr || alen < sizeof(addr->sa_family) ||
1140 addr->sa_family != AF_BLUETOOTH)
1143 memset(&la, 0, sizeof(la));
1144 len = min_t(unsigned int, sizeof(la), alen);
1145 memcpy(&la, addr, len);
1152 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
/* Only basic/ERTM/streaming modes may initiate a connect. */
1158 switch (l2cap_pi(sk)->mode) {
1159 case L2CAP_MODE_BASIC:
1161 case L2CAP_MODE_ERTM:
1162 case L2CAP_MODE_STREAMING:
1171 switch (sk->sk_state) {
1175 /* Already connecting */
1179 /* Already connected */
1193 /* Set destination address and psm */
1194 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1195 l2cap_pi(sk)->psm = la.l2_psm;
1197 err = l2cap_do_connect(sk);
1202 err = bt_sock_wait_state(sk, BT_CONNECTED,
1203 sock_sndtimeo(sk, flags & O_NONBLOCK));
/*
 * listen(2): only bound SEQPACKET/STREAM sockets in a supported mode
 * may listen. If no PSM was bound, auto-allocate an odd dynamic PSM
 * from 0x1001 upward, then enter BT_LISTEN.
 */
1209 static int l2cap_sock_listen(struct socket *sock, int backlog)
1211 struct sock *sk = sock->sk;
1214 BT_DBG("sk %p backlog %d", sk, backlog);
1218 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1219 || sk->sk_state != BT_BOUND) {
1224 switch (l2cap_pi(sk)->mode) {
1225 case L2CAP_MODE_BASIC:
1227 case L2CAP_MODE_ERTM:
1228 case L2CAP_MODE_STREAMING:
1237 if (!l2cap_pi(sk)->psm) {
1238 bdaddr_t *src = &bt_sk(sk)->src;
1243 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd; step by 2 through the range. */
1245 for (psm = 0x1001; psm < 0x1100; psm += 2)
1246 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1247 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1248 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1253 write_unlock_bh(&l2cap_sk_list.lock);
1259 sk->sk_max_ack_backlog = backlog;
1260 sk->sk_ack_backlog = 0;
1261 sk->sk_state = BT_LISTEN;
/*
 * accept(2): block (honouring O_NONBLOCK and signals) until a child
 * socket appears on the accept queue, using the wake-one wait pattern.
 */
1268 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1270 DECLARE_WAITQUEUE(wait, current);
1271 struct sock *sk = sock->sk, *nsk;
1275 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1277 if (sk->sk_state != BT_LISTEN) {
1282 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1284 BT_DBG("sk %p timeo %ld", sk, timeo);
1286 /* Wait for an incoming connection. (wake-one). */
1287 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1288 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1289 set_current_state(TASK_INTERRUPTIBLE);
/* Drop the sock lock while sleeping; reacquire before rechecking. */
1296 timeo = schedule_timeout(timeo);
1297 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1299 if (sk->sk_state != BT_LISTEN) {
1304 if (signal_pending(current)) {
1305 err = sock_intr_errno(timeo);
1309 set_current_state(TASK_RUNNING);
1310 remove_wait_queue(sk_sleep(sk), &wait);
1315 newsock->state = SS_CONNECTED;
1317 BT_DBG("new socket %p", nsk);
/*
 * getsockname/getpeername: fill the sockaddr with either the peer
 * (psm/dst/dcid) or the local (sport/src/scid) endpoint.
 */
1324 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1326 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1327 struct sock *sk = sock->sk;
1329 BT_DBG("sock %p, sk %p", sock, sk);
1331 addr->sa_family = AF_BLUETOOTH;
1332 *len = sizeof(struct sockaddr_l2);
1335 la->l2_psm = l2cap_pi(sk)->psm;
1336 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1337 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1339 la->l2_psm = l2cap_pi(sk)->sport;
1340 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1341 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/*
 * Sleep (interruptibly) until all outstanding ERTM I-frames have been
 * acknowledged or the channel is gone; returns a signal/socket error.
 */
1347 static int __l2cap_wait_ack(struct sock *sk)
1349 DECLARE_WAITQUEUE(wait, current);
1353 add_wait_queue(sk_sleep(sk), &wait);
1354 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1355 set_current_state(TASK_INTERRUPTIBLE);
1360 if (signal_pending(current)) {
1361 err = sock_intr_errno(timeo);
1366 timeo = schedule_timeout(timeo);
1369 err = sock_error(sk);
1373 set_current_state(TASK_RUNNING);
1374 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * ERTM monitor timer: give up after remote_max_tx retries, otherwise
 * re-poll the peer (RR/RNR with P-bit) and re-arm the monitor.
 */
1378 static void l2cap_monitor_timeout(unsigned long arg)
1380 struct sock *sk = (void *) arg;
1382 BT_DBG("sk %p", sk);
1385 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1386 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1391 l2cap_pi(sk)->retry_count++;
1392 __mod_monitor_timer();
1394 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/*
 * ERTM retransmission timer: enter the WAIT_F state and poll the peer;
 * the monitor timer takes over the retry accounting from here.
 */
1398 static void l2cap_retrans_timeout(unsigned long arg)
1400 struct sock *sk = (void *) arg;
1402 BT_DBG("sk %p", sk);
1405 l2cap_pi(sk)->retry_count = 1;
1406 __mod_monitor_timer();
1408 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1410 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/*
 * Release TX-queue frames up to (not including) expected_ack_seq and
 * stop the retransmission timer once nothing is left unacknowledged.
 */
1414 static void l2cap_drop_acked_frames(struct sock *sk)
1416 struct sk_buff *skb;
1418 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1419 l2cap_pi(sk)->unacked_frames) {
1420 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1423 skb = skb_dequeue(TX_QUEUE(sk));
1426 l2cap_pi(sk)->unacked_frames--;
1429 if (!l2cap_pi(sk)->unacked_frames)
1430 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built frame to the HCI layer for transmission. */
1433 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1435 struct l2cap_pinfo *pi = l2cap_pi(sk);
1437 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1439 hci_send_acl(pi->conn->hcon, skb, 0);
/*
 * Streaming mode TX: drain the queue, stamping each frame's TxSeq into
 * its control field (mod-64) and refreshing the trailing CRC16 FCS.
 */
1442 static void l2cap_streaming_send(struct sock *sk)
1444 struct sk_buff *skb;
1445 struct l2cap_pinfo *pi = l2cap_pi(sk);
1448 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1449 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1450 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1451 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1453 if (pi->fcs == L2CAP_FCS_CRC16) {
1454 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1455 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1458 l2cap_do_send(sk, skb);
1460 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
/*
 * Retransmit the queued I-frame with the given TxSeq: find it in the
 * TX queue, clone it, rewrite ReqSeq/F-bit in the clone's control
 * field, recompute the FCS over the clone and send it. Disconnects if
 * the frame has already hit remote_max_tx retries.
 */
1464 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1466 struct l2cap_pinfo *pi = l2cap_pi(sk);
1467 struct sk_buff *skb, *tx_skb;
1470 skb = skb_peek(TX_QUEUE(sk));
1475 if (bt_cb(skb)->tx_seq == tx_seq)
1478 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1481 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1483 if (pi->remote_max_tx &&
1484 bt_cb(skb)->retries == pi->remote_max_tx) {
1485 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
/* Clone so the original stays queued for possible further retransmits. */
1489 tx_skb = skb_clone(skb, GFP_ATOMIC);
1490 bt_cb(skb)->retries++;
1491 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1493 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1494 control |= L2CAP_CTRL_FINAL;
1495 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1498 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1499 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1501 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS must be computed on the clone, since its control field changed. */
1503 if (pi->fcs == L2CAP_FCS_CRC16) {
1504 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1505 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1508 l2cap_do_send(sk, tx_skb);
/* Transmit I-frames queued at sk_send_head while the peer's TX window
 * has room.  Each frame is cloned before sending so the original stays
 * on the TX queue for possible retransmission; the clone gets a fresh
 * control field (ReqSeq/TxSeq and, when pending, the Final bit).
 */
1511 static int l2cap_ertm_send(struct sock *sk)
1513 struct sk_buff *skb, *tx_skb;
1514 struct l2cap_pinfo *pi = l2cap_pi(sk);
1518 if (sk->sk_state != BT_CONNECTED)
1521 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
/* Give up and disconnect once a frame has already been sent
 * remote_max_tx times without acknowledgement. */
1523 if (pi->remote_max_tx &&
1524 bt_cb(skb)->retries == pi->remote_max_tx) {
1525 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1529 tx_skb = skb_clone(skb, GFP_ATOMIC);
1531 bt_cb(skb)->retries++;
1533 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1534 control &= L2CAP_CTRL_SAR;
1536 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1537 control |= L2CAP_CTRL_FINAL;
1538 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1540 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1541 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1542 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1545 if (pi->fcs == L2CAP_FCS_CRC16) {
/* FIX: compute and store the FCS on the clone that is actually
 * transmitted (tx_skb), not on the queued original (skb).  The
 * clone's control field was just rewritten above, so a CRC taken
 * over skb->data would not match the bytes on the air (and the
 * retransmit path at line 1504 already does this correctly). */
1546 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1547 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1550 l2cap_do_send(sk, tx_skb);
1552 __mod_retrans_timer();
1554 bt_cb(skb)->tx_seq = pi->next_tx_seq;
/* TxSeq is modulo-64 per the ERTM control-field encoding. */
1555 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1557 pi->unacked_frames++;
1560 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1561 sk->sk_send_head = NULL;
1563 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Rewind transmission to the last acknowledged sequence number and
 * resend everything still sitting on the TX queue via l2cap_ertm_send(). */
1571 static int l2cap_retransmit_frames(struct sock *sk)
1573 struct l2cap_pinfo *pi = l2cap_pi(sk);
1576 if (!skb_queue_empty(TX_QUEUE(sk)))
1577 sk->sk_send_head = TX_QUEUE(sk)->next;
/* Restart numbering from the first unacked frame. */
1579 pi->next_tx_seq = pi->expected_ack_seq;
1580 ret = l2cap_ertm_send(sk);
/* Acknowledge received I-frames.  When locally busy, send an RNR
 * S-frame (and remember that we did); otherwise try to piggyback the
 * ack on pending outbound data, falling back to a plain RR S-frame. */
1584 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1586 struct sock *sk = (struct sock *)pi;
1589 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1591 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1592 control |= L2CAP_SUPER_RCV_NOT_READY;
1593 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1594 l2cap_send_sframe(pi, control);
/* A positive return means I-frames went out carrying the ReqSeq ack. */
1598 if (l2cap_ertm_send(sk) > 0)
1601 control |= L2CAP_SUPER_RCV_READY;
1602 l2cap_send_sframe(pi, control);
/* Send a SREJ S-frame with the Final bit set for the newest (tail)
 * entry on the socket's selective-reject list. */
1605 static void l2cap_send_srejtail(struct sock *sk)
1607 struct srej_list *tail;
1610 control = L2CAP_SUPER_SELECT_REJECT;
1611 control |= L2CAP_CTRL_FINAL;
1613 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1614 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1616 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy 'len' bytes of user iovec data into 'skb': the first 'count'
 * bytes go into the main buffer (header room already reserved by the
 * caller), the remainder into frag_list fragments capped at conn->mtu.
 * Returns 0 on success or a negative errno on copy/alloc failure. */
1619 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1621 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1622 struct sk_buff **frag;
1625 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1631 /* Continuation fragments (no L2CAP header) */
1632 frag = &skb_shinfo(skb)->frag_list;
1634 count = min_t(unsigned int, conn->mtu, len);
1636 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1639 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1645 frag = &(*frag)->next;
/* Build a connectionless PDU: basic L2CAP header plus a 2-byte PSM
 * field, followed by the user payload copied from the iovec.
 * Returns the skb or an ERR_PTR on allocation/copy failure. */
1651 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1653 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1654 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM. */
1655 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1656 struct l2cap_hdr *lh;
1658 BT_DBG("sk %p len %d", sk, (int)len);
1660 count = min_t(unsigned int, (conn->mtu - hlen), len);
1661 skb = bt_skb_send_alloc(sk, count + hlen,
1662 msg->msg_flags & MSG_DONTWAIT, &err);
1664 return ERR_PTR(-ENOMEM);
1666 /* Create L2CAP header */
1667 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1668 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1669 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1670 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1672 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1673 if (unlikely(err < 0)) {
1675 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload.  Returns the skb or an ERR_PTR on failure. */
1680 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1682 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1683 struct sk_buff *skb;
1684 int err, count, hlen = L2CAP_HDR_SIZE;
1685 struct l2cap_hdr *lh;
1687 BT_DBG("sk %p len %d", sk, (int)len);
1689 count = min_t(unsigned int, (conn->mtu - hlen), len);
1690 skb = bt_skb_send_alloc(sk, count + hlen,
1691 msg->msg_flags & MSG_DONTWAIT, &err);
1693 return ERR_PTR(-ENOMEM);
1695 /* Create L2CAP header */
1696 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1697 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1698 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1700 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1701 if (unlikely(err < 0)) {
1703 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional 2-byte SDU length (only for SAR start frames, when
 * 'sdulen' is non-zero), payload, and a 2-byte FCS placeholder when
 * CRC16 is negotiated (filled in at transmit time).
 * Returns the skb or an ERR_PTR on failure. */
1708 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1710 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1711 struct sk_buff *skb;
/* hlen = L2CAP header + 2-byte control field; FCS/SDU-len bytes are
 * added below when applicable. */
1712 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1713 struct l2cap_hdr *lh;
1715 BT_DBG("sk %p len %d", sk, (int)len);
1718 return ERR_PTR(-ENOTCONN);
1723 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1726 count = min_t(unsigned int, (conn->mtu - hlen), len);
1727 skb = bt_skb_send_alloc(sk, count + hlen,
1728 msg->msg_flags & MSG_DONTWAIT, &err);
1730 return ERR_PTR(-ENOMEM);
1732 /* Create L2CAP header */
1733 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1734 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1735 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1736 put_unaligned_le16(control, skb_put(skb, 2));
1738 put_unaligned_le16(sdulen, skb_put(skb, 2));
1740 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1741 if (unlikely(err < 0)) {
1743 return ERR_PTR(err);
/* Reserve space for the FCS; real value is computed when the frame
 * (or a retransmission clone) is actually sent. */
1746 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1747 put_unaligned_le16(0, skb_put(skb, 2));
1749 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START / CONTINUE* /
 * END sequence of I-frames.  Frames are built on a local queue first so
 * a mid-stream allocation failure can purge cleanly; on success the
 * whole run is spliced onto the socket TX queue. */
1753 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1755 struct l2cap_pinfo *pi = l2cap_pi(sk);
1756 struct sk_buff *skb;
1757 struct sk_buff_head sar_queue;
1761 skb_queue_head_init(&sar_queue);
1762 control = L2CAP_SDU_START;
/* The start frame carries the total SDU length ('len') as sdulen. */
1763 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1765 return PTR_ERR(skb);
1767 __skb_queue_tail(&sar_queue, skb);
1768 len -= pi->remote_mps;
1769 size += pi->remote_mps;
1774 if (len > pi->remote_mps) {
1775 control = L2CAP_SDU_CONTINUE;
1776 buflen = pi->remote_mps;
1778 control = L2CAP_SDU_END;
1782 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Drop everything built so far if one segment fails. */
1784 skb_queue_purge(&sar_queue);
1785 return PTR_ERR(skb);
1788 __skb_queue_tail(&sar_queue, skb);
1792 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1793 if (sk->sk_send_head == NULL)
1794 sk->sk_send_head = sar_queue.next;
/* sendmsg() entry point.  Dispatches on socket type and channel mode:
 * SOCK_DGRAM builds a connectionless PDU; basic mode builds a single
 * PDU after an outgoing-MTU check; ERTM/streaming either queue one
 * unsegmented I-frame or SAR-segment the SDU, then kick the
 * mode-appropriate transmit path. */
1799 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1801 struct sock *sk = sock->sk;
1802 struct l2cap_pinfo *pi = l2cap_pi(sk);
1803 struct sk_buff *skb;
1807 BT_DBG("sock %p, sk %p", sock, sk);
1809 err = sock_error(sk);
1813 if (msg->msg_flags & MSG_OOB)
1818 if (sk->sk_state != BT_CONNECTED) {
1823 /* Connectionless channel */
1824 if (sk->sk_type == SOCK_DGRAM) {
1825 skb = l2cap_create_connless_pdu(sk, msg, len);
1829 l2cap_do_send(sk, skb);
1836 case L2CAP_MODE_BASIC:
1837 /* Check outgoing MTU */
1838 if (len > pi->omtu) {
1843 /* Create a basic PDU */
1844 skb = l2cap_create_basic_pdu(sk, msg, len);
1850 l2cap_do_send(sk, skb);
1854 case L2CAP_MODE_ERTM:
1855 case L2CAP_MODE_STREAMING:
1856 /* Entire SDU fits into one PDU */
1857 if (len <= pi->remote_mps) {
1858 control = L2CAP_SDU_UNSEGMENTED;
1859 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1864 __skb_queue_tail(TX_QUEUE(sk), skb);
1866 if (sk->sk_send_head == NULL)
1867 sk->sk_send_head = skb;
1870 /* Segment SDU into multiples PDUs */
1871 err = l2cap_sar_segment_sdu(sk, msg, len);
1876 if (pi->mode == L2CAP_MODE_STREAMING) {
1877 l2cap_streaming_send(sk);
/* FIX: L2CAP_CONN_WAIT_F is a flag bit in conn_state, so it must be
 * tested with bitwise '&'.  The original 'pi->conn_state &&
 * L2CAP_CONN_WAIT_F' is a logical AND that is true whenever ANY
 * conn_state bit is set, wrongly suppressing the send. */
1879 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1880 pi->conn_state & L2CAP_CONN_WAIT_F) {
1884 err = l2cap_ertm_send(sk);
1892 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point.  For a deferred-setup channel still in
 * BT_CONNECT2, the first read completes the connection: send the
 * pending connect response and, if not already done, the first config
 * request.  Everything else is delegated to bt_sock_recvmsg(). */
1901 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1903 struct sock *sk = sock->sk;
1907 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1908 struct l2cap_conn_rsp rsp;
1909 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1912 sk->sk_state = BT_CONFIG;
/* In the response, our dcid is the peer's source CID and vice versa. */
1914 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1915 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1916 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1917 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1918 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1919 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1921 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1926 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1927 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1928 l2cap_build_conf_req(sk, buf), buf);
1929 l2cap_pi(sk)->num_conf_req++;
1937 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (mtu, mode, fcs,
 * max_tx, tx window) and L2CAP_LM link-mode flags.  Options start from
 * the current values so a short copy_from_user leaves the rest intact. */
1940 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1942 struct sock *sk = sock->sk;
1943 struct l2cap_options opts;
1947 BT_DBG("sk %p", sk);
/* Channel options cannot be changed once connected. */
1953 if (sk->sk_state == BT_CONNECTED) {
1958 opts.imtu = l2cap_pi(sk)->imtu;
1959 opts.omtu = l2cap_pi(sk)->omtu;
1960 opts.flush_to = l2cap_pi(sk)->flush_to;
1961 opts.mode = l2cap_pi(sk)->mode;
1962 opts.fcs = l2cap_pi(sk)->fcs;
1963 opts.max_tx = l2cap_pi(sk)->max_tx;
1964 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1966 len = min_t(unsigned int, sizeof(opts), optlen);
1967 if (copy_from_user((char *) &opts, optval, len)) {
1972 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1977 l2cap_pi(sk)->mode = opts.mode;
1978 switch (l2cap_pi(sk)->mode) {
1979 case L2CAP_MODE_BASIC:
1980 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1982 case L2CAP_MODE_ERTM:
1983 case L2CAP_MODE_STREAMING:
1992 l2cap_pi(sk)->imtu = opts.imtu;
1993 l2cap_pi(sk)->omtu = opts.omtu;
1994 l2cap_pi(sk)->fcs = opts.fcs;
1995 l2cap_pi(sk)->max_tx = opts.max_tx;
1996 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
2000 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode bits onto the security-level model; later
 * checks override earlier ones, so SECURE wins over ENCRYPT/AUTH. */
2005 if (opt & L2CAP_LM_AUTH)
2006 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2007 if (opt & L2CAP_LM_ENCRYPT)
2008 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2009 if (opt & L2CAP_LM_SECURE)
2010 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2012 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2013 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() entry point: SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP here. */
2025 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2027 struct sock *sk = sock->sk;
2028 struct bt_security sec;
2032 BT_DBG("sk %p", sk);
2034 if (level == SOL_L2CAP)
2035 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2037 if (level != SOL_BLUETOOTH)
2038 return -ENOPROTOOPT;
2044 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2045 && sk->sk_type != SOCK_RAW) {
2050 sec.level = BT_SECURITY_LOW;
2052 len = min_t(unsigned int, sizeof(sec), optlen);
2053 if (copy_from_user((char *) &sec, optval, len)) {
2058 if (sec.level < BT_SECURITY_LOW ||
2059 sec.level > BT_SECURITY_HIGH) {
2064 l2cap_pi(sk)->sec_level = sec.level;
2067 case BT_DEFER_SETUP:
/* Defer-setup only makes sense before the channel is established. */
2068 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2073 if (get_user(opt, (u32 __user *) optval)) {
2078 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler: reports L2CAP_OPTIONS, the
 * link-mode flags derived from the security level, and L2CAP_CONNINFO
 * (HCI handle + device class) for an established channel. */
2090 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2092 struct sock *sk = sock->sk;
2093 struct l2cap_options opts;
2094 struct l2cap_conninfo cinfo;
2098 BT_DBG("sk %p", sk);
2100 if (get_user(len, optlen))
2107 opts.imtu = l2cap_pi(sk)->imtu;
2108 opts.omtu = l2cap_pi(sk)->omtu;
2109 opts.flush_to = l2cap_pi(sk)->flush_to;
2110 opts.mode = l2cap_pi(sk)->mode;
2111 opts.fcs = l2cap_pi(sk)->fcs;
2112 opts.max_tx = l2cap_pi(sk)->max_tx;
2113 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2115 len = min_t(unsigned int, len, sizeof(opts));
2116 if (copy_to_user(optval, (char *) &opts, len))
/* Translate the security level back into legacy L2CAP_LM_* bits. */
2122 switch (l2cap_pi(sk)->sec_level) {
2123 case BT_SECURITY_LOW:
2124 opt = L2CAP_LM_AUTH;
2126 case BT_SECURITY_MEDIUM:
2127 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2129 case BT_SECURITY_HIGH:
2130 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2138 if (l2cap_pi(sk)->role_switch)
2139 opt |= L2CAP_LM_MASTER;
2141 if (l2cap_pi(sk)->force_reliable)
2142 opt |= L2CAP_LM_RELIABLE;
2144 if (put_user(opt, (u32 __user *) optval))
2148 case L2CAP_CONNINFO:
/* CONNINFO is valid once connected, or in CONNECT2 with deferred
 * setup (the ACL link already exists in that state). */
2149 if (sk->sk_state != BT_CONNECTED &&
2150 !(sk->sk_state == BT_CONNECT2 &&
2151 bt_sk(sk)->defer_setup)) {
2156 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2157 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2159 len = min_t(unsigned int, len, sizeof(cinfo));
2160 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() entry point: SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH reports BT_SECURITY and BT_DEFER_SETUP. */
2174 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2176 struct sock *sk = sock->sk;
2177 struct bt_security sec;
2180 BT_DBG("sk %p", sk);
2182 if (level == SOL_L2CAP)
2183 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2185 if (level != SOL_BLUETOOTH)
2186 return -ENOPROTOOPT;
2188 if (get_user(len, optlen))
2195 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2196 && sk->sk_type != SOCK_RAW) {
2201 sec.level = l2cap_pi(sk)->sec_level;
2203 len = min_t(unsigned int, len, sizeof(sec));
2204 if (copy_to_user(optval, (char *) &sec, len))
2209 case BT_DEFER_SETUP:
2210 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2215 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() entry point.  For ERTM, wait for outstanding frames to be
 * acked first, then close the channel; honour SO_LINGER by waiting for
 * BT_CLOSED up to sk_lingertime. */
2229 static int l2cap_sock_shutdown(struct socket *sock, int how)
2231 struct sock *sk = sock->sk;
2234 BT_DBG("sock %p, sk %p", sock, sk);
2240 if (!sk->sk_shutdown) {
2241 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2242 err = __l2cap_wait_ack(sk);
2244 sk->sk_shutdown = SHUTDOWN_MASK;
2245 l2cap_sock_clear_timer(sk);
2246 __l2cap_sock_close(sk, 0);
2248 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2249 err = bt_sock_wait_state(sk, BT_CLOSED,
2253 if (!err && sk->sk_err)
/* release() entry point: full shutdown followed by killing the sock. */
2260 static int l2cap_sock_release(struct socket *sock)
2262 struct sock *sk = sock->sk;
2265 BT_DBG("sock %p, sk %p", sock, sk);
2270 err = l2cap_sock_shutdown(sock, 2);
2273 l2cap_sock_kill(sk);
/* Mark the channel fully configured and wake whoever is waiting on it:
 * the connecting thread for an outgoing channel, or the listening
 * parent for an incoming one. */
2277 static void l2cap_chan_ready(struct sock *sk)
2279 struct sock *parent = bt_sk(sk)->parent;
2281 BT_DBG("sk %p, parent %p", sk, parent);
2283 l2cap_pi(sk)->conf_state = 0;
2284 l2cap_sock_clear_timer(sk);
2287 /* Outgoing channel.
2288 * Wake up socket sleeping on connect.
2290 sk->sk_state = BT_CONNECTED;
2291 sk->sk_state_change(sk);
2293 /* Incoming channel.
2294 * Wake up socket sleeping on accept.
2296 parent->sk_data_ready(parent, 0);
2300 /* Copy frame to all raw sockets on that connection */
2301 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2303 struct l2cap_chan_list *l = &conn->chan_list;
2304 struct sk_buff *nskb;
2307 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under its read lock and hand a
 * clone of the frame to every SOCK_RAW socket. */
2309 read_lock(&l->lock);
2310 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2311 if (sk->sk_type != SOCK_RAW)
2314 /* Don't send frame to the socket it came from */
2317 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the receive queue rejects the clone it must be freed here;
 * otherwise ownership passed to the socket. */
2321 if (sock_queue_rcv_skb(sk, nskb))
2324 read_unlock(&l->lock);
2327 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-channel skb: L2CAP header (CID =
 * signalling), command header, then 'dlen' bytes of payload, with
 * overflow spilling into frag_list fragments capped at conn->mtu. */
2328 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2329 u8 code, u8 ident, u16 dlen, void *data)
2331 struct sk_buff *skb, **frag;
2332 struct l2cap_cmd_hdr *cmd;
2333 struct l2cap_hdr *lh;
2336 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2337 conn, code, ident, dlen);
2339 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2340 count = min_t(unsigned int, conn->mtu, len);
2342 skb = bt_skb_alloc(count, GFP_ATOMIC);
2346 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2347 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2348 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2350 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2353 cmd->len = cpu_to_le16(dlen);
2356 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2357 memcpy(skb_put(skb, count), data, count);
2363 /* Continuation fragments (no L2CAP header) */
2364 frag = &skb_shinfo(skb)->frag_list;
2366 count = min_t(unsigned int, conn->mtu, len);
2368 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2372 memcpy(skb_put(*frag, count), data, count);
2377 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: report its type and length,
 * and return the value widened to unsigned long (1/2/4-byte values are
 * read and byte-swapped in place; larger values are returned as a
 * pointer to the raw bytes).  Returns the total encoded size consumed. */
2387 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2389 struct l2cap_conf_opt *opt = *ptr;
2392 len = L2CAP_CONF_OPT_SIZE + opt->len;
2400 *val = *((u8 *) opt->val);
2404 *val = __le16_to_cpu(*((__le16 *) opt->val));
2408 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Other sizes: hand back a pointer to the option payload. */
2412 *val = (unsigned long) opt->val;
2416 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Encode one configuration option at *ptr (the inverse of
 * l2cap_get_conf_opt): 1/2/4-byte values are stored little-endian,
 * anything else is copied from the buffer 'val' points to.  Advances
 * *ptr past the encoded option. */
2420 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2422 struct l2cap_conf_opt *opt = *ptr;
2424 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2431 *((u8 *) opt->val) = val;
2435 *((__le16 *) opt->val) = cpu_to_le16(val);
2439 *((__le32 *) opt->val) = cpu_to_le32(val);
2443 memcpy(opt->val, (void *) val, len);
2447 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer callback: flush a pending acknowledgement for the socket. */
2450 static void l2cap_ack_timeout(unsigned long arg)
2452 struct sock *sk = (void *) arg;
2455 l2cap_send_ack(l2cap_pi(sk));
/* Initialise per-channel ERTM state: sequence counters, the retrans /
 * monitor / ack timers, the SREJ and busy queues, the local-busy work
 * item, and the ERTM backlog receive handler. */
2459 static inline void l2cap_ertm_init(struct sock *sk)
2461 l2cap_pi(sk)->expected_ack_seq = 0;
2462 l2cap_pi(sk)->unacked_frames = 0;
2463 l2cap_pi(sk)->buffer_seq = 0;
2464 l2cap_pi(sk)->num_acked = 0;
2465 l2cap_pi(sk)->frames_sent = 0;
2467 setup_timer(&l2cap_pi(sk)->retrans_timer,
2468 l2cap_retrans_timeout, (unsigned long) sk);
2469 setup_timer(&l2cap_pi(sk)->monitor_timer,
2470 l2cap_monitor_timeout, (unsigned long) sk);
2471 setup_timer(&l2cap_pi(sk)->ack_timer,
2472 l2cap_ack_timeout, (unsigned long) sk);
2474 __skb_queue_head_init(SREJ_QUEUE(sk));
2475 __skb_queue_head_init(BUSY_QUEUE(sk));
2477 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2479 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick a channel mode: keep ERTM/streaming only if the remote feature
 * mask supports it, otherwise fall back to basic mode. */
2482 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2485 case L2CAP_MODE_STREAMING:
2486 case L2CAP_MODE_ERTM:
2487 if (l2cap_mode_supported(mode, remote_feat_mask))
2491 return L2CAP_MODE_BASIC;
/* Build an outgoing configuration request into 'data': MTU option for
 * basic mode, or an RFC option (plus optional FCS option) for ERTM and
 * streaming modes.  Returns the number of bytes written. */
2495 static int l2cap_build_conf_req(struct sock *sk, void *data)
2497 struct l2cap_pinfo *pi = l2cap_pi(sk);
2498 struct l2cap_conf_req *req = data;
2499 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2500 void *ptr = req->data;
2502 BT_DBG("sk %p", sk);
/* Only renegotiate the mode on the very first request/response. */
2504 if (pi->num_conf_req || pi->num_conf_rsp)
2508 case L2CAP_MODE_STREAMING:
2509 case L2CAP_MODE_ERTM:
2510 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2515 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2521 case L2CAP_MODE_BASIC:
2522 if (pi->imtu != L2CAP_DEFAULT_MTU)
2523 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
/* Peers without ERTM/streaming support get no RFC option at all. */
2525 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2526 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
2529 rfc.mode = L2CAP_MODE_BASIC;
2531 rfc.max_transmit = 0;
2532 rfc.retrans_timeout = 0;
2533 rfc.monitor_timeout = 0;
2534 rfc.max_pdu_size = 0;
2536 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2537 (unsigned long) &rfc);
2540 case L2CAP_MODE_ERTM:
2541 rfc.mode = L2CAP_MODE_ERTM;
2542 rfc.txwin_size = pi->tx_win;
2543 rfc.max_transmit = pi->max_tx;
2544 rfc.retrans_timeout = 0;
2545 rfc.monitor_timeout = 0;
2546 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap the PDU size so header+payload+FCS fit in the link MTU. */
2547 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2548 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2550 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2551 (unsigned long) &rfc);
2553 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2556 if (pi->fcs == L2CAP_FCS_NONE ||
2557 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2558 pi->fcs = L2CAP_FCS_NONE;
2559 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2563 case L2CAP_MODE_STREAMING:
2564 rfc.mode = L2CAP_MODE_STREAMING;
2566 rfc.max_transmit = 0;
2567 rfc.retrans_timeout = 0;
2568 rfc.monitor_timeout = 0;
2569 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2570 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2571 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2573 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2574 (unsigned long) &rfc);
2576 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2579 if (pi->fcs == L2CAP_FCS_NONE ||
2580 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2581 pi->fcs = L2CAP_FCS_NONE;
2582 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2587 /* FIXME: Need actual value of the flush timeout */
2588 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2589 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2591 req->dcid = cpu_to_le16(pi->dcid);
2592 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered configuration request (pi->conf_req /
 * pi->conf_len) and build our response into 'data': accept or adjust
 * the MTU and RFC options, record remote ERTM parameters, and flag
 * unknown non-hint options.  Returns the response length, or
 * -ECONNREFUSED when the modes cannot be reconciled. */
2597 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2599 struct l2cap_pinfo *pi = l2cap_pi(sk);
2600 struct l2cap_conf_rsp *rsp = data;
2601 void *ptr = rsp->data;
2602 void *req = pi->conf_req;
2603 int len = pi->conf_len;
2604 int type, hint, olen;
2606 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2607 u16 mtu = L2CAP_DEFAULT_MTU;
2608 u16 result = L2CAP_CONF_SUCCESS;
2610 BT_DBG("sk %p", sk);
2612 while (len >= L2CAP_CONF_OPT_SIZE) {
2613 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2615 hint = type & L2CAP_CONF_HINT;
2616 type &= L2CAP_CONF_MASK;
2619 case L2CAP_CONF_MTU:
2623 case L2CAP_CONF_FLUSH_TO:
2627 case L2CAP_CONF_QOS:
2630 case L2CAP_CONF_RFC:
2631 if (olen == sizeof(rfc))
2632 memcpy(&rfc, (void *) val, olen);
2635 case L2CAP_CONF_FCS:
2636 if (val == L2CAP_FCS_NONE)
2637 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: reject and echo its type back. */
2645 result = L2CAP_CONF_UNKNOWN;
2646 *((u8 *) ptr++) = type;
2651 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2655 case L2CAP_MODE_STREAMING:
2656 case L2CAP_MODE_ERTM:
2657 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2658 pi->mode = l2cap_select_mode(rfc.mode,
2659 pi->conn->feat_mask);
2663 if (pi->mode != rfc.mode)
2664 return -ECONNREFUSED;
2670 if (pi->mode != rfc.mode) {
2671 result = L2CAP_CONF_UNACCEPT;
2672 rfc.mode = pi->mode;
2674 if (pi->num_conf_rsp == 1)
2675 return -ECONNREFUSED;
2677 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2678 sizeof(rfc), (unsigned long) &rfc);
2682 if (result == L2CAP_CONF_SUCCESS) {
2683 /* Configure output options and let the other side know
2684 * which ones we don't like. */
2686 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2687 result = L2CAP_CONF_UNACCEPT;
2690 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2692 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2695 case L2CAP_MODE_BASIC:
2696 pi->fcs = L2CAP_FCS_NONE;
2697 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2700 case L2CAP_MODE_ERTM:
2701 pi->remote_tx_win = rfc.txwin_size;
2702 pi->remote_max_tx = rfc.max_transmit;
2704 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2705 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2707 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* FIX: these rfc fields are little-endian wire values being set from
 * CPU-order constants, so the correct conversion is cpu_to_le16(),
 * not le16_to_cpu() (identical bytes for u16, but the original was
 * semantically backwards and trips sparse endianness checks). */
2709 rfc.retrans_timeout =
2710 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2711 rfc.monitor_timeout =
2712 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2714 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2716 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2717 sizeof(rfc), (unsigned long) &rfc);
2721 case L2CAP_MODE_STREAMING:
2722 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2723 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2725 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2727 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2729 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2730 sizeof(rfc), (unsigned long) &rfc);
2735 result = L2CAP_CONF_UNACCEPT;
2737 memset(&rfc, 0, sizeof(rfc));
2738 rfc.mode = pi->mode;
2741 if (result == L2CAP_CONF_SUCCESS)
2742 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2744 rsp->scid = cpu_to_le16(pi->dcid);
2745 rsp->result = cpu_to_le16(result);
2746 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration *response* and build a follow-up
 * request into 'data', adopting the values the peer proposed (MTU,
 * flush timeout, RFC).  On success, latch the negotiated ERTM /
 * streaming timers and MPS into the channel state. */
2751 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2753 struct l2cap_pinfo *pi = l2cap_pi(sk);
2754 struct l2cap_conf_req *req = data;
2755 void *ptr = req->data;
2758 struct l2cap_conf_rfc rfc;
2760 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2762 while (len >= L2CAP_CONF_OPT_SIZE) {
2763 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2766 case L2CAP_CONF_MTU:
/* Peer offered an MTU below the allowed minimum: mark unacceptable
 * and counter-propose the minimum. */
2767 if (val < L2CAP_DEFAULT_MIN_MTU) {
2768 *result = L2CAP_CONF_UNACCEPT;
2769 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2772 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2775 case L2CAP_CONF_FLUSH_TO:
2777 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2781 case L2CAP_CONF_RFC:
2782 if (olen == sizeof(rfc))
2783 memcpy(&rfc, (void *)val, olen);
2785 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2786 rfc.mode != pi->mode)
2787 return -ECONNREFUSED;
2791 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2792 sizeof(rfc), (unsigned long) &rfc);
2797 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2798 return -ECONNREFUSED;
2800 pi->mode = rfc.mode;
2802 if (*result == L2CAP_CONF_SUCCESS) {
2804 case L2CAP_MODE_ERTM:
2805 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2806 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2807 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2809 case L2CAP_MODE_STREAMING:
2810 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2814 req->dcid = cpu_to_le16(pi->dcid);
2815 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal configuration response header (scid/result/flags)
 * into 'data'; returns the bytes written. */
2820 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2822 struct l2cap_conf_rsp *rsp = data;
2823 void *ptr = rsp->data;
2825 BT_DBG("sk %p", sk);
2827 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2828 rsp->result = cpu_to_le16(result);
2829 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from an accepted configuration response and
 * latch the peer's timers/MPS into the channel.  No-op for channels
 * not in ERTM or streaming mode. */
2834 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2836 struct l2cap_pinfo *pi = l2cap_pi(sk);
2839 struct l2cap_conf_rfc rfc;
2841 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2843 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2846 while (len >= L2CAP_CONF_OPT_SIZE) {
2847 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2850 case L2CAP_CONF_RFC:
2851 if (olen == sizeof(rfc))
2852 memcpy(&rfc, (void *)val, olen);
2859 case L2CAP_MODE_ERTM:
2860 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2861 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2862 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2864 case L2CAP_MODE_STREAMING:
2865 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject.  If it rejects our pending
 * information request, stop the info timer, mark feature discovery
 * done and proceed with channel establishment anyway. */
2869 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2871 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored here. */
2873 if (rej->reason != 0x0000)
2876 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2877 cmd->ident == conn->info_ident) {
2878 del_timer(&conn->info_timer);
2880 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2881 conn->info_ident = 0;
2883 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener for the PSM,
 * enforce link security (except for SDP), allocate and register a
 * child socket, then answer with success / pending / rejection and, if
 * needed, kick off feature discovery or the first config request. */
2889 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2891 struct l2cap_chan_list *list = &conn->chan_list;
2892 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2893 struct l2cap_conn_rsp rsp;
2894 struct sock *parent, *uninitialized_var(sk);
2895 int result, status = L2CAP_CS_NO_INFO;
2897 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2898 __le16 psm = req->psm;
2900 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2902 /* Check if we have socket listening on psm */
2903 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2905 result = L2CAP_CR_BAD_PSM;
2909 /* Check if the ACL is secure enough (if not SDP) */
2910 if (psm != cpu_to_le16(0x0001) &&
2911 !hci_conn_check_link_mode(conn->hcon)) {
2912 conn->disc_reason = 0x05;
2913 result = L2CAP_CR_SEC_BLOCK;
2917 result = L2CAP_CR_NO_MEM;
2919 /* Check for backlog size */
2920 if (sk_acceptq_is_full(parent)) {
2921 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2925 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2929 write_lock_bh(&list->lock);
2931 /* Check if we already have channel with that dcid */
2932 if (__l2cap_get_chan_by_dcid(list, scid)) {
2933 write_unlock_bh(&list->lock);
2934 sock_set_flag(sk, SOCK_ZAPPED);
2935 l2cap_sock_kill(sk);
2939 hci_conn_hold(conn->hcon);
2941 l2cap_sock_init(sk, parent);
2942 bacpy(&bt_sk(sk)->src, conn->src);
2943 bacpy(&bt_sk(sk)->dst, conn->dst);
/* Our destination CID is the peer's source CID from the request. */
2944 l2cap_pi(sk)->psm = psm;
2945 l2cap_pi(sk)->dcid = scid;
2947 __l2cap_chan_add(conn, sk, parent);
2948 dcid = l2cap_pi(sk)->scid;
2950 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2952 l2cap_pi(sk)->ident = cmd->ident;
2954 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2955 if (l2cap_check_security(sk)) {
2956 if (bt_sk(sk)->defer_setup) {
2957 sk->sk_state = BT_CONNECT2;
2958 result = L2CAP_CR_PEND;
2959 status = L2CAP_CS_AUTHOR_PEND;
2960 parent->sk_data_ready(parent, 0);
2962 sk->sk_state = BT_CONFIG;
2963 result = L2CAP_CR_SUCCESS;
2964 status = L2CAP_CS_NO_INFO;
2967 sk->sk_state = BT_CONNECT2;
2968 result = L2CAP_CR_PEND;
2969 status = L2CAP_CS_AUTHEN_PEND;
/* Feature discovery still outstanding: answer "pending". */
2972 sk->sk_state = BT_CONNECT2;
2973 result = L2CAP_CR_PEND;
2974 status = L2CAP_CS_NO_INFO;
2977 write_unlock_bh(&list->lock);
2980 bh_unlock_sock(parent);
2983 rsp.scid = cpu_to_le16(scid);
2984 rsp.dcid = cpu_to_le16(dcid);
2985 rsp.result = cpu_to_le16(result);
2986 rsp.status = cpu_to_le16(status);
2987 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2989 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2990 struct l2cap_info_req info;
2991 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2993 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2994 conn->info_ident = l2cap_get_ident(conn);
2996 mod_timer(&conn->info_timer, jiffies +
2997 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2999 l2cap_send_cmd(conn, conn->info_ident,
3000 L2CAP_INFO_REQ, sizeof(info), &info);
3003 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3004 result == L2CAP_CR_SUCCESS) {
3006 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3007 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3008 l2cap_build_conf_req(sk, buf), buf);
3009 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming Connection Response: locate the channel (by scid,
 * falling back to the command ident), then move to BT_CONFIG and send
 * the first config request on success, stay pending, or tear the
 * channel down on refusal. */
3015 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3017 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3018 u16 scid, dcid, result, status;
3022 scid = __le16_to_cpu(rsp->scid);
3023 dcid = __le16_to_cpu(rsp->dcid);
3024 result = __le16_to_cpu(rsp->result);
3025 status = __le16_to_cpu(rsp->status);
3027 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
3030 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3034 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
3040 case L2CAP_CR_SUCCESS:
3041 sk->sk_state = BT_CONFIG;
3042 l2cap_pi(sk)->ident = 0;
3043 l2cap_pi(sk)->dcid = dcid;
3044 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
3046 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
3049 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3051 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3052 l2cap_build_conf_req(sk, req), req);
3053 l2cap_pi(sk)->num_conf_req++;
/* L2CAP_CR_PEND: remember we are still waiting for the final answer. */
3057 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3061 l2cap_chan_del(sk, ECONNREFUSED);
3069 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3071 /* FCS is enabled only in ERTM or streaming mode, if one or both
3074 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3075 pi->fcs = L2CAP_FCS_NONE;
/* Unless the peer explicitly requested "no FCS", default to CRC16. */
3076 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3077 pi->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configuration Request: accumulate (possibly
 * multi-fragment) option data in the channel's conf buffer, answer
 * each fragment, and once the request is complete parse it, reply,
 * and bring the channel up when both directions are configured. */
3080 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3082 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3088 dcid = __le16_to_cpu(req->dcid);
3089 flags = __le16_to_cpu(req->flags);
3091 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3093 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3097 if (sk->sk_state == BT_DISCONN)
3100 /* Reject if config buffer is too small. */
3101 len = cmd_len - sizeof(*req);
3102 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3103 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3104 l2cap_build_conf_rsp(sk, rsp,
3105 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request. */
3110 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3111 l2cap_pi(sk)->conf_len += len;
/* Continuation flag set: more fragments follow. */
3113 if (flags & 0x0001) {
3114 /* Incomplete config. Send empty response. */
3115 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3116 l2cap_build_conf_rsp(sk, rsp,
3117 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3121 /* Complete config. */
3122 len = l2cap_parse_conf_req(sk, rsp);
3124 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3128 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3129 l2cap_pi(sk)->num_conf_rsp++;
3131 /* Reset config buffer. */
3132 l2cap_pi(sk)->conf_len = 0;
3134 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: initialise data-plane state and go up. */
3137 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3138 set_default_fcs(l2cap_pi(sk));
3140 sk->sk_state = BT_CONNECTED;
3142 l2cap_pi(sk)->next_tx_seq = 0;
3143 l2cap_pi(sk)->expected_tx_seq = 0;
3144 __skb_queue_head_init(TX_QUEUE(sk));
3145 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3146 l2cap_ertm_init(sk);
3148 l2cap_chan_ready(sk);
3152 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3154 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3155 l2cap_build_conf_req(sk, buf), buf);
3156 l2cap_pi(sk)->num_conf_req++;
3164 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3166 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3167 u16 scid, flags, result;
3169 int len = cmd->len - sizeof(*rsp);
3171 scid = __le16_to_cpu(rsp->scid);
3172 flags = __le16_to_cpu(rsp->flags);
3173 result = __le16_to_cpu(rsp->result);
3175 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3176 scid, flags, result);
3178 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3183 case L2CAP_CONF_SUCCESS:
3184 l2cap_conf_rfc_get(sk, rsp->data, len);
3187 case L2CAP_CONF_UNACCEPT:
3188 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3191 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3192 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3196 /* throw out any old stored conf requests */
3197 result = L2CAP_CONF_SUCCESS;
3198 len = l2cap_parse_conf_rsp(sk, rsp->data,
3201 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3205 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3206 L2CAP_CONF_REQ, len, req);
3207 l2cap_pi(sk)->num_conf_req++;
3208 if (result != L2CAP_CONF_SUCCESS)
3214 sk->sk_err = ECONNRESET;
3215 l2cap_sock_set_timer(sk, HZ * 5);
3216 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3223 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3225 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3226 set_default_fcs(l2cap_pi(sk));
3228 sk->sk_state = BT_CONNECTED;
3229 l2cap_pi(sk)->next_tx_seq = 0;
3230 l2cap_pi(sk)->expected_tx_seq = 0;
3231 __skb_queue_head_init(TX_QUEUE(sk));
3232 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3233 l2cap_ertm_init(sk);
3235 l2cap_chan_ready(sk);
/* Handle an incoming L2CAP Disconnection Request: acknowledge with a
 * Disconnection Response and tear the channel down. */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);
	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	/* Note the deliberate swap: the response's dcid is OUR scid and
	 * vice versa, since each side names channels from its own view. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
	sk->sk_shutdown = SHUTDOWN_MASK;
	l2cap_chan_del(sk, ECONNRESET);
	/* Kill the socket if it has no owner anymore. */
	l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Disconnection Response: the peer confirmed
 * our disconnect, so delete the channel with no error. */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	/* err == 0: clean, expected teardown. */
	l2cap_chan_del(sk, 0);
	l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Information Request: report our feature mask
 * or fixed-channel map, or NOTSUPP for anything else. */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	type = __le16_to_cpu(req->type);
	BT_DBG("type 0x%4.4x", type);
	if (type == L2CAP_IT_FEAT_MASK) {
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming support (unless disabled). */
		feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* +4 skips the type/result header; 8-byte fixed-channel map.
		 * NOTE(review): offset assumes sizeof(*rsp) == 4 — confirm. */
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	/* Unknown info type: reply NOTSUPP with just the header. */
	struct l2cap_info_rsp rsp;
	rsp.type = cpu_to_le16(type);
	rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
	l2cap_send_cmd(conn, cmd->ident,
				L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming L2CAP Information Response.  After a successful
 * feature-mask reply we may chase it with a fixed-channel query; once
 * the exchange finishes (or fails) we mark it done and let any pending
 * channels proceed via l2cap_conn_start(). */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);
	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
	/* A reply arrived, so stop the info-request timeout. */
	del_timer(&conn->info_timer);
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;
		l2cap_conn_start(conn);
	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer has fixed channels: ask which ones. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
			conn->info_ident = l2cap_get_ident(conn);
			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;
		l2cap_conn_start(conn);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;
		l2cap_conn_start(conn);
/* Demultiplex the L2CAP signalling channel (CID 0x0001): walk the
 * packed command headers in the skb and dispatch each to its handler.
 * Any handler error triggers a Command Reject back to the peer. */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
	u8 *data = skb->data;
	struct l2cap_cmd_hdr cmd;
	/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic. */
	l2cap_raw_recv(conn, skb);
	while (len >= L2CAP_CMD_HDR_SIZE) {
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;
		cmd_len = le16_to_cpu(cmd.len);
		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
		/* Claimed length past end of skb, or reserved ident 0. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
		case L2CAP_ECHO_REQ:
			/* Echo the payload straight back. */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
		case L2CAP_ECHO_RSP:
		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);
			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the 2-byte trailing FCS of an ERTM/streaming frame.
 * The CRC covers the L2CAP header plus the 2-byte control field (which
 * were already pulled off skb->data), hence hdr_size below. */
static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
	u16 our_fcs, rcv_fcs;
	int hdr_size = L2CAP_HDR_SIZE + 2;
	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* Drop the FCS from the logical length; the two bytes are
		 * still physically present at skb->data + skb->len, so they
		 * can be read on the next line after the trim. */
		skb_trim(skb, skb->len - 2);
		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
		if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer: send RNR if we are locally busy,
 * otherwise flush pending I-frames, and fall back to an RR if nothing
 * at all was transmitted (the peer needs *some* F-bit reply). */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	pi->frames_sent = 0;
	/* Acknowledge everything received so far. */
	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(sk);
	l2cap_ertm_send(sk);
	/* Nothing went out (and not busy): send a plain RR so the peer
	 * still gets its final-bit response. */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into SREJ_QUEUE, keeping the queue
 * ordered by tx_seq distance from buffer_seq (modulo-64 sequence space).
 * Duplicates (same tx_seq already queued) are rejected. */
static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
	struct sk_buff *next_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int tx_seq_offset, next_tx_seq_offset;
	/* Stash reassembly metadata in the skb control block. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;
	next_skb = skb_peek(SREJ_QUEUE(sk));
	/* Empty queue: trivial append. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);
	/* Offsets are distances from buffer_seq in the 64-entry window. */
	tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;
	/* Duplicate frame already queued. */
	if (bt_cb(next_skb)->tx_seq == tx_seq)
	next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						pi->buffer_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;
	/* Found the first queued frame that sorts after us: insert here. */
	if (next_tx_seq_offset > tx_seq_offset) {
		__skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
	if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
	} while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
	/* Sorts after everything currently queued. */
	__skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an ERTM SDU from its SAR fragments and deliver it to the
 * socket receive queue.  Unsegmented frames are delivered directly;
 * START allocates the staging skb (pi->sdu), CONTINUE appends, and END
 * appends, clones and queues the completed SDU.  Protocol violations
 * (unexpected SAR state, oversized SDU) tear the channel down. */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented frame in the middle of a SAR sequence
		 * is a protocol violation. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
		err = sock_queue_rcv_skb(sk, skb);
	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		if (pi->sdu_len > pi->imtu)
		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
	case L2CAP_SDU_CONTINUE:
		/* CONTINUE without a preceding START is a violation. */
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
	/* SDU_END: finish and deliver. */
	if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
	/* SAR_RETRY means a previous END already appended this data and
	 * only the delivery to the socket failed; don't append twice. */
	if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->imtu)
		if (pi->partial_sdu_len != pi->sdu_len)
		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
	_skb = skb_clone(pi->sdu, GFP_ATOMIC);
	/* Clone failed: remember to retry delivery later. */
	pi->conn_state |= L2CAP_CONN_SAR_RETRY;
	err = sock_queue_rcv_skb(sk, _skb);
	/* Receive queue full: keep the SDU and retry later. */
	pi->conn_state |= L2CAP_CONN_SAR_RETRY;
	pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
	pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
	/* Protocol violation path: disconnect the channel. */
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* Try to drain BUSY_QUEUE into the socket while in local-busy state.
 * Returns non-zero (via the requeue path) while frames remain stuck;
 * on full drain it clears the busy state and, if we had sent RNR,
 * polls the peer (RR+P) and waits for the F-bit. */
static int l2cap_try_push_rx_skb(struct sock *sk)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(sk, skb, control);
		/* Still can't deliver: put it back at the head and stop. */
		skb_queue_head(BUSY_QUEUE(sk), skb);
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	/* If we never told the peer we were busy, nothing to undo. */
	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
	/* We had sent RNR: poll the peer so it resumes transmission. */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;
	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();
	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
	BT_DBG("sk %p, Exit local busy", sk);
/* Workqueue handler for the local-busy condition: repeatedly retry
 * pushing queued rx frames to the socket, sleeping in between, until
 * the queue drains, a signal/socket error occurs, or the retry budget
 * (L2CAP_LOCAL_BUSY_TRIES) is exhausted — then force a disconnect. */
static void l2cap_busy_work(struct work_struct *work)
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;
	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);
		/* Gave the receiver enough chances: tear the channel down. */
		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			l2cap_send_disconn_req(pi->conn, sk, EBUSY);
		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
		timeo = schedule_timeout(timeo);
		err = sock_error(sk);
		/* 0 means the busy queue fully drained; we're done. */
		if (l2cap_try_push_rx_skb(sk) == 0)
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver an in-sequence I-frame to the socket, entering the local-busy
 * state (RNR to the peer + deferred delivery via the busy workqueue)
 * when the receive queue is full. */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	/* Already busy: just append behind the frames queued earlier. */
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return l2cap_try_push_rx_skb(sk);
	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	pi->buffer_seq = (pi->buffer_seq + 1) % 64;
	/* Busy Condition */
	BT_DBG("sk %p, Enter local busy", sk);
	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);
	/* Tell the peer to stop sending (RNR). */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);
	pi->conn_state |= L2CAP_CONN_RNR_SENT;
	del_timer(&pi->ack_timer);
	/* Defer further delivery attempts to the busy workqueue. */
	queue_work(_busy_wq, &pi->busy_work);
/* Reassemble a streaming-mode SDU.  Unlike ERTM, streaming mode is
 * lossy: errors just drop the partial SDU instead of disconnecting. */
static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	/*
	 * TODO: We have to notify the userland if some data is lost with the
	 */
	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Abandon any half-built SDU and deliver this frame alone. */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
		err = sock_queue_rcv_skb(sk, skb);
	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
		/* First two payload bytes carry the total SDU length. */
		pi->sdu_len = get_unaligned_le16(skb->data);
		if (pi->sdu_len > pi->imtu) {
		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
	/* SDU_END: finish and deliver. */
	if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
	memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
	pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
	pi->partial_sdu_len += skb->len;
	if (pi->partial_sdu_len > pi->imtu)
	if (pi->partial_sdu_len == pi->sdu_len) {
		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		/* NOTE(review): _skb is not checked for NULL before being
		 * queued — looks like an oops risk on allocation failure;
		 * verify sock_queue_rcv_skb's NULL handling. */
		err = sock_queue_rcv_skb(sk, _skb);
/* After a selectively-rejected frame arrives, flush the run of now
 * in-sequence frames from SREJ_QUEUE into the reassembly path, starting
 * at tx_seq and stopping at the first remaining gap. */
static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
	struct sk_buff *skb;
	while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
		/* Queue head isn't the next expected frame: still a gap. */
		if (bt_cb(skb)->tx_seq != tx_seq)
		skb = skb_dequeue(SREJ_QUEUE(sk));
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		l2cap_ertm_reassembly_sdu(sk, skb, control);
		l2cap_pi(sk)->buffer_seq_srej =
			(l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames for every sequence number still outstanding in
 * SREJ_LIST up to (and removing) the entry matching tx_seq; the others
 * are re-queued at the tail, preserving list order. */
static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct srej_list *l, *tmp;
	list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
		/* Reached the entry for the frame that just arrived: done. */
		if (l->tx_seq == tx_seq) {
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(pi, control);
		/* Rotate the entry to the tail to keep the list ordered. */
		list_add_tail(&l->list, SREJ_LIST(sk));
3924 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3926 struct l2cap_pinfo *pi = l2cap_pi(sk);
3927 struct srej_list *new;
3930 while (tx_seq != pi->expected_tx_seq) {
3931 control = L2CAP_SUPER_SELECT_REJECT;
3932 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3933 l2cap_send_sframe(pi, control);
3935 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3936 new->tx_seq = pi->expected_tx_seq;
3937 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3938 list_add_tail(&new->list, SREJ_LIST(sk));
3940 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3943 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3945 struct l2cap_pinfo *pi = l2cap_pi(sk);
3946 u8 tx_seq = __get_txseq(rx_control);
3947 u8 req_seq = __get_reqseq(rx_control);
3948 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3949 int tx_seq_offset, expected_tx_seq_offset;
3950 int num_to_ack = (pi->tx_win/6) + 1;
3953 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3956 if (L2CAP_CTRL_FINAL & rx_control &&
3957 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3958 del_timer(&pi->monitor_timer);
3959 if (pi->unacked_frames > 0)
3960 __mod_retrans_timer();
3961 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3964 pi->expected_ack_seq = req_seq;
3965 l2cap_drop_acked_frames(sk);
3967 if (tx_seq == pi->expected_tx_seq)
3970 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3971 if (tx_seq_offset < 0)
3972 tx_seq_offset += 64;
3974 /* invalid tx_seq */
3975 if (tx_seq_offset >= pi->tx_win) {
3976 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3980 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3983 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3984 struct srej_list *first;
3986 first = list_first_entry(SREJ_LIST(sk),
3987 struct srej_list, list);
3988 if (tx_seq == first->tx_seq) {
3989 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3990 l2cap_check_srej_gap(sk, tx_seq);
3992 list_del(&first->list);
3995 if (list_empty(SREJ_LIST(sk))) {
3996 pi->buffer_seq = pi->buffer_seq_srej;
3997 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3999 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4002 struct srej_list *l;
4004 /* duplicated tx_seq */
4005 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4008 list_for_each_entry(l, SREJ_LIST(sk), list) {
4009 if (l->tx_seq == tx_seq) {
4010 l2cap_resend_srejframe(sk, tx_seq);
4014 l2cap_send_srejframe(sk, tx_seq);
4017 expected_tx_seq_offset =
4018 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4019 if (expected_tx_seq_offset < 0)
4020 expected_tx_seq_offset += 64;
4022 /* duplicated tx_seq */
4023 if (tx_seq_offset < expected_tx_seq_offset)
4026 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4028 BT_DBG("sk %p, Enter SREJ", sk);
4030 INIT_LIST_HEAD(SREJ_LIST(sk));
4031 pi->buffer_seq_srej = pi->buffer_seq;
4033 __skb_queue_head_init(SREJ_QUEUE(sk));
4034 __skb_queue_head_init(BUSY_QUEUE(sk));
4035 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4037 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4039 l2cap_send_srejframe(sk, tx_seq);
4041 del_timer(&pi->ack_timer);
4046 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4048 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4049 bt_cb(skb)->tx_seq = tx_seq;
4050 bt_cb(skb)->sar = sar;
4051 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4055 err = l2cap_push_rx_skb(sk, skb, rx_control);
4059 if (rx_control & L2CAP_CTRL_FINAL) {
4060 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4061 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4063 l2cap_retransmit_frames(sk);
4068 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4069 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready (RR) S-frame: process the acknowledgement,
 * answer a P-bit poll, or resume transmission after a final bit. */
static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
	pi->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(sk);
	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer is polling us; our reply must carry the F-bit. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(pi->unacked_frames > 0))
				__mod_retrans_timer();
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			/* In SREJ recovery: answer with the tail SREJ. */
			l2cap_send_srejtail(sk);
			l2cap_send_i_or_rr_or_rnr(sk);
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			l2cap_retransmit_frames(sk);
		/* Plain RR: restart retransmission timer if frames remain
		 * unacked, and keep the tx pipeline moving. */
		if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(pi->unacked_frames > 0))
			__mod_retrans_timer();
		pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
			l2cap_ertm_send(sk);
/* Handle a Reject (REJ) S-frame: ack up to req_seq, then retransmit
 * everything from there.  REJ_ACT guards against acting twice on the
 * same reject when a final bit is still outstanding. */
static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);
	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);
	if (rx_control & L2CAP_CTRL_FINAL) {
		/* Only retransmit if we haven't already acted on this REJ. */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			l2cap_retransmit_frames(sk);
		l2cap_retransmit_frames(sk);
		/* Waiting for an F-bit: remember the REJ was handled. */
		if (pi->conn_state & L2CAP_CONN_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * requested frame.  P-bit SREJs also ack; SREJ_ACT/srej_save_reqseq
 * dedupe the retransmission while a final bit is outstanding. */
static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);
	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
	if (rx_control & L2CAP_CTRL_POLL) {
		pi->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(sk);
		/* Reply must carry the F-bit. */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(sk, tx_seq);
		l2cap_ertm_send(sk);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmit if this SREJ was already acted on. */
		if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				pi->srej_save_reqseq == tx_seq)
			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			l2cap_retransmit_one_frame(sk, tx_seq);
		l2cap_retransmit_one_frame(sk, tx_seq);
		if (pi->conn_state & L2CAP_CONN_WAIT_F) {
			pi->srej_save_reqseq = tx_seq;
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, ack up
 * to req_seq, stop retransmissions, and answer any P-bit poll. */
static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u8 tx_seq = __get_reqseq(rx_control);
	BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
	pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	pi->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(sk);
	if (rx_control & L2CAP_CTRL_POLL)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
	if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer can't receive: no point retransmitting. */
		del_timer(&pi->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
	/* In SREJ recovery: answer the poll with the tail SREJ. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(sk);
	l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch an ERTM supervisory (S-) frame to the RR/REJ/SREJ/RNR
 * handler after common F-bit processing. */
static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
	BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
	/* F-bit answering our poll: stop the monitor timer. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&l2cap_pi(sk)->monitor_timer);
		if (l2cap_pi(sk)->unacked_frames > 0)
			__mod_retrans_timer();
		l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(sk, rx_control);
	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(sk, rx_control);
	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(sk, rx_control);
	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(sk, rx_control);
/* Validate and dispatch one received ERTM frame: FCS check, length
 * bounds, req_seq window check, then I-frame vs S-frame handling. */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int len, next_tx_seq_offset, req_seq_offset;
	/* 16-bit control field leads the payload. */
	control = get_unaligned_le16(skb->data);
	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(pi, skb))
	/* SAR start frames carry an extra 2-byte SDU-length field. */
	if (__is_sar_start(control) && __is_iframe(control))
	if (pi->fcs == L2CAP_FCS_CRC16)
	/* Payload exceeds the negotiated MPS: protocol violation. */
	if (len > pi->mps) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;
	next_tx_seq_offset =
		(pi->next_tx_seq - pi->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;
	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	if (__is_iframe(control)) {
		/* An I-frame with no payload left is invalid. */
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		l2cap_data_channel_iframe(sk, control, skb);
	/* An S-frame with leftover payload is invalid. */
	l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	l2cap_data_channel_sframe(sk, control, skb);
/* Deliver a frame received on a connection-oriented channel (CID),
 * dispatching on the channel's negotiated mode. */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
	struct l2cap_pinfo *pi;
	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	BT_DBG("unknown cid 0x%4.4x", cid);
	BT_DBG("sk %p, len %d", sk, skb->len);
	if (sk->sk_state != BT_CONNECTED)
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */
		if (pi->imtu < skb->len)
		if (!sock_queue_rcv_skb(sk, skb))
	case L2CAP_MODE_ERTM:
		/* Socket locked by userspace: defer via the backlog. */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
			if (sk_add_backlog(sk, skb))
	case L2CAP_MODE_STREAMING:
		control = get_unaligned_le16(skb->data);
		if (l2cap_check_fcs(pi, skb))
		/* SAR start frames carry an extra SDU-length field. */
		if (__is_sar_start(control))
		if (pi->fcs == L2CAP_FCS_CRC16)
		/* Streaming channels never carry S-frames. */
		if (len > pi->mps || len < 0 || __is_sframe(control))
		tx_seq = __get_txseq(control);
		/* Streaming is lossy: just resync expected_tx_seq on gaps. */
		if (pi->expected_tx_seq == tx_seq)
			pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
			pi->expected_tx_seq = (tx_seq + 1) % 64;
		l2cap_streaming_reassembly_sdu(sk, skb, control);
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 0x0002) datagram to a socket bound to
 * the PSM carried in the payload. */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	BT_DBG("sk %p, len %d", sk, skb->len);
	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
	/* Enforce the socket's incoming MTU. */
	if (l2cap_pi(sk)->imtu < skb->len)
	if (!sock_queue_rcv_skb(sk, skb))
/* Top-level dispatch for a complete, reassembled L2CAP frame: strip the
 * basic header and route by destination CID. */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
	struct l2cap_hdr *lh = (void *) skb->data;
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);
	/* Header length must match the actual payload length. */
	if (len != skb->len) {
	BT_DBG("len %d, cid 0x%4.4x", len, cid);
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
	case L2CAP_CID_CONN_LESS:
		/* Connectionless payload starts with the destination PSM. */
		psm = get_unaligned_le16(skb->data);
		l2cap_conless_channel(conn, psm, skb);
	/* Everything else is a connection-oriented data channel. */
	l2cap_data_channel(conn, cid, skb);
4466 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * bdaddr?  Scans listening L2CAP sockets; an exact local-address match
 * (lm1) takes precedence over wildcard (BDADDR_ANY) listeners (lm2).
 * Returns the HCI_LM_* link-mode flags, or 0 to refuse. */
static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
	int exact = 0, lm1 = 0, lm2 = 0;
	register struct sock *sk;
	struct hlist_node *node;
	/* Only ACL links carry L2CAP. */
	if (type != ACL_LINK)
	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
	/* Find listening sockets and check their link_mode */
	read_lock(&l2cap_sk_list.lock);
	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
		/* Exact match */
		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm1 |= HCI_LM_MASTER;
		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (l2cap_pi(sk)->role_switch)
				lm2 |= HCI_LM_MASTER;
	read_unlock(&l2cap_sk_list.lock);
	return exact ? lm1 : lm2;
/* HCI callback: an ACL connection attempt completed.  On success attach
 * an L2CAP connection object; on failure tear everything down. */
static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
	struct l2cap_conn *conn;
	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
	if (hcon->type != ACL_LINK)
	conn = l2cap_conn_add(hcon, status);
	l2cap_conn_ready(conn);
	/* Failure path: propagate the HCI status as an errno. */
	l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the stack asks which reason to report for a pending
 * disconnect of this link. */
static int l2cap_disconn_ind(struct hci_conn *hcon)
	struct l2cap_conn *conn = hcon->l2cap_data;
	BT_DBG("hcon %p", hcon);
	if (hcon->type != ACL_LINK || !conn)
	return conn->disc_reason;
/* HCI callback: the ACL link went down; destroy the L2CAP connection
 * with the HCI reason mapped to an errno. */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
	BT_DBG("hcon %p reason %d", hcon, reason);
	if (hcon->type != ACL_LINK)
	l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on an established channel.  Losing
 * encryption gives MEDIUM-security channels a 5s grace timer and closes
 * HIGH-security channels outright; regaining it clears the timer. */
static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
	/* Only connection-oriented sockets care about encryption. */
	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
	if (encrypt == 0x00) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ * 5);
		} else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			__l2cap_sock_close(sk, ECONNREFUSED);
		/* Encryption (re-)enabled: cancel any grace timer. */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
			l2cap_sock_clear_timer(sk);
/* HCI callback: an authentication/encryption procedure finished with
 * the given status.  Walk every channel on the connection and advance
 * those that were waiting on security: BT_CONNECT channels now send
 * their Connection Request, BT_CONNECT2 channels answer the pending
 * incoming request with success or a security block. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	l = &conn->chan_list;
	BT_DBG("conn %p", conn);
	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		/* Channel still mid-connect elsewhere: skip it here. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
		/* Established channels only need the encryption check. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
		if (sk->sk_state == BT_CONNECT) {
			/* Security done: send the deferred Connection Request. */
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm  = l2cap_pi(sk)->psm;
			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 10);
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			sk->sk_state = BT_CONFIG;
			result = L2CAP_CR_SUCCESS;
			/* Security failed: refuse the incoming connection. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, HZ / 10);
			result = L2CAP_CR_SEC_BLOCK;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
	read_unlock(&l->lock);
/* HCI callback: raw ACL data arrived.  Reassemble L2CAP frames that HCI
 * fragmented across multiple ACL packets (ACL_START carries the L2CAP
 * header and total length; continuations fill conn->rx_skb) and hand
 * each complete frame to l2cap_recv_frame().  Malformed fragment
 * sequences mark the connection unreliable. */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
	struct l2cap_conn *conn = hcon->l2cap_data;
	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		/* A new start while a reassembly is in flight: drop the
		 * partial frame and flag the connection. */
		BT_ERR("Unexpected start frame (len %d)", skb->len);
		kfree_skb(conn->rx_skb);
		conn->rx_skb = NULL;
		l2cap_conn_unreliable(conn, ECOMM);
		/* Start fragment smaller than an L2CAP header. */
		BT_ERR("Frame is too short (len %d)", skb->len);
		l2cap_conn_unreliable(conn, ECOMM);
		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			l2cap_conn_unreliable(conn, ECOMM);
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		/* Bytes still expected in continuation fragments. */
		conn->rx_len = len - skb->len;
	BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
	/* Continuation fragment with no reassembly in progress. */
	if (!conn->rx_len) {
		BT_ERR("Unexpected continuation frame (len %d)", skb->len);
		l2cap_conn_unreliable(conn, ECOMM);
	if (skb->len > conn->rx_len) {
		BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
		kfree_skb(conn->rx_skb);
		conn->rx_skb = NULL;
		l2cap_conn_unreliable(conn, ECOMM);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
	conn->rx_len -= skb->len;
	if (!conn->rx_len) {
		/* Complete frame received */
		l2cap_recv_frame(conn, conn->rx_skb);
		conn->rx_skb = NULL;
/*
 * seq_file show handler for the "l2cap" debugfs entry: prints one line
 * per L2CAP socket -- source/destination bdaddr, socket state, PSM,
 * channel ids, MTUs and security level.
 */
4723 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4726 struct hlist_node *node;
/* Walk the global L2CAP socket list under its reader lock (BH-safe
 * variant, since sockets are also touched from softirq context). */
4728 read_lock_bh(&l2cap_sk_list.lock);
4730 sk_for_each(sk, node, &l2cap_sk_list.head) {
4731 struct l2cap_pinfo *pi = l2cap_pi(sk);
4733 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4734 batostr(&bt_sk(sk)->src),
4735 batostr(&bt_sk(sk)->dst),
4736 sk->sk_state, __le16_to_cpu(pi->psm),
4738 pi->imtu, pi->omtu, pi->sec_level);
4741 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open hook: bind the seq_file to l2cap_debugfs_show(). */
4746 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4748 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the read-only "l2cap" debugfs file. */
4751 static const struct file_operations l2cap_debugfs_fops = {
4752 .open = l2cap_debugfs_open,
4754 .llseek = seq_lseek,
4755 .release = single_release,
/* Dentry of the debugfs file; created in l2cap_init(), removed in
 * l2cap_exit(). */
4758 static struct dentry *l2cap_debugfs;
/* Userspace-facing socket operations for L2CAP (SOCK_* over
 * PF_BLUETOOTH); poll/ioctl/mmap/socketpair fall back to the generic
 * Bluetooth or sock_no_* helpers. */
4760 static const struct proto_ops l2cap_sock_ops = {
4761 .family = PF_BLUETOOTH,
4762 .owner = THIS_MODULE,
4763 .release = l2cap_sock_release,
4764 .bind = l2cap_sock_bind,
4765 .connect = l2cap_sock_connect,
4766 .listen = l2cap_sock_listen,
4767 .accept = l2cap_sock_accept,
4768 .getname = l2cap_sock_getname,
4769 .sendmsg = l2cap_sock_sendmsg,
4770 .recvmsg = l2cap_sock_recvmsg,
4771 .poll = bt_sock_poll,
4772 .ioctl = bt_sock_ioctl,
4773 .mmap = sock_no_mmap,
4774 .socketpair = sock_no_socketpair,
4775 .shutdown = l2cap_sock_shutdown,
4776 .setsockopt = l2cap_sock_setsockopt,
4777 .getsockopt = l2cap_sock_getsockopt
/* Family handler registered with the Bluetooth socket layer for
 * BTPROTO_L2CAP; .create is invoked on socket(2). */
4780 static const struct net_proto_family l2cap_sock_family_ops = {
4781 .family = PF_BLUETOOTH,
4782 .owner = THIS_MODULE,
4783 .create = l2cap_sock_create,
/* HCI protocol descriptor: callbacks the HCI core uses to deliver
 * connection events, security results and ACL data to L2CAP. */
4786 static struct hci_proto l2cap_hci_proto = {
4788 .id = HCI_PROTO_L2CAP,
4789 .connect_ind = l2cap_connect_ind,
4790 .connect_cfm = l2cap_connect_cfm,
4791 .disconn_ind = l2cap_disconn_ind,
4792 .disconn_cfm = l2cap_disconn_cfm,
4793 .security_cfm = l2cap_security_cfm,
4794 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the L2CAP proto, create the worker queue,
 * register the Bluetooth socket family and the HCI protocol handler,
 * then create the debugfs entry.  Registrations are unwound in reverse
 * order on failure (unwind labels are not visible in this extract).
 */
4797 static int __init l2cap_init(void)
4801 err = proto_register(&l2cap_proto, 0);
/* Single-threaded workqueue for deferred L2CAP work -- presumably the
 * ERTM local-busy handling; consumer not visible in this extract. */
4805 _busy_wq = create_singlethread_workqueue("l2cap");
4809 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4811 BT_ERR("L2CAP socket registration failed");
4815 err = hci_register_proto(&l2cap_hci_proto);
4817 BT_ERR("L2CAP protocol registration failed");
/* Unwind the earlier socket-family registration on failure. */
4818 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs entry is best-effort: failure is only logged, not fatal. */
4823 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4824 bt_debugfs, NULL, &l2cap_debugfs_fops);
4826 BT_ERR("Failed to create L2CAP debug file");
4829 BT_INFO("L2CAP ver %s", VERSION);
4830 BT_INFO("L2CAP socket layer initialized");
/* Error path: undo proto_register(). */
4835 proto_unregister(&l2cap_proto);
/*
 * Module exit: tear down in reverse order of l2cap_init() -- debugfs
 * file, workqueue, socket family, HCI protocol handler, proto.
 */
4839 static void __exit l2cap_exit(void)
4841 debugfs_remove(l2cap_debugfs);
/* Drain any pending work before destroying the workqueue. */
4843 flush_workqueue(_busy_wq);
4844 destroy_workqueue(_busy_wq);
4846 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4847 BT_ERR("L2CAP socket unregistration failed");
4849 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4850 BT_ERR("L2CAP protocol unregistration failed");
4852 proto_unregister(&l2cap_proto);
/* Exported no-op: callers reference this symbol purely to create a
 * module dependency that auto-loads l2cap. */
4855 void l2cap_load(void)
4857 /* Dummy function to trigger automatic L2CAP module loading by
4858 * other modules that use L2CAP sockets but don't use any other
4859 * symbols from it. */
4861 EXPORT_SYMBOL(l2cap_load);
4863 module_init(l2cap_init);
4864 module_exit(l2cap_exit);
/* "disable_ertm" is writable at runtime via sysfs (mode 0644). */
4866 module_param(disable_ertm, bool, 0644);
4867 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4869 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4870 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4871 MODULE_VERSION(VERSION);
4872 MODULE_LICENSE("GPL");
/* Auto-load this module when a PF_BLUETOOTH socket with Bluetooth
 * protocol number 0 (L2CAP) is requested. */
4873 MODULE_ALIAS("bt-proto-0");